diff --git a/.gitignore b/.gitignore index 3bcd892..4b08cc7 100755 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,5 @@ lib/ .DS_Store *.DS_Store /bin +*.ctxt +*.class \ No newline at end of file diff --git a/examples/FindCircles/FindCircles.pde b/examples/FindCircles/FindCircles.pde new file mode 100644 index 0000000..ad10158 --- /dev/null +++ b/examples/FindCircles/FindCircles.pde @@ -0,0 +1,29 @@ +import gab.opencv.*; + +OpenCV opencv; + +void setup() { + size(300*2, 400); + ellipseMode(RADIUS); + + opencv = new OpenCV(this, 300, 400); + +} + +void draw() { + opencv.loadImage("sample.jpg"); + image(opencv.getSnapshot(),0,0); + image(loadImage("sample.jpg"),300,0); + + noFill(); + stroke(0, 255, 0); + strokeWeight(3); + + ArrayList<ArrayList<Integer>> arr = opencv.findCircles(); + + for (ArrayList<Integer> zx : arr) + { + ellipse(zx.get(0), zx.get(1), zx.get(2), zx.get(2)); // x, y, radius, radius (of the circle) + } + +} diff --git a/examples/FindCircles/sample.jpg b/examples/FindCircles/sample.jpg new file mode 100644 index 0000000..6be518d Binary files /dev/null and b/examples/FindCircles/sample.jpg differ diff --git a/lib/.DS_Store b/lib/.DS_Store deleted file mode 100644 index 5008ddf..0000000 Binary files a/lib/.DS_Store and /dev/null differ diff --git a/lib/arm7/libopencv_calib3d.so b/lib/arm7/libopencv_calib3d.so deleted file mode 120000 index 37c62ef..0000000 --- a/lib/arm7/libopencv_calib3d.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_calib3d.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_calib3d.so.2.4 b/lib/arm7/libopencv_calib3d.so.2.4 deleted file mode 120000 index 9819e07..0000000 --- a/lib/arm7/libopencv_calib3d.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_calib3d.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_contrib.so b/lib/arm7/libopencv_contrib.so deleted file mode 120000 index d8a80d5..0000000 --- a/lib/arm7/libopencv_contrib.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_contrib.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_contrib.so.2.4 b/lib/arm7/libopencv_contrib.so.2.4 deleted file mode 120000 index 3332855..0000000 --- a/lib/arm7/libopencv_contrib.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_contrib.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_core.so b/lib/arm7/libopencv_core.so deleted file mode 120000 index 4a68931..0000000 --- a/lib/arm7/libopencv_core.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_core.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_core.so.2.4 b/lib/arm7/libopencv_core.so.2.4 deleted file mode 120000 index ae2ae7b..0000000 --- a/lib/arm7/libopencv_core.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_core.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_features2d.so b/lib/arm7/libopencv_features2d.so deleted file mode 120000 index 171141c..0000000 --- a/lib/arm7/libopencv_features2d.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_features2d.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_features2d.so.2.4 b/lib/arm7/libopencv_features2d.so.2.4 deleted file mode 120000 index 5cd3acb..0000000 --- a/lib/arm7/libopencv_features2d.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_features2d.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_flann.so b/lib/arm7/libopencv_flann.so deleted file mode 120000 index 818d581..0000000 --- a/lib/arm7/libopencv_flann.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_flann.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_flann.so.2.4 b/lib/arm7/libopencv_flann.so.2.4
deleted file mode 120000 index fd7593e..0000000 --- a/lib/arm7/libopencv_flann.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_flann.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_gpu.so b/lib/arm7/libopencv_gpu.so deleted file mode 120000 index 61edaa4..0000000 --- a/lib/arm7/libopencv_gpu.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_gpu.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_gpu.so.2.4 b/lib/arm7/libopencv_gpu.so.2.4 deleted file mode 120000 index a72f295..0000000 --- a/lib/arm7/libopencv_gpu.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_gpu.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_highgui.so b/lib/arm7/libopencv_highgui.so deleted file mode 120000 index d95a21f..0000000 --- a/lib/arm7/libopencv_highgui.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_highgui.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_highgui.so.2.4 b/lib/arm7/libopencv_highgui.so.2.4 deleted file mode 120000 index 773f303..0000000 --- a/lib/arm7/libopencv_highgui.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_highgui.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_imgproc.so b/lib/arm7/libopencv_imgproc.so deleted file mode 120000 index 70e4328..0000000 --- a/lib/arm7/libopencv_imgproc.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_imgproc.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_imgproc.so.2.4 b/lib/arm7/libopencv_imgproc.so.2.4 deleted file mode 120000 index e8d4579..0000000 --- a/lib/arm7/libopencv_imgproc.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_imgproc.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_legacy.so b/lib/arm7/libopencv_legacy.so deleted file mode 120000 index 1afd5e1..0000000 --- a/lib/arm7/libopencv_legacy.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_legacy.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_legacy.so.2.4 b/lib/arm7/libopencv_legacy.so.2.4 deleted file mode 120000 index 0213de4..0000000 --- a/lib/arm7/libopencv_legacy.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_legacy.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_ml.so b/lib/arm7/libopencv_ml.so deleted file mode 120000 index 4e71450..0000000 --- a/lib/arm7/libopencv_ml.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_ml.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_ml.so.2.4 b/lib/arm7/libopencv_ml.so.2.4 deleted file mode 120000 index 338dffa..0000000 --- a/lib/arm7/libopencv_ml.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_ml.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_nonfree.so b/lib/arm7/libopencv_nonfree.so deleted file mode 120000 index 73c1613..0000000 --- a/lib/arm7/libopencv_nonfree.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_nonfree.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_nonfree.so.2.4 b/lib/arm7/libopencv_nonfree.so.2.4 deleted file mode 120000 index 2d6c369..0000000 --- a/lib/arm7/libopencv_nonfree.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_nonfree.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_objdetect.so b/lib/arm7/libopencv_objdetect.so deleted file mode 120000 index 3c4cef9..0000000 --- a/lib/arm7/libopencv_objdetect.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_objdetect.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_objdetect.so.2.4 b/lib/arm7/libopencv_objdetect.so.2.4 deleted file mode 120000 index 2be60de..0000000 --- a/lib/arm7/libopencv_objdetect.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_objdetect.so.2.4.5 \ No 
newline at end of file diff --git a/lib/arm7/libopencv_photo.so b/lib/arm7/libopencv_photo.so deleted file mode 120000 index 387bc42..0000000 --- a/lib/arm7/libopencv_photo.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_photo.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_photo.so.2.4 b/lib/arm7/libopencv_photo.so.2.4 deleted file mode 120000 index 45b8eb2..0000000 --- a/lib/arm7/libopencv_photo.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_photo.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_stitching.so b/lib/arm7/libopencv_stitching.so deleted file mode 120000 index 10b36f8..0000000 --- a/lib/arm7/libopencv_stitching.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_stitching.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_stitching.so.2.4 b/lib/arm7/libopencv_stitching.so.2.4 deleted file mode 120000 index 2cf2908..0000000 --- a/lib/arm7/libopencv_stitching.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_stitching.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_superres.so b/lib/arm7/libopencv_superres.so deleted file mode 120000 index dbad36c..0000000 --- a/lib/arm7/libopencv_superres.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_superres.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_superres.so.2.4 b/lib/arm7/libopencv_superres.so.2.4 deleted file mode 120000 index 42dc315..0000000 --- a/lib/arm7/libopencv_superres.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_superres.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_ts.so b/lib/arm7/libopencv_ts.so deleted file mode 120000 index 88f5375..0000000 --- a/lib/arm7/libopencv_ts.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_ts.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_ts.so.2.4 b/lib/arm7/libopencv_ts.so.2.4 deleted file mode 120000 index 391bebc..0000000 --- a/lib/arm7/libopencv_ts.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_ts.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_video.so b/lib/arm7/libopencv_video.so deleted file mode 120000 index d5ddd6c..0000000 --- a/lib/arm7/libopencv_video.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_video.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_video.so.2.4 b/lib/arm7/libopencv_video.so.2.4 deleted file mode 120000 index 0e319f2..0000000 --- a/lib/arm7/libopencv_video.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_video.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_videostab.so b/lib/arm7/libopencv_videostab.so deleted file mode 120000 index faeb668..0000000 --- a/lib/arm7/libopencv_videostab.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_videostab.so.2.4 \ No newline at end of file diff --git a/lib/arm7/libopencv_videostab.so.2.4 b/lib/arm7/libopencv_videostab.so.2.4 deleted file mode 120000 index 85a3c08..0000000 --- a/lib/arm7/libopencv_videostab.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_videostab.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_calib3d.so b/lib/linux32/libopencv_calib3d.so deleted file mode 120000 index 37c62ef..0000000 --- a/lib/linux32/libopencv_calib3d.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_calib3d.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_calib3d.so.2.4 b/lib/linux32/libopencv_calib3d.so.2.4 deleted file mode 120000 index 9819e07..0000000 --- a/lib/linux32/libopencv_calib3d.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_calib3d.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_contrib.so 
b/lib/linux32/libopencv_contrib.so deleted file mode 120000 index d8a80d5..0000000 --- a/lib/linux32/libopencv_contrib.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_contrib.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_contrib.so.2.4 b/lib/linux32/libopencv_contrib.so.2.4 deleted file mode 120000 index 3332855..0000000 --- a/lib/linux32/libopencv_contrib.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_contrib.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_core.so b/lib/linux32/libopencv_core.so deleted file mode 120000 index 4a68931..0000000 --- a/lib/linux32/libopencv_core.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_core.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_core.so.2.4 b/lib/linux32/libopencv_core.so.2.4 deleted file mode 120000 index ae2ae7b..0000000 --- a/lib/linux32/libopencv_core.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_core.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_features2d.so b/lib/linux32/libopencv_features2d.so deleted file mode 120000 index 171141c..0000000 --- a/lib/linux32/libopencv_features2d.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_features2d.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_features2d.so.2.4 b/lib/linux32/libopencv_features2d.so.2.4 deleted file mode 120000 index 5cd3acb..0000000 --- a/lib/linux32/libopencv_features2d.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_features2d.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_flann.so b/lib/linux32/libopencv_flann.so deleted file mode 120000 index 818d581..0000000 --- a/lib/linux32/libopencv_flann.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_flann.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_flann.so.2.4 b/lib/linux32/libopencv_flann.so.2.4 deleted file mode 120000 index fd7593e..0000000 --- a/lib/linux32/libopencv_flann.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_flann.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_gpu.so b/lib/linux32/libopencv_gpu.so deleted file mode 120000 index 61edaa4..0000000 --- a/lib/linux32/libopencv_gpu.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_gpu.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_gpu.so.2.4 b/lib/linux32/libopencv_gpu.so.2.4 deleted file mode 120000 index a72f295..0000000 --- a/lib/linux32/libopencv_gpu.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_gpu.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_highgui.so b/lib/linux32/libopencv_highgui.so deleted file mode 120000 index d95a21f..0000000 --- a/lib/linux32/libopencv_highgui.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_highgui.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_highgui.so.2.4 b/lib/linux32/libopencv_highgui.so.2.4 deleted file mode 120000 index 773f303..0000000 --- a/lib/linux32/libopencv_highgui.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_highgui.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_imgproc.so b/lib/linux32/libopencv_imgproc.so deleted file mode 120000 index 70e4328..0000000 --- a/lib/linux32/libopencv_imgproc.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_imgproc.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_imgproc.so.2.4 b/lib/linux32/libopencv_imgproc.so.2.4 deleted file mode 120000 index e8d4579..0000000 --- a/lib/linux32/libopencv_imgproc.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_imgproc.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_legacy.so 
b/lib/linux32/libopencv_legacy.so deleted file mode 120000 index 1afd5e1..0000000 --- a/lib/linux32/libopencv_legacy.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_legacy.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_legacy.so.2.4 b/lib/linux32/libopencv_legacy.so.2.4 deleted file mode 120000 index 0213de4..0000000 --- a/lib/linux32/libopencv_legacy.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_legacy.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_ml.so b/lib/linux32/libopencv_ml.so deleted file mode 120000 index 4e71450..0000000 --- a/lib/linux32/libopencv_ml.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_ml.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_ml.so.2.4 b/lib/linux32/libopencv_ml.so.2.4 deleted file mode 120000 index 338dffa..0000000 --- a/lib/linux32/libopencv_ml.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_ml.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_nonfree.so b/lib/linux32/libopencv_nonfree.so deleted file mode 120000 index 73c1613..0000000 --- a/lib/linux32/libopencv_nonfree.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_nonfree.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_nonfree.so.2.4 b/lib/linux32/libopencv_nonfree.so.2.4 deleted file mode 120000 index 2d6c369..0000000 --- a/lib/linux32/libopencv_nonfree.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_nonfree.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_objdetect.so b/lib/linux32/libopencv_objdetect.so deleted file mode 120000 index 3c4cef9..0000000 --- a/lib/linux32/libopencv_objdetect.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_objdetect.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_objdetect.so.2.4 b/lib/linux32/libopencv_objdetect.so.2.4 deleted file mode 120000 index 2be60de..0000000 --- a/lib/linux32/libopencv_objdetect.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_objdetect.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_photo.so b/lib/linux32/libopencv_photo.so deleted file mode 120000 index 387bc42..0000000 --- a/lib/linux32/libopencv_photo.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_photo.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_photo.so.2.4 b/lib/linux32/libopencv_photo.so.2.4 deleted file mode 120000 index 45b8eb2..0000000 --- a/lib/linux32/libopencv_photo.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_photo.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_superres.so b/lib/linux32/libopencv_superres.so deleted file mode 120000 index dbad36c..0000000 --- a/lib/linux32/libopencv_superres.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_superres.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_superres.so.2.4 b/lib/linux32/libopencv_superres.so.2.4 deleted file mode 120000 index 42dc315..0000000 --- a/lib/linux32/libopencv_superres.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_superres.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_ts.so b/lib/linux32/libopencv_ts.so deleted file mode 120000 index 88f5375..0000000 --- a/lib/linux32/libopencv_ts.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_ts.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_ts.so.2.4 b/lib/linux32/libopencv_ts.so.2.4 deleted file mode 120000 index 391bebc..0000000 --- a/lib/linux32/libopencv_ts.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_ts.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_video.so b/lib/linux32/libopencv_video.so 
deleted file mode 120000 index d5ddd6c..0000000 --- a/lib/linux32/libopencv_video.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_video.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_video.so.2.4 b/lib/linux32/libopencv_video.so.2.4 deleted file mode 120000 index 0e319f2..0000000 --- a/lib/linux32/libopencv_video.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_video.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_videostab.so b/lib/linux32/libopencv_videostab.so deleted file mode 120000 index faeb668..0000000 --- a/lib/linux32/libopencv_videostab.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_videostab.so.2.4 \ No newline at end of file diff --git a/lib/linux32/libopencv_videostab.so.2.4 b/lib/linux32/libopencv_videostab.so.2.4 deleted file mode 120000 index 85a3c08..0000000 --- a/lib/linux32/libopencv_videostab.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_videostab.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_calib3d.so b/lib/linux64/libopencv_calib3d.so deleted file mode 120000 index 37c62ef..0000000 --- a/lib/linux64/libopencv_calib3d.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_calib3d.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_calib3d.so.2.4 b/lib/linux64/libopencv_calib3d.so.2.4 deleted file mode 120000 index 9819e07..0000000 --- a/lib/linux64/libopencv_calib3d.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_calib3d.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_contrib.so b/lib/linux64/libopencv_contrib.so deleted file mode 120000 index d8a80d5..0000000 --- a/lib/linux64/libopencv_contrib.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_contrib.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_contrib.so.2.4 b/lib/linux64/libopencv_contrib.so.2.4 deleted file mode 120000 index 3332855..0000000 --- a/lib/linux64/libopencv_contrib.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_contrib.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_core.so b/lib/linux64/libopencv_core.so deleted file mode 120000 index 4a68931..0000000 --- a/lib/linux64/libopencv_core.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_core.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_core.so.2.4 b/lib/linux64/libopencv_core.so.2.4 deleted file mode 120000 index ae2ae7b..0000000 --- a/lib/linux64/libopencv_core.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_core.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_features2d.so b/lib/linux64/libopencv_features2d.so deleted file mode 120000 index 171141c..0000000 --- a/lib/linux64/libopencv_features2d.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_features2d.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_features2d.so.2.4 b/lib/linux64/libopencv_features2d.so.2.4 deleted file mode 120000 index 5cd3acb..0000000 --- a/lib/linux64/libopencv_features2d.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_features2d.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_flann.so b/lib/linux64/libopencv_flann.so deleted file mode 120000 index 818d581..0000000 --- a/lib/linux64/libopencv_flann.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_flann.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_flann.so.2.4 b/lib/linux64/libopencv_flann.so.2.4 deleted file mode 120000 index fd7593e..0000000 --- a/lib/linux64/libopencv_flann.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_flann.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_gpu.so 
b/lib/linux64/libopencv_gpu.so deleted file mode 120000 index 61edaa4..0000000 --- a/lib/linux64/libopencv_gpu.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_gpu.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_gpu.so.2.4 b/lib/linux64/libopencv_gpu.so.2.4 deleted file mode 120000 index a72f295..0000000 --- a/lib/linux64/libopencv_gpu.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_gpu.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_highgui.so b/lib/linux64/libopencv_highgui.so deleted file mode 120000 index d95a21f..0000000 --- a/lib/linux64/libopencv_highgui.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_highgui.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_highgui.so.2.4 b/lib/linux64/libopencv_highgui.so.2.4 deleted file mode 120000 index 773f303..0000000 --- a/lib/linux64/libopencv_highgui.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_highgui.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_imgproc.so b/lib/linux64/libopencv_imgproc.so deleted file mode 120000 index 70e4328..0000000 --- a/lib/linux64/libopencv_imgproc.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_imgproc.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_imgproc.so.2.4 b/lib/linux64/libopencv_imgproc.so.2.4 deleted file mode 120000 index e8d4579..0000000 --- a/lib/linux64/libopencv_imgproc.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_imgproc.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_legacy.so b/lib/linux64/libopencv_legacy.so deleted file mode 120000 index 1afd5e1..0000000 --- a/lib/linux64/libopencv_legacy.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_legacy.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_legacy.so.2.4 b/lib/linux64/libopencv_legacy.so.2.4 deleted file mode 120000 index 0213de4..0000000 --- a/lib/linux64/libopencv_legacy.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_legacy.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_ml.so b/lib/linux64/libopencv_ml.so deleted file mode 120000 index 4e71450..0000000 --- a/lib/linux64/libopencv_ml.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_ml.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_ml.so.2.4 b/lib/linux64/libopencv_ml.so.2.4 deleted file mode 120000 index 338dffa..0000000 --- a/lib/linux64/libopencv_ml.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_ml.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_nonfree.so b/lib/linux64/libopencv_nonfree.so deleted file mode 120000 index 73c1613..0000000 --- a/lib/linux64/libopencv_nonfree.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_nonfree.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_nonfree.so.2.4 b/lib/linux64/libopencv_nonfree.so.2.4 deleted file mode 120000 index 2d6c369..0000000 --- a/lib/linux64/libopencv_nonfree.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_nonfree.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_objdetect.so b/lib/linux64/libopencv_objdetect.so deleted file mode 120000 index 3c4cef9..0000000 --- a/lib/linux64/libopencv_objdetect.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_objdetect.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_objdetect.so.2.4 b/lib/linux64/libopencv_objdetect.so.2.4 deleted file mode 120000 index 2be60de..0000000 --- a/lib/linux64/libopencv_objdetect.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_objdetect.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_photo.so 
b/lib/linux64/libopencv_photo.so deleted file mode 120000 index 387bc42..0000000 --- a/lib/linux64/libopencv_photo.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_photo.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_photo.so.2.4 b/lib/linux64/libopencv_photo.so.2.4 deleted file mode 120000 index 45b8eb2..0000000 --- a/lib/linux64/libopencv_photo.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_photo.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_stitching.so b/lib/linux64/libopencv_stitching.so deleted file mode 120000 index 10b36f8..0000000 --- a/lib/linux64/libopencv_stitching.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_stitching.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_stitching.so.2.4 b/lib/linux64/libopencv_stitching.so.2.4 deleted file mode 120000 index 2cf2908..0000000 --- a/lib/linux64/libopencv_stitching.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_stitching.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_superres.so b/lib/linux64/libopencv_superres.so deleted file mode 120000 index dbad36c..0000000 --- a/lib/linux64/libopencv_superres.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_superres.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_superres.so.2.4 b/lib/linux64/libopencv_superres.so.2.4 deleted file mode 120000 index 42dc315..0000000 --- a/lib/linux64/libopencv_superres.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_superres.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_ts.so b/lib/linux64/libopencv_ts.so deleted file mode 120000 index 88f5375..0000000 --- a/lib/linux64/libopencv_ts.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_ts.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_ts.so.2.4 b/lib/linux64/libopencv_ts.so.2.4 deleted file mode 120000 index 391bebc..0000000 --- a/lib/linux64/libopencv_ts.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_ts.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_video.so b/lib/linux64/libopencv_video.so deleted file mode 120000 index d5ddd6c..0000000 --- a/lib/linux64/libopencv_video.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_video.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_video.so.2.4 b/lib/linux64/libopencv_video.so.2.4 deleted file mode 120000 index 0e319f2..0000000 --- a/lib/linux64/libopencv_video.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_video.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_videostab.so b/lib/linux64/libopencv_videostab.so deleted file mode 120000 index faeb668..0000000 --- a/lib/linux64/libopencv_videostab.so +++ /dev/null @@ -1 +0,0 @@ -libopencv_videostab.so.2.4 \ No newline at end of file diff --git a/lib/linux64/libopencv_videostab.so.2.4 b/lib/linux64/libopencv_videostab.so.2.4 deleted file mode 120000 index 85a3c08..0000000 --- a/lib/linux64/libopencv_videostab.so.2.4 +++ /dev/null @@ -1 +0,0 @@ -libopencv_videostab.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/cv2.so b/library/arm7/cv2.so old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/cv2.so rename to library/arm7/cv2.so diff --git a/library/arm7/libopencv_calib3d.so b/library/arm7/libopencv_calib3d.so new file mode 100644 index 0000000..37c62ef --- /dev/null +++ b/library/arm7/libopencv_calib3d.so @@ -0,0 +1 @@ +libopencv_calib3d.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_calib3d.so.2.4 b/library/arm7/libopencv_calib3d.so.2.4 new file mode 100644 index 0000000..9819e07 
--- /dev/null +++ b/library/arm7/libopencv_calib3d.so.2.4 @@ -0,0 +1 @@ +libopencv_calib3d.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_calib3d.so.2.4.5 b/library/arm7/libopencv_calib3d.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_calib3d.so.2.4.5 rename to library/arm7/libopencv_calib3d.so.2.4.5 diff --git a/lib/arm7/libopencv_calib3d_pch_dephelp.a b/library/arm7/libopencv_calib3d_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_calib3d_pch_dephelp.a rename to library/arm7/libopencv_calib3d_pch_dephelp.a diff --git a/library/arm7/libopencv_contrib.so b/library/arm7/libopencv_contrib.so new file mode 100644 index 0000000..d8a80d5 --- /dev/null +++ b/library/arm7/libopencv_contrib.so @@ -0,0 +1 @@ +libopencv_contrib.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_contrib.so.2.4 b/library/arm7/libopencv_contrib.so.2.4 new file mode 100644 index 0000000..3332855 --- /dev/null +++ b/library/arm7/libopencv_contrib.so.2.4 @@ -0,0 +1 @@ +libopencv_contrib.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_contrib.so.2.4.5 b/library/arm7/libopencv_contrib.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_contrib.so.2.4.5 rename to library/arm7/libopencv_contrib.so.2.4.5 diff --git a/lib/arm7/libopencv_contrib_pch_dephelp.a b/library/arm7/libopencv_contrib_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_contrib_pch_dephelp.a rename to library/arm7/libopencv_contrib_pch_dephelp.a diff --git a/library/arm7/libopencv_core.so b/library/arm7/libopencv_core.so new file mode 100644 index 0000000..4a68931 --- /dev/null +++ b/library/arm7/libopencv_core.so @@ -0,0 +1 @@ +libopencv_core.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_core.so.2.4 b/library/arm7/libopencv_core.so.2.4 new file mode 100644 index 0000000..ae2ae7b --- /dev/null +++ b/library/arm7/libopencv_core.so.2.4 @@ -0,0 +1 @@ +libopencv_core.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_core.so.2.4.5 b/library/arm7/libopencv_core.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_core.so.2.4.5 rename to library/arm7/libopencv_core.so.2.4.5 diff --git a/lib/arm7/libopencv_core_pch_dephelp.a b/library/arm7/libopencv_core_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_core_pch_dephelp.a rename to library/arm7/libopencv_core_pch_dephelp.a diff --git a/library/arm7/libopencv_features2d.so b/library/arm7/libopencv_features2d.so new file mode 100644 index 0000000..171141c --- /dev/null +++ b/library/arm7/libopencv_features2d.so @@ -0,0 +1 @@ +libopencv_features2d.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_features2d.so.2.4 b/library/arm7/libopencv_features2d.so.2.4 new file mode 100644 index 0000000..5cd3acb --- /dev/null +++ b/library/arm7/libopencv_features2d.so.2.4 @@ -0,0 +1 @@ +libopencv_features2d.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_features2d.so.2.4.5 b/library/arm7/libopencv_features2d.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_features2d.so.2.4.5 rename to library/arm7/libopencv_features2d.so.2.4.5 diff --git a/lib/arm7/libopencv_features2d_pch_dephelp.a b/library/arm7/libopencv_features2d_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_features2d_pch_dephelp.a rename to 
library/arm7/libopencv_features2d_pch_dephelp.a diff --git a/library/arm7/libopencv_flann.so b/library/arm7/libopencv_flann.so new file mode 100644 index 0000000..818d581 --- /dev/null +++ b/library/arm7/libopencv_flann.so @@ -0,0 +1 @@ +libopencv_flann.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_flann.so.2.4 b/library/arm7/libopencv_flann.so.2.4 new file mode 100644 index 0000000..fd7593e --- /dev/null +++ b/library/arm7/libopencv_flann.so.2.4 @@ -0,0 +1 @@ +libopencv_flann.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_flann.so.2.4.5 b/library/arm7/libopencv_flann.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_flann.so.2.4.5 rename to library/arm7/libopencv_flann.so.2.4.5 diff --git a/lib/arm7/libopencv_flann_pch_dephelp.a b/library/arm7/libopencv_flann_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_flann_pch_dephelp.a rename to library/arm7/libopencv_flann_pch_dephelp.a diff --git a/library/arm7/libopencv_gpu.so b/library/arm7/libopencv_gpu.so new file mode 100644 index 0000000..61edaa4 --- /dev/null +++ b/library/arm7/libopencv_gpu.so @@ -0,0 +1 @@ +libopencv_gpu.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_gpu.so.2.4 b/library/arm7/libopencv_gpu.so.2.4 new file mode 100644 index 0000000..a72f295 --- /dev/null +++ b/library/arm7/libopencv_gpu.so.2.4 @@ -0,0 +1 @@ +libopencv_gpu.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_gpu.so.2.4.5 b/library/arm7/libopencv_gpu.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_gpu.so.2.4.5 rename to library/arm7/libopencv_gpu.so.2.4.5 diff --git a/lib/arm7/libopencv_gpu_pch_dephelp.a b/library/arm7/libopencv_gpu_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_gpu_pch_dephelp.a rename to library/arm7/libopencv_gpu_pch_dephelp.a diff --git a/lib/arm7/libopencv_haartraining_engine.a b/library/arm7/libopencv_haartraining_engine.a similarity index 100% rename from lib/arm7/libopencv_haartraining_engine.a rename to library/arm7/libopencv_haartraining_engine.a diff --git a/library/arm7/libopencv_highgui.so b/library/arm7/libopencv_highgui.so new file mode 100644 index 0000000..d95a21f --- /dev/null +++ b/library/arm7/libopencv_highgui.so @@ -0,0 +1 @@ +libopencv_highgui.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_highgui.so.2.4 b/library/arm7/libopencv_highgui.so.2.4 new file mode 100644 index 0000000..773f303 --- /dev/null +++ b/library/arm7/libopencv_highgui.so.2.4 @@ -0,0 +1 @@ +libopencv_highgui.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_highgui.so.2.4.5 b/library/arm7/libopencv_highgui.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_highgui.so.2.4.5 rename to library/arm7/libopencv_highgui.so.2.4.5 diff --git a/lib/arm7/libopencv_highgui_pch_dephelp.a b/library/arm7/libopencv_highgui_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_highgui_pch_dephelp.a rename to library/arm7/libopencv_highgui_pch_dephelp.a diff --git a/library/arm7/libopencv_imgproc.so b/library/arm7/libopencv_imgproc.so new file mode 100644 index 0000000..70e4328 --- /dev/null +++ b/library/arm7/libopencv_imgproc.so @@ -0,0 +1 @@ +libopencv_imgproc.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_imgproc.so.2.4 b/library/arm7/libopencv_imgproc.so.2.4 new file mode 100644 index 0000000..e8d4579 --- /dev/null +++ 
b/library/arm7/libopencv_imgproc.so.2.4 @@ -0,0 +1 @@ +libopencv_imgproc.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_imgproc.so.2.4.5 b/library/arm7/libopencv_imgproc.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_imgproc.so.2.4.5 rename to library/arm7/libopencv_imgproc.so.2.4.5 diff --git a/lib/arm7/libopencv_imgproc_pch_dephelp.a b/library/arm7/libopencv_imgproc_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_imgproc_pch_dephelp.a rename to library/arm7/libopencv_imgproc_pch_dephelp.a diff --git a/lib/arm7/libopencv_java245.so b/library/arm7/libopencv_java245.so old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_java245.so rename to library/arm7/libopencv_java245.so diff --git a/library/arm7/libopencv_legacy.so b/library/arm7/libopencv_legacy.so new file mode 100644 index 0000000..1afd5e1 --- /dev/null +++ b/library/arm7/libopencv_legacy.so @@ -0,0 +1 @@ +libopencv_legacy.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_legacy.so.2.4 b/library/arm7/libopencv_legacy.so.2.4 new file mode 100644 index 0000000..0213de4 --- /dev/null +++ b/library/arm7/libopencv_legacy.so.2.4 @@ -0,0 +1 @@ +libopencv_legacy.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_legacy.so.2.4.5 b/library/arm7/libopencv_legacy.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_legacy.so.2.4.5 rename to library/arm7/libopencv_legacy.so.2.4.5 diff --git a/lib/arm7/libopencv_legacy_pch_dephelp.a b/library/arm7/libopencv_legacy_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_legacy_pch_dephelp.a rename to library/arm7/libopencv_legacy_pch_dephelp.a diff --git a/library/arm7/libopencv_ml.so b/library/arm7/libopencv_ml.so new file mode 100644 index 0000000..4e71450 --- /dev/null +++ b/library/arm7/libopencv_ml.so @@ -0,0 +1 @@ +libopencv_ml.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_ml.so.2.4 b/library/arm7/libopencv_ml.so.2.4 new file mode 100644 index 0000000..338dffa --- /dev/null +++ b/library/arm7/libopencv_ml.so.2.4 @@ -0,0 +1 @@ +libopencv_ml.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_ml.so.2.4.5 b/library/arm7/libopencv_ml.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_ml.so.2.4.5 rename to library/arm7/libopencv_ml.so.2.4.5 diff --git a/lib/arm7/libopencv_ml_pch_dephelp.a b/library/arm7/libopencv_ml_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_ml_pch_dephelp.a rename to library/arm7/libopencv_ml_pch_dephelp.a diff --git a/library/arm7/libopencv_nonfree.so b/library/arm7/libopencv_nonfree.so new file mode 100644 index 0000000..73c1613 --- /dev/null +++ b/library/arm7/libopencv_nonfree.so @@ -0,0 +1 @@ +libopencv_nonfree.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_nonfree.so.2.4 b/library/arm7/libopencv_nonfree.so.2.4 new file mode 100644 index 0000000..2d6c369 --- /dev/null +++ b/library/arm7/libopencv_nonfree.so.2.4 @@ -0,0 +1 @@ +libopencv_nonfree.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_nonfree.so.2.4.5 b/library/arm7/libopencv_nonfree.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_nonfree.so.2.4.5 rename to library/arm7/libopencv_nonfree.so.2.4.5 diff --git a/lib/arm7/libopencv_nonfree_pch_dephelp.a b/library/arm7/libopencv_nonfree_pch_dephelp.a similarity index 
100% rename from lib/arm7/libopencv_nonfree_pch_dephelp.a rename to library/arm7/libopencv_nonfree_pch_dephelp.a diff --git a/library/arm7/libopencv_objdetect.so b/library/arm7/libopencv_objdetect.so new file mode 100644 index 0000000..3c4cef9 --- /dev/null +++ b/library/arm7/libopencv_objdetect.so @@ -0,0 +1 @@ +libopencv_objdetect.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_objdetect.so.2.4 b/library/arm7/libopencv_objdetect.so.2.4 new file mode 100644 index 0000000..2be60de --- /dev/null +++ b/library/arm7/libopencv_objdetect.so.2.4 @@ -0,0 +1 @@ +libopencv_objdetect.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_objdetect.so.2.4.5 b/library/arm7/libopencv_objdetect.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_objdetect.so.2.4.5 rename to library/arm7/libopencv_objdetect.so.2.4.5 diff --git a/lib/arm7/libopencv_objdetect_pch_dephelp.a b/library/arm7/libopencv_objdetect_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_objdetect_pch_dephelp.a rename to library/arm7/libopencv_objdetect_pch_dephelp.a diff --git a/lib/arm7/libopencv_perf_calib3d_pch_dephelp.a b/library/arm7/libopencv_perf_calib3d_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_perf_calib3d_pch_dephelp.a rename to library/arm7/libopencv_perf_calib3d_pch_dephelp.a diff --git a/lib/arm7/libopencv_perf_core_pch_dephelp.a b/library/arm7/libopencv_perf_core_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_perf_core_pch_dephelp.a rename to library/arm7/libopencv_perf_core_pch_dephelp.a diff --git a/lib/arm7/libopencv_perf_features2d_pch_dephelp.a b/library/arm7/libopencv_perf_features2d_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_perf_features2d_pch_dephelp.a rename to library/arm7/libopencv_perf_features2d_pch_dephelp.a diff --git a/lib/arm7/libopencv_perf_gpu_pch_dephelp.a b/library/arm7/libopencv_perf_gpu_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_perf_gpu_pch_dephelp.a rename to library/arm7/libopencv_perf_gpu_pch_dephelp.a diff --git a/lib/arm7/libopencv_perf_highgui_pch_dephelp.a b/library/arm7/libopencv_perf_highgui_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_perf_highgui_pch_dephelp.a rename to library/arm7/libopencv_perf_highgui_pch_dephelp.a diff --git a/lib/arm7/libopencv_perf_imgproc_pch_dephelp.a b/library/arm7/libopencv_perf_imgproc_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_perf_imgproc_pch_dephelp.a rename to library/arm7/libopencv_perf_imgproc_pch_dephelp.a diff --git a/lib/arm7/libopencv_perf_nonfree_pch_dephelp.a b/library/arm7/libopencv_perf_nonfree_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_perf_nonfree_pch_dephelp.a rename to library/arm7/libopencv_perf_nonfree_pch_dephelp.a diff --git a/lib/arm7/libopencv_perf_objdetect_pch_dephelp.a b/library/arm7/libopencv_perf_objdetect_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_perf_objdetect_pch_dephelp.a rename to library/arm7/libopencv_perf_objdetect_pch_dephelp.a diff --git a/lib/arm7/libopencv_perf_photo_pch_dephelp.a b/library/arm7/libopencv_perf_photo_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_perf_photo_pch_dephelp.a rename to library/arm7/libopencv_perf_photo_pch_dephelp.a diff --git a/lib/arm7/libopencv_perf_stitching_pch_dephelp.a b/library/arm7/libopencv_perf_stitching_pch_dephelp.a similarity index 100% rename from 
lib/arm7/libopencv_perf_stitching_pch_dephelp.a rename to library/arm7/libopencv_perf_stitching_pch_dephelp.a diff --git a/lib/arm7/libopencv_perf_superres_pch_dephelp.a b/library/arm7/libopencv_perf_superres_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_perf_superres_pch_dephelp.a rename to library/arm7/libopencv_perf_superres_pch_dephelp.a diff --git a/lib/arm7/libopencv_perf_video_pch_dephelp.a b/library/arm7/libopencv_perf_video_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_perf_video_pch_dephelp.a rename to library/arm7/libopencv_perf_video_pch_dephelp.a diff --git a/library/arm7/libopencv_photo.so b/library/arm7/libopencv_photo.so new file mode 100644 index 0000000..387bc42 --- /dev/null +++ b/library/arm7/libopencv_photo.so @@ -0,0 +1 @@ +libopencv_photo.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_photo.so.2.4 b/library/arm7/libopencv_photo.so.2.4 new file mode 100644 index 0000000..45b8eb2 --- /dev/null +++ b/library/arm7/libopencv_photo.so.2.4 @@ -0,0 +1 @@ +libopencv_photo.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_photo.so.2.4.5 b/library/arm7/libopencv_photo.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_photo.so.2.4.5 rename to library/arm7/libopencv_photo.so.2.4.5 diff --git a/lib/arm7/libopencv_photo_pch_dephelp.a b/library/arm7/libopencv_photo_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_photo_pch_dephelp.a rename to library/arm7/libopencv_photo_pch_dephelp.a diff --git a/library/arm7/libopencv_stitching.so b/library/arm7/libopencv_stitching.so new file mode 100644 index 0000000..10b36f8 --- /dev/null +++ b/library/arm7/libopencv_stitching.so @@ -0,0 +1 @@ +libopencv_stitching.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_stitching.so.2.4 b/library/arm7/libopencv_stitching.so.2.4 new file mode 100644 index 0000000..2cf2908 --- /dev/null +++ b/library/arm7/libopencv_stitching.so.2.4 @@ -0,0 +1 @@ +libopencv_stitching.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_stitching.so.2.4.5 b/library/arm7/libopencv_stitching.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_stitching.so.2.4.5 rename to library/arm7/libopencv_stitching.so.2.4.5 diff --git a/lib/arm7/libopencv_stitching_pch_dephelp.a b/library/arm7/libopencv_stitching_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_stitching_pch_dephelp.a rename to library/arm7/libopencv_stitching_pch_dephelp.a diff --git a/library/arm7/libopencv_superres.so b/library/arm7/libopencv_superres.so new file mode 100644 index 0000000..dbad36c --- /dev/null +++ b/library/arm7/libopencv_superres.so @@ -0,0 +1 @@ +libopencv_superres.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_superres.so.2.4 b/library/arm7/libopencv_superres.so.2.4 new file mode 100644 index 0000000..42dc315 --- /dev/null +++ b/library/arm7/libopencv_superres.so.2.4 @@ -0,0 +1 @@ +libopencv_superres.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_superres.so.2.4.5 b/library/arm7/libopencv_superres.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_superres.so.2.4.5 rename to library/arm7/libopencv_superres.so.2.4.5 diff --git a/lib/arm7/libopencv_superres_pch_dephelp.a b/library/arm7/libopencv_superres_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_superres_pch_dephelp.a rename to 
library/arm7/libopencv_superres_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_calib3d_pch_dephelp.a b/library/arm7/libopencv_test_calib3d_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_calib3d_pch_dephelp.a rename to library/arm7/libopencv_test_calib3d_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_contrib_pch_dephelp.a b/library/arm7/libopencv_test_contrib_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_contrib_pch_dephelp.a rename to library/arm7/libopencv_test_contrib_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_core_pch_dephelp.a b/library/arm7/libopencv_test_core_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_core_pch_dephelp.a rename to library/arm7/libopencv_test_core_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_features2d_pch_dephelp.a b/library/arm7/libopencv_test_features2d_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_features2d_pch_dephelp.a rename to library/arm7/libopencv_test_features2d_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_flann_pch_dephelp.a b/library/arm7/libopencv_test_flann_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_flann_pch_dephelp.a rename to library/arm7/libopencv_test_flann_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_gpu_pch_dephelp.a b/library/arm7/libopencv_test_gpu_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_gpu_pch_dephelp.a rename to library/arm7/libopencv_test_gpu_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_highgui_pch_dephelp.a b/library/arm7/libopencv_test_highgui_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_highgui_pch_dephelp.a rename to library/arm7/libopencv_test_highgui_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_imgproc_pch_dephelp.a b/library/arm7/libopencv_test_imgproc_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_imgproc_pch_dephelp.a rename to library/arm7/libopencv_test_imgproc_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_legacy_pch_dephelp.a b/library/arm7/libopencv_test_legacy_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_legacy_pch_dephelp.a rename to library/arm7/libopencv_test_legacy_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_ml_pch_dephelp.a b/library/arm7/libopencv_test_ml_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_ml_pch_dephelp.a rename to library/arm7/libopencv_test_ml_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_nonfree_pch_dephelp.a b/library/arm7/libopencv_test_nonfree_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_nonfree_pch_dephelp.a rename to library/arm7/libopencv_test_nonfree_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_objdetect_pch_dephelp.a b/library/arm7/libopencv_test_objdetect_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_objdetect_pch_dephelp.a rename to library/arm7/libopencv_test_objdetect_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_photo_pch_dephelp.a b/library/arm7/libopencv_test_photo_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_photo_pch_dephelp.a rename to library/arm7/libopencv_test_photo_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_stitching_pch_dephelp.a b/library/arm7/libopencv_test_stitching_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_stitching_pch_dephelp.a rename to library/arm7/libopencv_test_stitching_pch_dephelp.a diff --git 
a/lib/arm7/libopencv_test_superres_pch_dephelp.a b/library/arm7/libopencv_test_superres_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_superres_pch_dephelp.a rename to library/arm7/libopencv_test_superres_pch_dephelp.a diff --git a/lib/arm7/libopencv_test_video_pch_dephelp.a b/library/arm7/libopencv_test_video_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_test_video_pch_dephelp.a rename to library/arm7/libopencv_test_video_pch_dephelp.a diff --git a/library/arm7/libopencv_ts.so b/library/arm7/libopencv_ts.so new file mode 100644 index 0000000..88f5375 --- /dev/null +++ b/library/arm7/libopencv_ts.so @@ -0,0 +1 @@ +libopencv_ts.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_ts.so.2.4 b/library/arm7/libopencv_ts.so.2.4 new file mode 100644 index 0000000..391bebc --- /dev/null +++ b/library/arm7/libopencv_ts.so.2.4 @@ -0,0 +1 @@ +libopencv_ts.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_ts.so.2.4.5 b/library/arm7/libopencv_ts.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_ts.so.2.4.5 rename to library/arm7/libopencv_ts.so.2.4.5 diff --git a/lib/arm7/libopencv_ts_pch_dephelp.a b/library/arm7/libopencv_ts_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_ts_pch_dephelp.a rename to library/arm7/libopencv_ts_pch_dephelp.a diff --git a/library/arm7/libopencv_video.so b/library/arm7/libopencv_video.so new file mode 100644 index 0000000..d5ddd6c --- /dev/null +++ b/library/arm7/libopencv_video.so @@ -0,0 +1 @@ +libopencv_video.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_video.so.2.4 b/library/arm7/libopencv_video.so.2.4 new file mode 100644 index 0000000..0e319f2 --- /dev/null +++ b/library/arm7/libopencv_video.so.2.4 @@ -0,0 +1 @@ +libopencv_video.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_video.so.2.4.5 b/library/arm7/libopencv_video.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_video.so.2.4.5 rename to library/arm7/libopencv_video.so.2.4.5 diff --git a/lib/arm7/libopencv_video_pch_dephelp.a b/library/arm7/libopencv_video_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_video_pch_dephelp.a rename to library/arm7/libopencv_video_pch_dephelp.a diff --git a/library/arm7/libopencv_videostab.so b/library/arm7/libopencv_videostab.so new file mode 100644 index 0000000..faeb668 --- /dev/null +++ b/library/arm7/libopencv_videostab.so @@ -0,0 +1 @@ +libopencv_videostab.so.2.4 \ No newline at end of file diff --git a/library/arm7/libopencv_videostab.so.2.4 b/library/arm7/libopencv_videostab.so.2.4 new file mode 100644 index 0000000..85a3c08 --- /dev/null +++ b/library/arm7/libopencv_videostab.so.2.4 @@ -0,0 +1 @@ +libopencv_videostab.so.2.4.5 \ No newline at end of file diff --git a/lib/arm7/libopencv_videostab.so.2.4.5 b/library/arm7/libopencv_videostab.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/arm7/libopencv_videostab.so.2.4.5 rename to library/arm7/libopencv_videostab.so.2.4.5 diff --git a/lib/arm7/libopencv_videostab_pch_dephelp.a b/library/arm7/libopencv_videostab_pch_dephelp.a similarity index 100% rename from lib/arm7/libopencv_videostab_pch_dephelp.a rename to library/arm7/libopencv_videostab_pch_dephelp.a diff --git a/lib/cascade-files/haarcascade_clock.xml b/library/cascade-files/haarcascade_clock.xml similarity index 97% rename from lib/cascade-files/haarcascade_clock.xml rename to 
library/cascade-files/haarcascade_clock.xml index 677d9c0..99258c3 100644 --- a/lib/cascade-files/haarcascade_clock.xml +++ b/library/cascade-files/haarcascade_clock.xml @@ -1,3101 +1,3101 @@ [3,101 lines of Haar-cascade stage/feature XML data changed as part of the lib/ → library/ move; raw cascade body omitted]
- 0 - -1.7344199586659670e-003 - 0.3705500066280365 - -0.6115717887878418 - <_> - - <_> - - - - <_> - 19 0 8 1 -1. - <_> - 21 0 4 1 2. - 0 - -7.7608181163668633e-004 - -0.7950387001037598 - 0.2622818052768707 - <_> - - <_> - - - - <_> - 5 20 2 2 -1. - <_> - 5 20 1 1 2. - <_> - 6 21 1 1 2. - 0 - 6.0399679932743311e-004 - -0.2708126008510590 - 0.8733022809028626 - <_> - - <_> - - - - <_> - 22 11 8 10 -1. - <_> - 24 11 4 10 2. - 0 - 0.0211945194751024 - -0.3263381123542786 - 0.7960063815116882 - <_> - - <_> - - - - <_> - 2 18 7 6 -1. - <_> - 2 21 7 3 2. - 0 - -3.3754170872271061e-003 - 0.5355839729309082 - -0.5585852265357971 - <_> - - <_> - - - - <_> - 6 17 8 5 -1. - <_> - 8 19 4 5 2. - 1 - 7.7950168633833528e-004 - -0.6128119230270386 - 0.3950763940811157 - <_> - - <_> - - - - <_> - 10 0 5 12 -1. - <_> - 10 6 5 6 2. - 0 - 6.2134041218087077e-004 - -0.7983394265174866 - 0.2523753941059113 - <_> - - <_> - - - - <_> - 19 8 8 8 -1. - <_> - 19 8 8 4 2. - 1 - 5.1883992273360491e-004 - -0.7581666707992554 - 0.2751871049404144 - -1.1804150342941284 - 13 - -1 - <_> - - - <_> - - <_> - - - - <_> - 8 20 18 8 -1. - <_> - 8 24 18 4 2. - 0 - -0.0746768414974213 - 0.8516380190849304 - -0.3425028026103973 - <_> - - <_> - - - - <_> - 26 15 4 1 -1. - <_> - 27 16 2 1 2. - 1 - 1.5731110470369458e-003 - -0.3031556010246277 - 0.6837754249572754 - <_> - - <_> - - - - <_> - 22 3 8 25 -1. - <_> - 26 3 4 25 2. - 0 - -0.0625705122947693 - 0.5789077877998352 - -0.4484055042266846 - <_> - - <_> - - - - <_> - 12 15 6 1 -1. - <_> - 14 15 2 1 3. - 0 - -2.3908941075205803e-003 - 0.4731529951095581 - -0.5635436773300171 - <_> - - <_> - - - - <_> - 11 24 7 6 -1. - <_> - 11 26 7 2 3. - 0 - 2.1347070578485727e-003 - -0.4449442028999329 - 0.5285379290580750 - <_> - - <_> - - - - <_> - 12 0 16 18 -1. - <_> - 12 6 16 6 3. - 0 - 0.0103090200573206 - -0.7689601182937622 - 0.2924349009990692 - <_> - - <_> - - - - <_> - 7 13 11 6 -1. - <_> - 7 13 11 3 2. - 1 - 0.0620706714689732 - -0.4327771961688995 - 0.8331649899482727 - <_> - - <_> - - - - <_> - 11 13 9 6 -1. - <_> - 14 15 3 2 9. - 0 - -2.5847749784588814e-003 - 0.2928090989589691 - -0.8889489173889160 - -1.1310479640960693 - 14 - -1 - <_> - - - <_> - - <_> - - - - <_> - 4 0 21 26 -1. - <_> - 4 13 21 13 2. - 0 - 0.0588057599961758 - -0.4991154074668884 - 0.6187056899070740 - <_> - - <_> - - - - <_> - 13 0 9 11 -1. - <_> - 16 3 3 11 3. - 1 - 0.0356934182345867 - -0.4802035093307495 - 0.6672577857971191 - <_> - - <_> - - - - <_> - 0 8 6 8 -1. - <_> - 2 8 2 8 3. - 0 - 3.7319110706448555e-003 - -0.5551087856292725 - 0.6358423233032227 - <_> - - <_> - - - - <_> - 6 17 24 12 -1. - <_> - 18 17 12 12 2. - 0 - -3.4886640496551991e-003 - 0.3998816013336182 - -0.8779597282409668 - <_> - - <_> - - - - <_> - 14 11 3 9 -1. - <_> - 14 14 3 3 3. - 0 - -3.5188349429517984e-003 - 0.2896867990493774 - -0.8983190059661865 - <_> - - <_> - - - - <_> - 25 12 3 6 -1. - <_> - 26 14 1 2 9. - 0 - 2.6123720454052091e-004 - -0.7029349207878113 - 0.5493162274360657 - <_> - - <_> - - - - <_> - 15 0 15 21 -1. - <_> - 15 7 15 7 3. - 0 - 6.4898689743131399e-005 - -0.9439805150032044 - 0.2820520997047424 - <_> - - <_> - - - - <_> - 14 15 10 2 -1. - <_> - 14 15 5 1 2. - <_> - 19 16 5 1 2. - 0 - -1.3600759848486632e-004 - 0.4432930052280426 - -0.7824695110321045 - <_> - - <_> - - - - <_> - 0 20 2 6 -1. - <_> - 1 20 1 6 2. - 0 - -9.0165252913720906e-005 - 0.6236552000045776 - -0.6018273830413818 - <_> - - <_> - - - - <_> - 0 5 10 25 -1. - <_> - 5 5 5 25 2. 
- 0 - 9.2873163521289825e-003 - -0.7339897751808167 - 0.4519324898719788 - <_> - - <_> - - - - <_> - 2 5 2 10 -1. - <_> - 2 5 1 5 2. - <_> - 3 10 1 5 2. - 0 - -3.7979730404913425e-004 - 0.6942034959793091 - -0.5841832756996155 - <_> - - <_> - - - - <_> - 4 7 6 6 -1. - <_> - 4 10 6 3 2. - 0 - 2.1245880052447319e-003 - -0.4886597096920013 - 0.6439890265464783 - -2.0186989307403564 - 15 - -1 - <_> - - - <_> - - <_> - - - - <_> - 18 2 12 12 -1. - <_> - 24 2 6 12 2. - 0 - -0.0131159899756312 - 0.4102598130702972 - -0.7836742997169495 - <_> - - <_> - - - - <_> - 12 15 4 1 -1. - <_> - 14 15 2 1 2. - 0 - -1.9107479602098465e-003 - 0.4352349936962128 - -0.5894374251365662 - <_> - - <_> - - - - <_> - 0 14 15 15 -1. - <_> - 0 19 15 5 3. - 0 - 2.8926940285600722e-004 - -0.7347720861434937 - 0.3091743886470795 - <_> - - <_> - - - - <_> - 5 23 15 6 -1. - <_> - 5 25 15 2 3. - 0 - 0.0108875995501876 - -0.4113591015338898 - 0.7446773052215576 - <_> - - <_> - - - - <_> - 6 23 20 1 -1. - <_> - 11 23 10 1 2. - 0 - 8.4888264536857605e-003 - -0.4784755110740662 - 0.7682887911796570 - <_> - - <_> - - - - <_> - 7 13 15 6 -1. - <_> - 12 15 5 2 9. - 0 - -7.8473137691617012e-003 - 0.3737767040729523 - -0.8986917138099670 - <_> - - <_> - - - - <_> - 26 11 4 9 -1. - <_> - 28 11 2 9 2. - 0 - -8.6469706147909164e-003 - 0.5991634726524353 - -0.6494582891464233 - <_> - - <_> - - - - <_> - 0 2 24 18 -1. - <_> - 6 2 12 18 2. - 0 - 1.2370230397209525e-003 - -0.8902391195297241 - 0.3708789944648743 - <_> - - <_> - - - - <_> - 14 12 3 3 -1. - <_> - 15 13 1 1 9. - 0 - 2.5298888795077801e-004 - -0.8120170235633850 - 0.4935150146484375 - <_> - - <_> - - - - <_> - 13 5 15 10 -1. - <_> - 18 5 5 10 3. - 0 - 5.1526250317692757e-003 - -0.9192841053009033 - 0.4452421963214874 - -1.3165810108184814 - 16 - -1 - <_> - - - <_> - - <_> - - - - <_> - 15 12 12 18 -1. - <_> - 15 18 12 6 3. - 0 - 0.0295117292553186 - -0.5727707743644714 - 0.4841710031032562 - <_> - - <_> - - - - <_> - 20 0 9 16 -1. - <_> - 20 8 9 8 2. - 0 - 0.0143716000020504 - -0.7799515128135681 - 0.3565404117107391 - <_> - - <_> - - - - <_> - 2 0 6 3 -1. - <_> - 4 1 2 1 9. - 0 - 1.0884779840125702e-005 - -0.6723812222480774 - 0.4785112142562866 - <_> - - <_> - - - - <_> - 1 0 29 24 -1. - <_> - 1 6 29 12 2. - 0 - 4.3139769695699215e-003 - -0.8979778885841370 - 0.2318837046623230 - <_> - - <_> - - - - <_> - 0 29 10 1 -1. - <_> - 5 29 5 1 2. - 0 - -7.7994642197154462e-005 - 0.3738532960414887 - -0.7187057137489319 - <_> - - <_> - - - - <_> - 11 12 6 4 -1. - <_> - 11 12 3 2 2. - <_> - 14 14 3 2 2. - 0 - -3.0512181110680103e-003 - 0.5300139784812927 - -0.5830147266387940 - <_> - - <_> - - - - <_> - 5 14 6 5 -1. - <_> - 5 14 3 5 2. - 1 - -4.3142150389030576e-004 - 0.4394023120403290 - -0.6322596073150635 - <_> - - <_> - - - - <_> - 1 11 4 7 -1. - <_> - 2 11 2 7 2. - 0 - 7.0738680660724640e-003 - -0.3457511961460114 - 0.9177647233009338 - <_> - - <_> - - - - <_> - 28 27 2 3 -1. - <_> - 29 27 1 3 2. - 0 - 7.7551658250740729e-006 - -0.6041529774665833 - 0.4977194964885712 - <_> - - <_> - - - - <_> - 12 15 8 6 -1. - <_> - 10 17 8 2 3. - 1 - 0.0139424195513129 - -0.4868971109390259 - 0.7841137051582336 - -1.8075209856033325 - 17 - -1 - <_> - - - <_> - - <_> - - - - <_> - 15 15 2 2 -1. - <_> - 15 15 1 1 2. - <_> - 16 16 1 1 2. - 0 - -1.8129580421373248e-003 - 0.9541606903076172 - -0.2965695858001709 - <_> - - <_> - - - - <_> - 2 12 16 12 -1. - <_> - 2 12 8 6 2. - <_> - 10 18 8 6 2. 
- 0 - 4.7363578341901302e-003 - -0.6298993825912476 - 0.4632642865180969 - <_> - - <_> - - - - <_> - 25 14 2 10 -1. - <_> - 25 14 1 5 2. - <_> - 26 19 1 5 2. - 0 - -3.3910579513758421e-003 - 0.8871492147445679 - -0.4089617133140564 - <_> - - <_> - - - - <_> - 24 10 6 16 -1. - <_> - 27 10 3 16 2. - 0 - -0.0291099399328232 - 0.5941541790962219 - -0.4963997900485992 - <_> - - <_> - - - - <_> - 0 3 24 10 -1. - <_> - 12 3 12 10 2. - 0 - 1.8045129763777368e-005 - -0.8051089048385620 - 0.2899082005023956 - <_> - - <_> - - - - <_> - 5 18 8 4 -1. - <_> - 4 19 8 2 2. - 1 - 7.3270881548523903e-003 - -0.5242174267768860 - 0.8847057819366455 - <_> - - <_> - - - - <_> - 16 6 9 6 -1. - <_> - 19 9 3 6 3. - 1 - -1.9007539958693087e-004 - 0.5221701860427856 - -0.8480635881423950 - <_> - - <_> - - - - <_> - 16 5 1 16 -1. - <_> - 16 5 1 8 2. - 1 - 4.3798499973490834e-005 - -0.7982841730117798 - 0.4523805975914002 - <_> - - <_> - - - - <_> - 0 0 1 30 -1. - <_> - 0 10 1 10 3. - 0 - -2.6992160201189108e-005 - 0.5484765172004700 - -0.7896834015846252 - <_> - - <_> - - - - <_> - 18 1 2 8 -1. - <_> - 18 5 2 4 2. - 0 - 2.6483249384909868e-003 - -0.6831504702568054 - 0.5447096824645996 - <_> - - <_> - - - - <_> - 2 1 2 18 -1. - <_> - 2 1 1 9 2. - <_> - 3 10 1 9 2. - 0 - -1.2241229705978185e-004 - 0.6463962197303772 - -0.7322003245353699 - -1.2552789449691772 - 18 - -1 - <_> - - - <_> - - <_> - - - - <_> - 14 14 3 3 -1. - <_> - 15 15 1 1 9. - 0 - -0.0118554998189211 - 0.7671378254890442 - -0.3722873032093048 - <_> - - <_> - - - - <_> - 15 12 15 18 -1. - <_> - 15 18 15 6 3. - 0 - 0.0129950996488333 - -0.5752075910568237 - 0.4080007970333099 - <_> - - <_> - - - - <_> - 0 15 15 5 -1. - <_> - 5 15 5 5 3. - 0 - 0.0123613402247429 - -0.5057299137115479 - 0.4008283019065857 - <_> - - <_> - - - - <_> - 24 11 6 10 -1. - <_> - 26 11 2 10 3. - 0 - 0.0217736903578043 - -0.3811939060688019 - 0.7375351190567017 - <_> - - <_> - - - - <_> - 20 4 9 18 -1. - <_> - 20 10 9 6 3. - 0 - 8.2471058703958988e-004 - -0.7928907275199890 - 0.3049820065498352 - <_> - - <_> - - - - <_> - 15 12 4 4 -1. - <_> - 15 12 4 2 2. - 1 - -8.0548477126285434e-004 - 0.3002581894397736 - -0.8209298849105835 - <_> - - <_> - - - - <_> - 27 26 3 4 -1. - <_> - 27 28 3 2 2. - 0 - 4.6441138692898676e-005 - -0.5512930154800415 - 0.4281317889690399 - <_> - - <_> - - - - <_> - 0 6 28 4 -1. - <_> - 14 6 14 4 2. - 0 - 1.3028540706727654e-004 - -0.7330580949783325 - 0.3167754113674164 - <_> - - <_> - - - - <_> - 12 5 6 18 -1. - <_> - 14 11 2 6 9. - 0 - -3.0463289003819227e-003 - 0.3088589906692505 - -0.7799909114837647 - <_> - - <_> - - - - <_> - 13 11 3 6 -1. - <_> - 14 13 1 2 9. - 0 - 5.1273731514811516e-003 - -0.4092488884925842 - 0.7003359198570252 - <_> - - <_> - - - - <_> - 14 13 4 3 -1. - <_> - 13 14 4 1 3. - 1 - 6.9407821865752339e-004 - -0.6798236966133118 - 0.5320472121238709 - <_> - - <_> - - - - <_> - 3 0 17 21 -1. - <_> - 3 7 17 7 3. - 0 - 4.9300299724563956e-004 - -0.9494745135307312 - 0.2453067004680634 - -1.2338080406188965 - 19 - -1 - <_> - - - <_> - - <_> - - - - <_> - 6 5 22 24 -1. - <_> - 6 11 22 12 2. - 0 - 0.1109884008765221 - -0.4732984006404877 - 0.6041498184204102 - <_> - - <_> - - - - <_> - 15 3 15 13 -1. - <_> - 20 3 5 13 3. - 0 - 8.8524278253316879e-003 - -0.7683498263359070 - 0.2962965071201325 - <_> - - <_> - - - - <_> - 0 2 27 8 -1. - <_> - 9 2 9 8 3. - 0 - 2.9951189644634724e-003 - -0.7832775712013245 - 0.2668977975845337 - <_> - - <_> - - - - <_> - 24 18 3 3 -1. - <_> - 25 19 1 1 9. 
- 0 - 7.0615397999063134e-004 - -0.5898581147193909 - 0.4767473042011261 - <_> - - <_> - - - - <_> - 2 10 14 20 -1. - <_> - 2 15 14 10 2. - 0 - 8.2862451672554016e-003 - -0.8107367157936096 - 0.2578361928462982 - <_> - - <_> - - - - <_> - 17 14 3 2 -1. - <_> - 18 15 1 2 3. - 1 - 8.9599809143692255e-004 - -0.7087581753730774 - 0.5773987770080566 - <_> - - <_> - - - - <_> - 19 6 4 10 -1. - <_> - 19 6 4 5 2. - 1 - 2.2114950115792453e-004 - -0.9175388813018799 - 0.3938092887401581 - <_> - - <_> - - - - <_> - 19 9 10 20 -1. - <_> - 19 19 10 10 2. - 0 - -2.6218120474368334e-003 - 0.2289039939641953 - -0.9661834239959717 - <_> - - <_> - - - - <_> - 13 24 9 6 -1. - <_> - 13 26 9 2 3. - 0 - 8.1112459301948547e-003 - -0.6872652173042297 - 0.7748587727546692 - <_> - - <_> - - - - <_> - 10 14 12 3 -1. - <_> - 14 15 4 1 9. - 0 - -6.6581218561623245e-005 - 0.4069637060165405 - -0.9821419119834900 - <_> - - <_> - - - - <_> - 20 4 10 11 -1. - <_> - 25 4 5 11 2. - 0 - -4.3942942284047604e-003 - 0.3025366067886353 - -0.9085376858711243 - <_> - - <_> - - - - <_> - 14 14 4 4 -1. - <_> - 14 14 2 2 2. - <_> - 16 16 2 2 2. - 0 - -8.1969819802907296e-006 - 0.3476018905639648 - -0.8090888857841492 - <_> - - <_> - - - - <_> - 2 13 3 5 -1. - <_> - 3 13 1 5 3. - 0 - -1.7878259532153606e-003 - 0.7400094270706177 - -0.4940893948078156 - -1.6455509662628174 - 20 - -1 - <_> - - - <_> - - <_> - - - - <_> - 6 1 3 18 -1. - <_> - 6 10 3 9 2. - 0 - 0.0170281101018190 - -0.6075357794761658 - 0.6752921938896179 - <_> - - <_> - - - - <_> - 19 0 10 18 -1. - <_> - 19 9 10 9 2. - 0 - 4.9286349676549435e-003 - -0.9023544788360596 - 0.2912414968013763 - <_> - - <_> - - - - <_> - 15 11 1 6 -1. - <_> - 15 13 1 2 3. - 0 - 4.5951400534249842e-004 - -0.6812731027603149 - 0.5965930819511414 - <_> - - <_> - - - - <_> - 13 4 6 18 -1. - <_> - 15 10 2 6 9. - 0 - -4.2040079279104248e-005 - 0.4269096851348877 - -0.8979231715202332 - <_> - - <_> - - - - <_> - 18 4 11 26 -1. - <_> - 18 17 11 13 2. - 0 - -1.1093009961768985e-003 - 0.3619905114173889 - -0.9165890216827393 - <_> - - <_> - - - - <_> - 10 3 6 27 -1. - <_> - 12 12 2 9 9. - 0 - 7.1232998743653297e-003 - -0.5551241040229797 - 0.5782169103622437 - <_> - - <_> - - - - <_> - 8 1 4 8 -1. - <_> - 9 2 2 8 2. - 1 - 3.6009349860250950e-003 - -0.6728715896606445 - 0.7024763822555542 - <_> - - <_> - - - - <_> - 8 0 8 2 -1. - <_> - 12 0 4 2 2. - 0 - -9.7479542091605254e-006 - 0.5475305914878845 - -0.8654465079307556 - <_> - - <_> - - - - <_> - 0 20 18 10 -1. - <_> - 0 25 18 5 2. - 0 - -2.2331129293888807e-003 - 0.3924748003482819 - -0.8936464190483093 - <_> - - <_> - - - - <_> - 20 1 7 3 -1. - <_> - 19 2 7 1 3. - 1 - 4.9669588916003704e-003 - -0.4396930932998657 - 0.8690040111541748 - <_> - - <_> - - - - <_> - 5 15 1 4 -1. - <_> - 4 16 1 2 2. - 1 - 4.6299301175167784e-005 - -0.6883816123008728 - 0.6461343765258789 - <_> - - <_> - - - - <_> - 5 1 20 8 -1. - <_> - 10 1 10 8 2. - 0 - 2.0605750614777207e-004 - -0.9089515805244446 - 0.5352932214736939 - -1.3342789411544800 - 21 - -1 - <_> - - - <_> - - <_> - - - - <_> - 19 0 2 14 -1. - <_> - 19 0 2 7 2. - 1 - -9.7629595547914505e-003 - 0.6056637167930603 - -0.6816167235374451 - <_> - - <_> - - - - <_> - 0 2 26 10 -1. - <_> - 13 2 13 10 2. - 0 - 1.1452470207586884e-003 - -0.7568649053573608 - 0.4382646977901459 - <_> - - <_> - - - - <_> - 14 14 3 3 -1. - <_> - 15 15 1 1 9. - 0 - -8.1162629649043083e-003 - 0.6009442806243897 - -0.5972846150398254 - <_> - - <_> - - - - <_> - 10 0 4 20 -1. - <_> - 10 5 4 10 2. 
- 0 - 9.4810684458934702e-006 - -0.7322263121604919 - 0.4546971917152405 - <_> - - <_> - - - - <_> - 22 21 6 6 -1. - <_> - 22 21 3 3 2. - <_> - 25 24 3 3 2. - 0 - -8.2458636825322174e-006 - 0.6875557899475098 - -0.5961893796920776 - <_> - - <_> - - - - <_> - 23 1 6 3 -1. - <_> - 23 2 6 1 3. - 0 - 8.2058722910005599e-006 - -0.5708162784576416 - 0.6666625738143921 - <_> - - <_> - - - - <_> - 12 24 4 2 -1. - <_> - 12 24 2 2 2. - 1 - 2.3102159611880779e-003 - -0.6336330771446228 - 0.7040169239044190 - <_> - - <_> - - - - <_> - 21 4 4 26 -1. - <_> - 21 17 4 13 2. - 0 - -3.1256309739546850e-005 - 0.3984279930591583 - -0.9526088833808899 - <_> - - <_> - - - - <_> - 2 3 6 8 -1. - <_> - 2 3 3 4 2. - <_> - 5 7 3 4 2. - 0 - -5.6237089447677135e-003 - 0.8949983119964600 - -0.6286303997039795 - -1.1762020587921143 - 22 - -1 - <_> - - - <_> - - <_> - - - - <_> - 0 0 4 1 -1. - <_> - 2 0 2 1 2. - 0 - 6.9548498140648007e-004 - 0.2593482136726379 - -0.9198864102363586 - <_> - - <_> - - - - <_> - 24 13 2 10 -1. - <_> - 25 13 1 10 2. - 0 - -4.9878000281751156e-003 - 0.8619614839553833 - -0.3343923985958099 - <_> - - <_> - - - - <_> - 2 4 28 15 -1. - <_> - 16 4 14 15 2. - 0 - 0.0454013496637344 - 0.3592154085636139 - -0.7624815106391907 - <_> - - <_> - - - - <_> - 0 13 30 10 -1. - <_> - 15 13 15 10 2. - 0 - -0.0440022610127926 - -0.7319048047065735 - 0.3194361031055450 - <_> - - <_> - - - - <_> - 25 0 4 3 -1. - <_> - 26 0 2 3 2. - 0 - 9.2827458865940571e-004 - 0.3184696137905121 - -0.7557029128074646 - -1.2034360170364380 - 23 - -1 - + + + + + 30 30 + + <_> + + + <_> + + <_> + + + + <_> + 11 0 10 10 -1. + <_> + 11 5 10 5 2. + 0 + 0.0225503891706467 + -0.7207304835319519 + 0.7884858250617981 + <_> + + <_> + + + + <_> + 12 14 6 3 -1. + <_> + 14 15 2 1 9. + 0 + -0.0103679997846484 + 0.8748232126235962 + -0.5662534236907959 + <_> + + <_> + + + + <_> + 13 11 4 9 -1. + <_> + 13 14 4 3 3. + 0 + -7.6229930855333805e-003 + 0.7921038269996643 + -0.4398050904273987 + <_> + + <_> + + + + <_> + 3 10 6 15 -1. + <_> + 6 10 3 15 2. + 0 + 0.0142955500632524 + -0.4856897890567780 + 0.8144654035568237 + -0.9805700778961182 + -1 + -1 + <_> + + + <_> + + <_> + + + + <_> + 3 20 27 10 -1. + <_> + 3 25 27 5 2. + 0 + -0.0418560616672039 + 0.7715684771537781 + -0.7308530807495117 + <_> + + <_> + + + + <_> + 14 14 3 3 -1. + <_> + 15 15 1 1 9. + 0 + -6.2480890192091465e-003 + 0.7600126862525940 + -0.5264171957969666 + <_> + + <_> + + + + <_> + 12 0 15 10 -1. + <_> + 12 5 15 5 2. + 0 + 0.0479770787060261 + -0.4011876881122589 + 0.7997202277183533 + <_> + + <_> + + + + <_> + 18 3 9 26 -1. + <_> + 18 16 9 13 2. + 0 + 0.0318866707384586 + 0.3455348908901215 + -0.8596624732017517 + <_> + + <_> + + + + <_> + 20 2 10 12 -1. + <_> + 20 2 5 6 2. + <_> + 25 8 5 6 2. + 0 + -0.0194444395601749 + 0.8260732889175415 + -0.4276879131793976 + <_> + + <_> + + + + <_> + 26 9 4 14 -1. + <_> + 28 9 2 14 2. + 0 + -0.0200596991926432 + 0.9874691963195801 + -0.3553096055984497 + <_> + + <_> + + + + <_> + 26 0 2 2 -1. + <_> + 27 0 1 2 2. + 0 + -7.7831762610003352e-004 + -0.8497620224952698 + 0.4054605960845947 + <_> + + <_> + + + + <_> + 9 19 10 8 -1. + <_> + 9 21 10 4 2. + 0 + 0.0116476295515895 + -0.3601523935794830 + 0.8574079871177673 + -0.8330519199371338 + 0 + -1 + <_> + + + <_> + + <_> + + + + <_> + 20 5 10 14 -1. + <_> + 25 5 5 14 2. + 0 + -0.0520163811743259 + 0.8257145285606384 + -0.5637528896331787 + <_> + + <_> + + + + <_> + 14 15 6 2 -1. + <_> + 14 15 3 1 2. + <_> + 17 16 3 1 2. 
+ 0 + -2.3776830639690161e-003 + 0.8298984766006470 + -0.3037792146205902 + <_> + + <_> + + + + <_> + 0 10 18 19 -1. + <_> + 6 10 6 19 3. + 0 + 0.0171877499669790 + -0.5477277040481567 + 0.5136498808860779 + <_> + + <_> + + + + <_> + 28 4 2 14 -1. + <_> + 28 4 1 7 2. + <_> + 29 11 1 7 2. + 0 + -5.2252239547669888e-003 + 0.8670595884323120 + -0.3483909070491791 + <_> + + <_> + + + + <_> + 1 6 28 19 -1. + <_> + 8 6 14 19 2. + 0 + 0.1614976972341538 + -0.2469431012868881 + 0.8995053768157959 + <_> + + <_> + + + + <_> + 5 23 2 2 -1. + <_> + 5 23 1 2 2. + 1 + -8.6788518819957972e-004 + -0.6489925980567932 + 0.4482645988464356 + <_> + + <_> + + + + <_> + 5 0 17 12 -1. + <_> + 5 3 17 6 2. + 0 + 0.0405330397188663 + -0.3314704000949860 + 0.8627082705497742 + -1.8573789596557617 + 1 + -1 + <_> + + + <_> + + <_> + + + + <_> + 14 15 16 1 -1. + <_> + 18 15 8 1 2. + 0 + 4.0193069726228714e-003 + -0.6957365274429321 + 0.6457979083061218 + <_> + + <_> + + + + <_> + 11 15 9 1 -1. + <_> + 14 15 3 1 3. + 0 + -3.7396959960460663e-003 + 0.6279641985893250 + -0.5662031173706055 + <_> + + <_> + + + + <_> + 25 8 3 15 -1. + <_> + 26 8 1 15 3. + 0 + 2.4585970677435398e-003 + -0.4059694111347199 + 0.7348414063453674 + <_> + + <_> + + + + <_> + 0 0 2 4 -1. + <_> + 0 2 2 2 2. + 0 + 1.1789749842137098e-003 + 0.3537071943283081 + -0.9093698859214783 + <_> + + <_> + + + + <_> + 0 9 15 10 -1. + <_> + 5 9 5 10 3. + 0 + 0.0275318492203951 + -0.4571217894554138 + 0.6919301152229309 + <_> + + <_> + + + + <_> + 26 11 4 9 -1. + <_> + 27 11 2 9 2. + 0 + 3.1117910984903574e-003 + -0.4389519989490509 + 0.6670482754707336 + -1.1042749881744385 + 2 + -1 + <_> + + + <_> + + <_> + + + + <_> + 2 8 28 2 -1. + <_> + 9 8 14 2 2. + 0 + 0.0207930002361536 + -0.5435373187065125 + 0.7769594192504883 + <_> + + <_> + + + + <_> + 12 15 6 1 -1. + <_> + 14 15 2 1 3. + 0 + -3.5948599688708782e-003 + 0.7313253283500671 + -0.4182578027248383 + <_> + + <_> + + + + <_> + 1 13 3 6 -1. + <_> + 2 13 1 6 3. + 0 + 4.5345202088356018e-003 + -0.2915262877941132 + 1.0000820159912109 + <_> + + <_> + + + + <_> + 16 0 14 5 -1. + <_> + 16 0 7 5 2. + 1 + 0.0156572908163071 + 0.4315113127231598 + -0.8470829725265503 + <_> + + <_> + + + + <_> + 20 2 10 4 -1. + <_> + 20 2 5 4 2. + 1 + -0.0203227400779724 + -0.8424695730209351 + 0.2959519028663635 + -0.6548693776130676 + 3 + -1 + <_> + + + <_> + + <_> + + + + <_> + 14 14 3 3 -1. + <_> + 15 15 1 1 9. + 0 + -8.3805844187736511e-003 + 0.8370696902275085 + -0.5038247108459473 + <_> + + <_> + + + + <_> + 20 14 10 14 -1. + <_> + 25 14 5 14 2. + 0 + -0.0148145696148276 + 0.5616933107376099 + -0.6403117775917053 + <_> + + <_> + + + + <_> + 17 0 2 16 -1. + <_> + 17 0 2 8 2. + 1 + 0.0163473393768072 + 0.3776484131813049 + -0.9327405095100403 + <_> + + <_> + + + + <_> + 13 0 5 15 -1. + <_> + 13 5 5 5 3. + 0 + 0.0117841102182865 + -0.6357597112655640 + 0.5127261877059937 + <_> + + <_> + + + + <_> + 4 0 10 28 -1. + <_> + 4 14 10 14 2. + 0 + -0.0387781895697117 + -0.7584123015403748 + 0.3491626977920532 + -1.0530849695205688 + 4 + -1 + <_> + + + <_> + + <_> + + + + <_> + 2 12 8 15 -1. + <_> + 6 12 4 15 2. + 0 + 0.0404665991663933 + -0.4351164996623993 + 0.8230059742927551 + <_> + + <_> + + + + <_> + 4 0 14 18 -1. + <_> + 4 6 14 6 3. + 0 + 0.0402202606201172 + -0.5208637118339539 + 0.5568476915359497 + <_> + + <_> + + + + <_> + 15 14 1 3 -1. + <_> + 15 15 1 1 3. + 0 + -3.1198970973491669e-003 + 0.9094204902648926 + -0.2997655868530273 + <_> + + <_> + + + + <_> + 27 13 3 4 -1. + <_> + 28 13 1 4 3. 
+ 0 + 3.9229649119079113e-003 + -0.3093683123588562 + 0.9037017226219177 + <_> + + <_> + + + + <_> + 0 8 28 11 -1. + <_> + 14 8 14 11 2. + 0 + -0.0547299198806286 + -0.9201089739799500 + 0.4091405868530273 + <_> + + <_> + + + + <_> + 25 12 3 7 -1. + <_> + 26 12 1 7 3. + 0 + 4.0078898891806602e-003 + -0.4236168861389160 + 0.8053380846977234 + <_> + + <_> + + + + <_> + 5 5 22 2 -1. + <_> + 16 5 11 2 2. + 0 + 0.0119076501578093 + 0.3813633024692535 + -0.7564094066619873 + -1.1982270479202271 + 5 + -1 + <_> + + + <_> + + <_> + + + + <_> + 6 0 22 14 -1. + <_> + 6 7 22 7 2. + 0 + 0.0797815322875977 + -0.6493945121765137 + 0.5762786865234375 + <_> + + <_> + + + + <_> + 19 12 1 6 -1. + <_> + 17 14 1 2 3. + 1 + -2.7952969539910555e-003 + 0.5456848144531250 + -0.5883920788764954 + <_> + + <_> + + + + <_> + 2 13 14 12 -1. + <_> + 2 13 7 6 2. + <_> + 9 19 7 6 2. + 0 + 8.4679108113050461e-003 + -0.5249853134155273 + 0.4567469060420990 + <_> + + <_> + + + + <_> + 15 4 7 26 -1. + <_> + 15 17 7 13 2. + 0 + 0.0316940285265446 + 0.2529393136501312 + -0.8642746806144714 + <_> + + <_> + + + + <_> + 9 26 13 4 -1. + <_> + 9 28 13 2 2. + 0 + -0.0128996297717094 + 0.7359365224838257 + -0.3432675004005432 + <_> + + <_> + + + + <_> + 10 3 6 8 -1. + <_> + 10 3 3 8 2. + 1 + -0.0229662600904703 + 0.7252805233001709 + -0.4172959923744202 + <_> + + <_> + + + + <_> + 0 10 3 10 -1. + <_> + 1 10 1 10 3. + 0 + -7.0529622025787830e-003 + 0.8382613062858582 + -0.2421897947788239 + <_> + + <_> + + + + <_> + 14 12 3 4 -1. + <_> + 14 13 3 2 2. + 0 + 1.8983749905601144e-003 + -0.3964825868606567 + 0.6354545950889587 + -1.7664920091629028 + 6 + -1 + <_> + + + <_> + + <_> + + + + <_> + 8 2 19 28 -1. + <_> + 8 9 19 14 2. + 0 + 0.2004013061523438 + -0.4439170062541962 + 0.8234676122665405 + <_> + + <_> + + + + <_> + 0 0 30 12 -1. + <_> + 0 6 30 6 2. + 0 + 0.0495737306773663 + -0.6449897289276123 + 0.4417080879211426 + <_> + + <_> + + + + <_> + 14 11 2 9 -1. + <_> + 14 14 2 3 3. + 0 + -6.0293218120932579e-003 + 0.5647888779640198 + -0.4946784079074860 + <_> + + <_> + + + + <_> + 3 0 3 2 -1. + <_> + 3 0 3 1 2. + 1 + 5.9228722238913178e-004 + 0.4513243138790131 + -0.5798317193984985 + <_> + + <_> + + + + <_> + 0 11 6 11 -1. + <_> + 2 11 2 11 3. + 0 + 0.0139415403828025 + -0.3902432918548584 + 0.7450913190841675 + <_> + + <_> + + + + <_> + 17 8 8 20 -1. + <_> + 17 8 4 10 2. + <_> + 21 18 4 10 2. + 0 + -4.4980688835494220e-004 + 0.5301743149757385 + -0.5319514870643616 + <_> + + <_> + + + + <_> + 8 18 4 8 -1. + <_> + 6 20 4 4 2. + 1 + -0.0143874799832702 + 0.8146824240684509 + -0.3091411888599396 + <_> + + <_> + + + + <_> + 24 19 6 4 -1. + <_> + 26 21 2 4 3. + 1 + 0.0157648399472237 + -0.2650843858718872 + 0.8585258126258850 + -1.5048580169677734 + 7 + -1 + <_> + + + <_> + + <_> + + + + <_> + 25 14 2 8 -1. + <_> + 25 14 1 4 2. + <_> + 26 18 1 4 2. + 0 + -1.9776010885834694e-003 + 0.8342393040657044 + -0.3764109015464783 + <_> + + <_> + + + + <_> + 14 15 4 2 -1. + <_> + 14 15 2 1 2. + <_> + 16 16 2 1 2. + 0 + -1.5312379691749811e-003 + 0.7800230979919434 + -0.3976786136627197 + <_> + + <_> + + + + <_> + 16 6 13 24 -1. + <_> + 16 18 13 12 2. + 0 + -5.5937091819941998e-003 + 0.3976748883724213 + -0.8354712128639221 + <_> + + <_> + + + + <_> + 0 14 6 2 -1. + <_> + 3 14 3 2 2. + 0 + 5.9340591542422771e-003 + -0.4098539948463440 + 0.7775127887725830 + <_> + + <_> + + + + <_> + 17 14 5 6 -1. + <_> + 17 14 5 3 2. + 1 + 3.3641920890659094e-003 + 0.4648639857769013 + -0.5968496799468994 + <_> + + <_> + + + + <_> + 6 20 6 3 -1. + <_> + 6 20 3 3 2. 
+ 1 + -6.3608391210436821e-003 + -0.8452699184417725 + 0.3319250047206879 + <_> + + <_> + + + + <_> + 14 15 2 2 -1. + <_> + 14 15 1 1 2. + <_> + 15 16 1 1 2. + 0 + 1.0717130498960614e-003 + -0.3603565990924835 + 0.8019682765007019 + <_> + + <_> + + + + <_> + 28 11 2 16 -1. + <_> + 28 11 1 8 2. + <_> + 29 19 1 8 2. + 0 + 4.5385858975350857e-003 + -0.2635689079761505 + 0.8338183164596558 + -0.7491639256477356 + 8 + -1 + <_> + + + <_> + + <_> + + + + <_> + 1 18 28 8 -1. + <_> + 8 18 14 8 2. + 0 + 0.0700757801532745 + -0.4914397895336151 + 0.6778938174247742 + <_> + + <_> + + + + <_> + 16 0 6 15 -1. + <_> + 18 2 2 15 3. + 1 + 0.0229521002620459 + -0.3336066901683807 + 0.7829133868217468 + <_> + + <_> + + + + <_> + 8 1 2 7 -1. + <_> + 8 1 1 7 2. + 1 + -6.8707908503711224e-003 + 0.9234185218811035 + -0.2476124018430710 + <_> + + <_> + + + + <_> + 14 12 6 4 -1. + <_> + 14 12 3 2 2. + <_> + 17 14 3 2 2. + 0 + 2.0509921014308929e-003 + -0.4796935021877289 + 0.5479726195335388 + <_> + + <_> + + + + <_> + 26 0 2 22 -1. + <_> + 26 0 2 11 2. + 1 + 0.0209642108529806 + 0.3271762132644653 + -0.8076078891754150 + <_> + + <_> + + + + <_> + 28 10 2 2 -1. + <_> + 28 10 1 1 2. + <_> + 29 11 1 1 2. + 0 + -2.8584629762917757e-004 + 0.8164829015731812 + -0.3120633959770203 + <_> + + <_> + + + + <_> + 25 8 3 10 -1. + <_> + 26 8 1 10 3. + 0 + 5.0798300653696060e-003 + -0.2668131887912750 + 0.7880414128303528 + <_> + + <_> + + + + <_> + 14 24 2 6 -1. + <_> + 14 27 2 3 2. + 0 + -1.6909160185605288e-003 + 0.5380467772483826 + -0.4121227860450745 + -1.2660059928894043 + 9 + -1 + <_> + + + <_> + + <_> + + + + <_> + 19 4 6 7 -1. + <_> + 22 4 3 7 2. + 0 + -5.4764188826084137e-003 + 0.6139761805534363 + -0.5204738974571228 + <_> + + <_> + + + + <_> + 13 5 3 18 -1. + <_> + 13 11 3 6 3. + 0 + 4.7526010894216597e-004 + 0.4232788085937500 + -0.6906324028968811 + <_> + + <_> + + + + <_> + 0 2 12 14 -1. + <_> + 6 2 6 14 2. + 0 + 9.8068211227655411e-003 + -0.7110623121261597 + 0.4150972068309784 + <_> + + <_> + + + + <_> + 14 15 3 1 -1. + <_> + 15 15 1 1 3. + 0 + -2.8263509739190340e-003 + 0.8587607145309448 + -0.3086710870265961 + <_> + + <_> + + + + <_> + 24 8 6 15 -1. + <_> + 27 8 3 15 2. + 0 + -0.0207858793437481 + 0.5591353178024292 + -0.5492148995399475 + <_> + + <_> + + + + <_> + 23 0 3 30 -1. + <_> + 23 15 3 15 2. + 0 + 0.0284755192697048 + 0.2707023024559021 + -0.9300810098648071 + <_> + + <_> + + + + <_> + 14 13 4 3 -1. + <_> + 13 14 4 1 3. + 1 + 6.1908899806439877e-003 + -0.2891514003276825 + 0.8885921835899353 + -1.6723439693450928 + 10 + -1 + <_> + + + <_> + + <_> + + + + <_> + 19 7 10 19 -1. + <_> + 24 7 5 19 2. + 0 + -0.0639207363128662 + 0.5404042005538940 + -0.4567835032939911 + <_> + + <_> + + + + <_> + 11 7 10 15 -1. + <_> + 11 12 10 5 3. + 0 + -2.6347399689257145e-003 + 0.4270741045475006 + -0.5876396894454956 + <_> + + <_> + + + + <_> + 2 20 26 8 -1. + <_> + 15 20 13 8 2. + 0 + -0.0461380295455456 + -0.7739400267601013 + 0.3122020959854126 + <_> + + <_> + + + + <_> + 25 8 2 6 -1. + <_> + 25 8 1 3 2. + <_> + 26 11 1 3 2. + 0 + 2.0124330185353756e-003 + -0.3222776949405670 + 0.8423414826393127 + <_> + + <_> + + + + <_> + 27 13 3 5 -1. + <_> + 28 13 1 5 3. + 0 + 6.1421301215887070e-003 + -0.3080565035343170 + 0.8616139888763428 + <_> + + <_> + + + + <_> + 14 14 2 4 -1. + <_> + 14 15 2 2 2. + 0 + -1.6880210023373365e-003 + 0.5805559754371643 + -0.3706024885177612 + <_> + + <_> + + + + <_> + 3 10 8 2 -1. + <_> + 3 10 4 2 2. 
+ 1 + -0.0101441303268075 + 0.5537341833114624 + -0.3941943049430847 + <_> + + <_> + + + + <_> + 24 17 6 7 -1. + <_> + 26 19 2 7 3. + 1 + 0.0335026010870934 + -0.2556783854961395 + 0.8882070183753967 + -1.3683170080184937 + 11 + -1 + <_> + + + <_> + + <_> + + + + <_> + 21 2 2 28 -1. + <_> + 21 9 2 14 2. + 0 + 0.0224439799785614 + -0.5313345193862915 + 0.6142271161079407 + <_> + + <_> + + + + <_> + 2 4 18 26 -1. + <_> + 2 17 18 13 2. + 0 + 0.0533409006893635 + 0.2682515084743500 + -0.9193441271781921 + <_> + + <_> + + + + <_> + 26 14 4 4 -1. + <_> + 28 14 2 4 2. + 0 + -5.0225141458213329e-003 + 0.5458484888076782 + -0.4496412873268127 + <_> + + <_> + + + + <_> + 23 15 6 8 -1. + <_> + 23 15 3 8 2. + 1 + 0.0459533594548702 + -0.3108670115470886 + 0.8686702251434326 + <_> + + <_> + + + + <_> + 0 9 6 11 -1. + <_> + 2 9 2 11 3. + 0 + 9.1376658529043198e-003 + -0.3542624115943909 + 0.6663610935211182 + <_> + + <_> + + + + <_> + 14 14 1 3 -1. + <_> + 14 15 1 1 3. + 0 + -2.2559710778295994e-003 + 0.7523422241210938 + -0.3447830975055695 + <_> + + <_> + + + + <_> + 1 7 28 1 -1. + <_> + 8 7 14 1 2. + 0 + 9.0435370802879333e-003 + -0.3231815099716187 + 0.6448699235916138 + -1.0777670145034790 + 12 + -1 + <_> + + + <_> + + <_> + + + + <_> + 20 13 10 15 -1. + <_> + 25 13 5 15 2. + 0 + -0.0394573509693146 + 0.4782564938068390 + -0.5722619295120239 + <_> + + <_> + + + + <_> + 9 15 8 1 -1. + <_> + 13 15 4 1 2. + 0 + -1.7344199586659670e-003 + 0.3705500066280365 + -0.6115717887878418 + <_> + + <_> + + + + <_> + 19 0 8 1 -1. + <_> + 21 0 4 1 2. + 0 + -7.7608181163668633e-004 + -0.7950387001037598 + 0.2622818052768707 + <_> + + <_> + + + + <_> + 5 20 2 2 -1. + <_> + 5 20 1 1 2. + <_> + 6 21 1 1 2. + 0 + 6.0399679932743311e-004 + -0.2708126008510590 + 0.8733022809028626 + <_> + + <_> + + + + <_> + 22 11 8 10 -1. + <_> + 24 11 4 10 2. + 0 + 0.0211945194751024 + -0.3263381123542786 + 0.7960063815116882 + <_> + + <_> + + + + <_> + 2 18 7 6 -1. + <_> + 2 21 7 3 2. + 0 + -3.3754170872271061e-003 + 0.5355839729309082 + -0.5585852265357971 + <_> + + <_> + + + + <_> + 6 17 8 5 -1. + <_> + 8 19 4 5 2. + 1 + 7.7950168633833528e-004 + -0.6128119230270386 + 0.3950763940811157 + <_> + + <_> + + + + <_> + 10 0 5 12 -1. + <_> + 10 6 5 6 2. + 0 + 6.2134041218087077e-004 + -0.7983394265174866 + 0.2523753941059113 + <_> + + <_> + + + + <_> + 19 8 8 8 -1. + <_> + 19 8 8 4 2. + 1 + 5.1883992273360491e-004 + -0.7581666707992554 + 0.2751871049404144 + -1.1804150342941284 + 13 + -1 + <_> + + + <_> + + <_> + + + + <_> + 8 20 18 8 -1. + <_> + 8 24 18 4 2. + 0 + -0.0746768414974213 + 0.8516380190849304 + -0.3425028026103973 + <_> + + <_> + + + + <_> + 26 15 4 1 -1. + <_> + 27 16 2 1 2. + 1 + 1.5731110470369458e-003 + -0.3031556010246277 + 0.6837754249572754 + <_> + + <_> + + + + <_> + 22 3 8 25 -1. + <_> + 26 3 4 25 2. + 0 + -0.0625705122947693 + 0.5789077877998352 + -0.4484055042266846 + <_> + + <_> + + + + <_> + 12 15 6 1 -1. + <_> + 14 15 2 1 3. + 0 + -2.3908941075205803e-003 + 0.4731529951095581 + -0.5635436773300171 + <_> + + <_> + + + + <_> + 11 24 7 6 -1. + <_> + 11 26 7 2 3. + 0 + 2.1347070578485727e-003 + -0.4449442028999329 + 0.5285379290580750 + <_> + + <_> + + + + <_> + 12 0 16 18 -1. + <_> + 12 6 16 6 3. + 0 + 0.0103090200573206 + -0.7689601182937622 + 0.2924349009990692 + <_> + + <_> + + + + <_> + 7 13 11 6 -1. + <_> + 7 13 11 3 2. + 1 + 0.0620706714689732 + -0.4327771961688995 + 0.8331649899482727 + <_> + + <_> + + + + <_> + 11 13 9 6 -1. + <_> + 14 15 3 2 9. 
+ 0 + -2.5847749784588814e-003 + 0.2928090989589691 + -0.8889489173889160 + -1.1310479640960693 + 14 + -1 + <_> + + + <_> + + <_> + + + + <_> + 4 0 21 26 -1. + <_> + 4 13 21 13 2. + 0 + 0.0588057599961758 + -0.4991154074668884 + 0.6187056899070740 + <_> + + <_> + + + + <_> + 13 0 9 11 -1. + <_> + 16 3 3 11 3. + 1 + 0.0356934182345867 + -0.4802035093307495 + 0.6672577857971191 + <_> + + <_> + + + + <_> + 0 8 6 8 -1. + <_> + 2 8 2 8 3. + 0 + 3.7319110706448555e-003 + -0.5551087856292725 + 0.6358423233032227 + <_> + + <_> + + + + <_> + 6 17 24 12 -1. + <_> + 18 17 12 12 2. + 0 + -3.4886640496551991e-003 + 0.3998816013336182 + -0.8779597282409668 + <_> + + <_> + + + + <_> + 14 11 3 9 -1. + <_> + 14 14 3 3 3. + 0 + -3.5188349429517984e-003 + 0.2896867990493774 + -0.8983190059661865 + <_> + + <_> + + + + <_> + 25 12 3 6 -1. + <_> + 26 14 1 2 9. + 0 + 2.6123720454052091e-004 + -0.7029349207878113 + 0.5493162274360657 + <_> + + <_> + + + + <_> + 15 0 15 21 -1. + <_> + 15 7 15 7 3. + 0 + 6.4898689743131399e-005 + -0.9439805150032044 + 0.2820520997047424 + <_> + + <_> + + + + <_> + 14 15 10 2 -1. + <_> + 14 15 5 1 2. + <_> + 19 16 5 1 2. + 0 + -1.3600759848486632e-004 + 0.4432930052280426 + -0.7824695110321045 + <_> + + <_> + + + + <_> + 0 20 2 6 -1. + <_> + 1 20 1 6 2. + 0 + -9.0165252913720906e-005 + 0.6236552000045776 + -0.6018273830413818 + <_> + + <_> + + + + <_> + 0 5 10 25 -1. + <_> + 5 5 5 25 2. + 0 + 9.2873163521289825e-003 + -0.7339897751808167 + 0.4519324898719788 + <_> + + <_> + + + + <_> + 2 5 2 10 -1. + <_> + 2 5 1 5 2. + <_> + 3 10 1 5 2. + 0 + -3.7979730404913425e-004 + 0.6942034959793091 + -0.5841832756996155 + <_> + + <_> + + + + <_> + 4 7 6 6 -1. + <_> + 4 10 6 3 2. + 0 + 2.1245880052447319e-003 + -0.4886597096920013 + 0.6439890265464783 + -2.0186989307403564 + 15 + -1 + <_> + + + <_> + + <_> + + + + <_> + 18 2 12 12 -1. + <_> + 24 2 6 12 2. + 0 + -0.0131159899756312 + 0.4102598130702972 + -0.7836742997169495 + <_> + + <_> + + + + <_> + 12 15 4 1 -1. + <_> + 14 15 2 1 2. + 0 + -1.9107479602098465e-003 + 0.4352349936962128 + -0.5894374251365662 + <_> + + <_> + + + + <_> + 0 14 15 15 -1. + <_> + 0 19 15 5 3. + 0 + 2.8926940285600722e-004 + -0.7347720861434937 + 0.3091743886470795 + <_> + + <_> + + + + <_> + 5 23 15 6 -1. + <_> + 5 25 15 2 3. + 0 + 0.0108875995501876 + -0.4113591015338898 + 0.7446773052215576 + <_> + + <_> + + + + <_> + 6 23 20 1 -1. + <_> + 11 23 10 1 2. + 0 + 8.4888264536857605e-003 + -0.4784755110740662 + 0.7682887911796570 + <_> + + <_> + + + + <_> + 7 13 15 6 -1. + <_> + 12 15 5 2 9. + 0 + -7.8473137691617012e-003 + 0.3737767040729523 + -0.8986917138099670 + <_> + + <_> + + + + <_> + 26 11 4 9 -1. + <_> + 28 11 2 9 2. + 0 + -8.6469706147909164e-003 + 0.5991634726524353 + -0.6494582891464233 + <_> + + <_> + + + + <_> + 0 2 24 18 -1. + <_> + 6 2 12 18 2. + 0 + 1.2370230397209525e-003 + -0.8902391195297241 + 0.3708789944648743 + <_> + + <_> + + + + <_> + 14 12 3 3 -1. + <_> + 15 13 1 1 9. + 0 + 2.5298888795077801e-004 + -0.8120170235633850 + 0.4935150146484375 + <_> + + <_> + + + + <_> + 13 5 15 10 -1. + <_> + 18 5 5 10 3. + 0 + 5.1526250317692757e-003 + -0.9192841053009033 + 0.4452421963214874 + -1.3165810108184814 + 16 + -1 + <_> + + + <_> + + <_> + + + + <_> + 15 12 12 18 -1. + <_> + 15 18 12 6 3. + 0 + 0.0295117292553186 + -0.5727707743644714 + 0.4841710031032562 + <_> + + <_> + + + + <_> + 20 0 9 16 -1. + <_> + 20 8 9 8 2. + 0 + 0.0143716000020504 + -0.7799515128135681 + 0.3565404117107391 + <_> + + <_> + + + + <_> + 2 0 6 3 -1. + <_> + 4 1 2 1 9. 
+ 0 + 1.0884779840125702e-005 + -0.6723812222480774 + 0.4785112142562866 + <_> + + <_> + + + + <_> + 1 0 29 24 -1. + <_> + 1 6 29 12 2. + 0 + 4.3139769695699215e-003 + -0.8979778885841370 + 0.2318837046623230 + <_> + + <_> + + + + <_> + 0 29 10 1 -1. + <_> + 5 29 5 1 2. + 0 + -7.7994642197154462e-005 + 0.3738532960414887 + -0.7187057137489319 + <_> + + <_> + + + + <_> + 11 12 6 4 -1. + <_> + 11 12 3 2 2. + <_> + 14 14 3 2 2. + 0 + -3.0512181110680103e-003 + 0.5300139784812927 + -0.5830147266387940 + <_> + + <_> + + + + <_> + 5 14 6 5 -1. + <_> + 5 14 3 5 2. + 1 + -4.3142150389030576e-004 + 0.4394023120403290 + -0.6322596073150635 + <_> + + <_> + + + + <_> + 1 11 4 7 -1. + <_> + 2 11 2 7 2. + 0 + 7.0738680660724640e-003 + -0.3457511961460114 + 0.9177647233009338 + <_> + + <_> + + + + <_> + 28 27 2 3 -1. + <_> + 29 27 1 3 2. + 0 + 7.7551658250740729e-006 + -0.6041529774665833 + 0.4977194964885712 + <_> + + <_> + + + + <_> + 12 15 8 6 -1. + <_> + 10 17 8 2 3. + 1 + 0.0139424195513129 + -0.4868971109390259 + 0.7841137051582336 + -1.8075209856033325 + 17 + -1 + <_> + + + <_> + + <_> + + + + <_> + 15 15 2 2 -1. + <_> + 15 15 1 1 2. + <_> + 16 16 1 1 2. + 0 + -1.8129580421373248e-003 + 0.9541606903076172 + -0.2965695858001709 + <_> + + <_> + + + + <_> + 2 12 16 12 -1. + <_> + 2 12 8 6 2. + <_> + 10 18 8 6 2. + 0 + 4.7363578341901302e-003 + -0.6298993825912476 + 0.4632642865180969 + <_> + + <_> + + + + <_> + 25 14 2 10 -1. + <_> + 25 14 1 5 2. + <_> + 26 19 1 5 2. + 0 + -3.3910579513758421e-003 + 0.8871492147445679 + -0.4089617133140564 + <_> + + <_> + + + + <_> + 24 10 6 16 -1. + <_> + 27 10 3 16 2. + 0 + -0.0291099399328232 + 0.5941541790962219 + -0.4963997900485992 + <_> + + <_> + + + + <_> + 0 3 24 10 -1. + <_> + 12 3 12 10 2. + 0 + 1.8045129763777368e-005 + -0.8051089048385620 + 0.2899082005023956 + <_> + + <_> + + + + <_> + 5 18 8 4 -1. + <_> + 4 19 8 2 2. + 1 + 7.3270881548523903e-003 + -0.5242174267768860 + 0.8847057819366455 + <_> + + <_> + + + + <_> + 16 6 9 6 -1. + <_> + 19 9 3 6 3. + 1 + -1.9007539958693087e-004 + 0.5221701860427856 + -0.8480635881423950 + <_> + + <_> + + + + <_> + 16 5 1 16 -1. + <_> + 16 5 1 8 2. + 1 + 4.3798499973490834e-005 + -0.7982841730117798 + 0.4523805975914002 + <_> + + <_> + + + + <_> + 0 0 1 30 -1. + <_> + 0 10 1 10 3. + 0 + -2.6992160201189108e-005 + 0.5484765172004700 + -0.7896834015846252 + <_> + + <_> + + + + <_> + 18 1 2 8 -1. + <_> + 18 5 2 4 2. + 0 + 2.6483249384909868e-003 + -0.6831504702568054 + 0.5447096824645996 + <_> + + <_> + + + + <_> + 2 1 2 18 -1. + <_> + 2 1 1 9 2. + <_> + 3 10 1 9 2. + 0 + -1.2241229705978185e-004 + 0.6463962197303772 + -0.7322003245353699 + -1.2552789449691772 + 18 + -1 + <_> + + + <_> + + <_> + + + + <_> + 14 14 3 3 -1. + <_> + 15 15 1 1 9. + 0 + -0.0118554998189211 + 0.7671378254890442 + -0.3722873032093048 + <_> + + <_> + + + + <_> + 15 12 15 18 -1. + <_> + 15 18 15 6 3. + 0 + 0.0129950996488333 + -0.5752075910568237 + 0.4080007970333099 + <_> + + <_> + + + + <_> + 0 15 15 5 -1. + <_> + 5 15 5 5 3. + 0 + 0.0123613402247429 + -0.5057299137115479 + 0.4008283019065857 + <_> + + <_> + + + + <_> + 24 11 6 10 -1. + <_> + 26 11 2 10 3. + 0 + 0.0217736903578043 + -0.3811939060688019 + 0.7375351190567017 + <_> + + <_> + + + + <_> + 20 4 9 18 -1. + <_> + 20 10 9 6 3. + 0 + 8.2471058703958988e-004 + -0.7928907275199890 + 0.3049820065498352 + <_> + + <_> + + + + <_> + 15 12 4 4 -1. + <_> + 15 12 4 2 2. + 1 + -8.0548477126285434e-004 + 0.3002581894397736 + -0.8209298849105835 + <_> + + <_> + + + + <_> + 27 26 3 4 -1. 
+ <_> + 27 28 3 2 2. + 0 + 4.6441138692898676e-005 + -0.5512930154800415 + 0.4281317889690399 + <_> + + <_> + + + + <_> + 0 6 28 4 -1. + <_> + 14 6 14 4 2. + 0 + 1.3028540706727654e-004 + -0.7330580949783325 + 0.3167754113674164 + <_> + + <_> + + + + <_> + 12 5 6 18 -1. + <_> + 14 11 2 6 9. + 0 + -3.0463289003819227e-003 + 0.3088589906692505 + -0.7799909114837647 + <_> + + <_> + + + + <_> + 13 11 3 6 -1. + <_> + 14 13 1 2 9. + 0 + 5.1273731514811516e-003 + -0.4092488884925842 + 0.7003359198570252 + <_> + + <_> + + + + <_> + 14 13 4 3 -1. + <_> + 13 14 4 1 3. + 1 + 6.9407821865752339e-004 + -0.6798236966133118 + 0.5320472121238709 + <_> + + <_> + + + + <_> + 3 0 17 21 -1. + <_> + 3 7 17 7 3. + 0 + 4.9300299724563956e-004 + -0.9494745135307312 + 0.2453067004680634 + -1.2338080406188965 + 19 + -1 + <_> + + + <_> + + <_> + + + + <_> + 6 5 22 24 -1. + <_> + 6 11 22 12 2. + 0 + 0.1109884008765221 + -0.4732984006404877 + 0.6041498184204102 + <_> + + <_> + + + + <_> + 15 3 15 13 -1. + <_> + 20 3 5 13 3. + 0 + 8.8524278253316879e-003 + -0.7683498263359070 + 0.2962965071201325 + <_> + + <_> + + + + <_> + 0 2 27 8 -1. + <_> + 9 2 9 8 3. + 0 + 2.9951189644634724e-003 + -0.7832775712013245 + 0.2668977975845337 + <_> + + <_> + + + + <_> + 24 18 3 3 -1. + <_> + 25 19 1 1 9. + 0 + 7.0615397999063134e-004 + -0.5898581147193909 + 0.4767473042011261 + <_> + + <_> + + + + <_> + 2 10 14 20 -1. + <_> + 2 15 14 10 2. + 0 + 8.2862451672554016e-003 + -0.8107367157936096 + 0.2578361928462982 + <_> + + <_> + + + + <_> + 17 14 3 2 -1. + <_> + 18 15 1 2 3. + 1 + 8.9599809143692255e-004 + -0.7087581753730774 + 0.5773987770080566 + <_> + + <_> + + + + <_> + 19 6 4 10 -1. + <_> + 19 6 4 5 2. + 1 + 2.2114950115792453e-004 + -0.9175388813018799 + 0.3938092887401581 + <_> + + <_> + + + + <_> + 19 9 10 20 -1. + <_> + 19 19 10 10 2. + 0 + -2.6218120474368334e-003 + 0.2289039939641953 + -0.9661834239959717 + <_> + + <_> + + + + <_> + 13 24 9 6 -1. + <_> + 13 26 9 2 3. + 0 + 8.1112459301948547e-003 + -0.6872652173042297 + 0.7748587727546692 + <_> + + <_> + + + + <_> + 10 14 12 3 -1. + <_> + 14 15 4 1 9. + 0 + -6.6581218561623245e-005 + 0.4069637060165405 + -0.9821419119834900 + <_> + + <_> + + + + <_> + 20 4 10 11 -1. + <_> + 25 4 5 11 2. + 0 + -4.3942942284047604e-003 + 0.3025366067886353 + -0.9085376858711243 + <_> + + <_> + + + + <_> + 14 14 4 4 -1. + <_> + 14 14 2 2 2. + <_> + 16 16 2 2 2. + 0 + -8.1969819802907296e-006 + 0.3476018905639648 + -0.8090888857841492 + <_> + + <_> + + + + <_> + 2 13 3 5 -1. + <_> + 3 13 1 5 3. + 0 + -1.7878259532153606e-003 + 0.7400094270706177 + -0.4940893948078156 + -1.6455509662628174 + 20 + -1 + <_> + + + <_> + + <_> + + + + <_> + 6 1 3 18 -1. + <_> + 6 10 3 9 2. + 0 + 0.0170281101018190 + -0.6075357794761658 + 0.6752921938896179 + <_> + + <_> + + + + <_> + 19 0 10 18 -1. + <_> + 19 9 10 9 2. + 0 + 4.9286349676549435e-003 + -0.9023544788360596 + 0.2912414968013763 + <_> + + <_> + + + + <_> + 15 11 1 6 -1. + <_> + 15 13 1 2 3. + 0 + 4.5951400534249842e-004 + -0.6812731027603149 + 0.5965930819511414 + <_> + + <_> + + + + <_> + 13 4 6 18 -1. + <_> + 15 10 2 6 9. + 0 + -4.2040079279104248e-005 + 0.4269096851348877 + -0.8979231715202332 + <_> + + <_> + + + + <_> + 18 4 11 26 -1. + <_> + 18 17 11 13 2. + 0 + -1.1093009961768985e-003 + 0.3619905114173889 + -0.9165890216827393 + <_> + + <_> + + + + <_> + 10 3 6 27 -1. + <_> + 12 12 2 9 9. + 0 + 7.1232998743653297e-003 + -0.5551241040229797 + 0.5782169103622437 + <_> + + <_> + + + + <_> + 8 1 4 8 -1. + <_> + 9 2 2 8 2. 
+ 1 + 3.6009349860250950e-003 + -0.6728715896606445 + 0.7024763822555542 + <_> + + <_> + + + + <_> + 8 0 8 2 -1. + <_> + 12 0 4 2 2. + 0 + -9.7479542091605254e-006 + 0.5475305914878845 + -0.8654465079307556 + <_> + + <_> + + + + <_> + 0 20 18 10 -1. + <_> + 0 25 18 5 2. + 0 + -2.2331129293888807e-003 + 0.3924748003482819 + -0.8936464190483093 + <_> + + <_> + + + + <_> + 20 1 7 3 -1. + <_> + 19 2 7 1 3. + 1 + 4.9669588916003704e-003 + -0.4396930932998657 + 0.8690040111541748 + <_> + + <_> + + + + <_> + 5 15 1 4 -1. + <_> + 4 16 1 2 2. + 1 + 4.6299301175167784e-005 + -0.6883816123008728 + 0.6461343765258789 + <_> + + <_> + + + + <_> + 5 1 20 8 -1. + <_> + 10 1 10 8 2. + 0 + 2.0605750614777207e-004 + -0.9089515805244446 + 0.5352932214736939 + -1.3342789411544800 + 21 + -1 + <_> + + + <_> + + <_> + + + + <_> + 19 0 2 14 -1. + <_> + 19 0 2 7 2. + 1 + -9.7629595547914505e-003 + 0.6056637167930603 + -0.6816167235374451 + <_> + + <_> + + + + <_> + 0 2 26 10 -1. + <_> + 13 2 13 10 2. + 0 + 1.1452470207586884e-003 + -0.7568649053573608 + 0.4382646977901459 + <_> + + <_> + + + + <_> + 14 14 3 3 -1. + <_> + 15 15 1 1 9. + 0 + -8.1162629649043083e-003 + 0.6009442806243897 + -0.5972846150398254 + <_> + + <_> + + + + <_> + 10 0 4 20 -1. + <_> + 10 5 4 10 2. + 0 + 9.4810684458934702e-006 + -0.7322263121604919 + 0.4546971917152405 + <_> + + <_> + + + + <_> + 22 21 6 6 -1. + <_> + 22 21 3 3 2. + <_> + 25 24 3 3 2. + 0 + -8.2458636825322174e-006 + 0.6875557899475098 + -0.5961893796920776 + <_> + + <_> + + + + <_> + 23 1 6 3 -1. + <_> + 23 2 6 1 3. + 0 + 8.2058722910005599e-006 + -0.5708162784576416 + 0.6666625738143921 + <_> + + <_> + + + + <_> + 12 24 4 2 -1. + <_> + 12 24 2 2 2. + 1 + 2.3102159611880779e-003 + -0.6336330771446228 + 0.7040169239044190 + <_> + + <_> + + + + <_> + 21 4 4 26 -1. + <_> + 21 17 4 13 2. + 0 + -3.1256309739546850e-005 + 0.3984279930591583 + -0.9526088833808899 + <_> + + <_> + + + + <_> + 2 3 6 8 -1. + <_> + 2 3 3 4 2. + <_> + 5 7 3 4 2. + 0 + -5.6237089447677135e-003 + 0.8949983119964600 + -0.6286303997039795 + -1.1762020587921143 + 22 + -1 + <_> + + + <_> + + <_> + + + + <_> + 0 0 4 1 -1. + <_> + 2 0 2 1 2. + 0 + 6.9548498140648007e-004 + 0.2593482136726379 + -0.9198864102363586 + <_> + + <_> + + + + <_> + 24 13 2 10 -1. + <_> + 25 13 1 10 2. + 0 + -4.9878000281751156e-003 + 0.8619614839553833 + -0.3343923985958099 + <_> + + <_> + + + + <_> + 2 4 28 15 -1. + <_> + 16 4 14 15 2. + 0 + 0.0454013496637344 + 0.3592154085636139 + -0.7624815106391907 + <_> + + <_> + + + + <_> + 0 13 30 10 -1. + <_> + 15 13 15 10 2. + 0 + -0.0440022610127926 + -0.7319048047065735 + 0.3194361031055450 + <_> + + <_> + + + + <_> + 25 0 4 3 -1. + <_> + 26 0 2 3 2. 
+ 0 + 9.2827458865940571e-004 + 0.3184696137905121 + -0.7557029128074646 + -1.2034360170364380 + 23 + -1 + diff --git a/lib/cascade-files/haarcascade_eye.xml b/library/cascade-files/haarcascade_eye.xml similarity index 100% rename from lib/cascade-files/haarcascade_eye.xml rename to library/cascade-files/haarcascade_eye.xml diff --git a/lib/cascade-files/haarcascade_eye_tree_eyeglasses.xml b/library/cascade-files/haarcascade_eye_tree_eyeglasses.xml similarity index 100% rename from lib/cascade-files/haarcascade_eye_tree_eyeglasses.xml rename to library/cascade-files/haarcascade_eye_tree_eyeglasses.xml diff --git a/lib/cascade-files/haarcascade_frontalface_alt.xml b/library/cascade-files/haarcascade_frontalface_alt.xml similarity index 100% rename from lib/cascade-files/haarcascade_frontalface_alt.xml rename to library/cascade-files/haarcascade_frontalface_alt.xml diff --git a/lib/cascade-files/haarcascade_frontalface_alt2.xml b/library/cascade-files/haarcascade_frontalface_alt2.xml similarity index 100% rename from lib/cascade-files/haarcascade_frontalface_alt2.xml rename to library/cascade-files/haarcascade_frontalface_alt2.xml diff --git a/lib/cascade-files/haarcascade_frontalface_alt_tree.xml b/library/cascade-files/haarcascade_frontalface_alt_tree.xml similarity index 100% rename from lib/cascade-files/haarcascade_frontalface_alt_tree.xml rename to library/cascade-files/haarcascade_frontalface_alt_tree.xml diff --git a/lib/cascade-files/haarcascade_frontalface_default.xml b/library/cascade-files/haarcascade_frontalface_default.xml similarity index 100% rename from lib/cascade-files/haarcascade_frontalface_default.xml rename to library/cascade-files/haarcascade_frontalface_default.xml diff --git a/lib/cascade-files/haarcascade_fullbody.xml b/library/cascade-files/haarcascade_fullbody.xml similarity index 100% rename from lib/cascade-files/haarcascade_fullbody.xml rename to library/cascade-files/haarcascade_fullbody.xml diff --git a/lib/cascade-files/haarcascade_lefteye_2splits.xml b/library/cascade-files/haarcascade_lefteye_2splits.xml similarity index 100% rename from lib/cascade-files/haarcascade_lefteye_2splits.xml rename to library/cascade-files/haarcascade_lefteye_2splits.xml diff --git a/lib/cascade-files/haarcascade_lowerbody.xml b/library/cascade-files/haarcascade_lowerbody.xml similarity index 100% rename from lib/cascade-files/haarcascade_lowerbody.xml rename to library/cascade-files/haarcascade_lowerbody.xml diff --git a/lib/cascade-files/haarcascade_mcs_eyepair_big.xml b/library/cascade-files/haarcascade_mcs_eyepair_big.xml similarity index 100% rename from lib/cascade-files/haarcascade_mcs_eyepair_big.xml rename to library/cascade-files/haarcascade_mcs_eyepair_big.xml diff --git a/lib/cascade-files/haarcascade_mcs_eyepair_small.xml b/library/cascade-files/haarcascade_mcs_eyepair_small.xml similarity index 100% rename from lib/cascade-files/haarcascade_mcs_eyepair_small.xml rename to library/cascade-files/haarcascade_mcs_eyepair_small.xml diff --git a/lib/cascade-files/haarcascade_mcs_leftear.xml b/library/cascade-files/haarcascade_mcs_leftear.xml similarity index 100% rename from lib/cascade-files/haarcascade_mcs_leftear.xml rename to library/cascade-files/haarcascade_mcs_leftear.xml diff --git a/lib/cascade-files/haarcascade_mcs_lefteye.xml b/library/cascade-files/haarcascade_mcs_lefteye.xml similarity index 100% rename from lib/cascade-files/haarcascade_mcs_lefteye.xml rename to library/cascade-files/haarcascade_mcs_lefteye.xml diff --git 
diff --git a/lib/cascade-files/haarcascade_mcs_mouth.xml b/library/cascade-files/haarcascade_mcs_mouth.xml
similarity index 100%
rename from lib/cascade-files/haarcascade_mcs_mouth.xml
rename to library/cascade-files/haarcascade_mcs_mouth.xml
diff --git a/lib/cascade-files/haarcascade_mcs_nose.xml b/library/cascade-files/haarcascade_mcs_nose.xml
similarity index 100%
rename from lib/cascade-files/haarcascade_mcs_nose.xml
rename to library/cascade-files/haarcascade_mcs_nose.xml
diff --git a/lib/cascade-files/haarcascade_mcs_rightear.xml b/library/cascade-files/haarcascade_mcs_rightear.xml
similarity index 100%
rename from lib/cascade-files/haarcascade_mcs_rightear.xml
rename to library/cascade-files/haarcascade_mcs_rightear.xml
diff --git a/lib/cascade-files/haarcascade_mcs_righteye.xml b/library/cascade-files/haarcascade_mcs_righteye.xml
similarity index 100%
rename from lib/cascade-files/haarcascade_mcs_righteye.xml
rename to library/cascade-files/haarcascade_mcs_righteye.xml
diff --git a/lib/cascade-files/haarcascade_mcs_upperbody.xml b/library/cascade-files/haarcascade_mcs_upperbody.xml
similarity index 100%
rename from lib/cascade-files/haarcascade_mcs_upperbody.xml
rename to library/cascade-files/haarcascade_mcs_upperbody.xml
diff --git a/lib/cascade-files/haarcascade_profileface.xml b/library/cascade-files/haarcascade_profileface.xml
similarity index 100%
rename from lib/cascade-files/haarcascade_profileface.xml
rename to library/cascade-files/haarcascade_profileface.xml
diff --git a/lib/cascade-files/haarcascade_righteye_2splits.xml b/library/cascade-files/haarcascade_righteye_2splits.xml
similarity index 100%
rename from lib/cascade-files/haarcascade_righteye_2splits.xml
rename to library/cascade-files/haarcascade_righteye_2splits.xml
diff --git a/lib/cascade-files/haarcascade_upperbody.xml b/library/cascade-files/haarcascade_upperbody.xml
similarity index 100%
rename from lib/cascade-files/haarcascade_upperbody.xml
rename to library/cascade-files/haarcascade_upperbody.xml
diff --git a/lib/cascade-files/hogcascade_pedestrians.xml b/library/cascade-files/hogcascade_pedestrians.xml
similarity index 100%
rename from lib/cascade-files/hogcascade_pedestrians.xml
rename to library/cascade-files/hogcascade_pedestrians.xml
diff --git a/lib/cascade-files/lbpcascade_frontalface.xml b/library/cascade-files/lbpcascade_frontalface.xml
similarity index 97%
rename from lib/cascade-files/lbpcascade_frontalface.xml
rename to library/cascade-files/lbpcascade_frontalface.xml
index 59850cb..fc7648e 100644
--- a/lib/cascade-files/lbpcascade_frontalface.xml
+++ b/library/cascade-files/lbpcascade_frontalface.xml
@@ -1,1505 +1,1505 @@
[1505-line hunk, cut off mid-stream in this copy: the XML markup was lost in extraction, leaving only raw LBP detector data (BOOST stage type, LBP features, 24x24 window, GAB boosting parameters, stage thresholds, internal nodes, and leaf values separated by <_> markers).]
-221796 - -272842924 - - -0.3949716091156006 0.6082032322883606 - - <_> - - 0 -1 110 369147696 -1625232112 2138570036 -1189900 790708019 - -1212613127 799948719 -4456483 - - -0.4855885505676270 0.4785369932651520 - - <_> - - 0 -1 37 784215839 -290015241 536832799 -402984963 - -1342414991 -838864897 -176769 -268456129 - - -0.4620285332202911 0.4989669024944305 - - <_> - - 0 -1 41 -486418688 -171915327 -340294900 -21938 -519766032 - -772751172 -73096060 -585322623 - - -0.6420643329620361 0.3624351918697357 - - <_> - - 0 -1 117 -33554953 -475332625 -1423463824 -2077230421 - -4849669 -2080505925 -219032928 -1071915349 - - -0.4820112884044647 0.4632140696048737 - - <_> - - 0 -1 65 -834130468 -134217476 -1349314083 -1073803559 - -619913764 -1449131844 -1386890321 -1979118423 - - -0.4465552568435669 0.5061788558959961 - - <_> - - 0 -1 56 -285249779 1912569855 -16530 -1731022870 -1161904146 - -1342177297 -268439634 -1464078708 - - -0.5190586447715759 0.4441480338573456 - - <_> - 7 - -0.7161560654640198 - - - <_> - - 0 -1 20 1246232575 1078001186 -10027057 60102 -277348353 - -43646987 -1210581153 1195769615 - - -0.4323809444904327 0.5663768053054810 - - <_> - - 0 -1 15 -778583572 -612921106 -578775890 -4036478 - -1946580497 -1164766570 -1986687009 -12103599 - - -0.4588732719421387 0.4547033011913300 - - <_> - - 0 -1 129 -1073759445 2013231743 -1363169553 -1082459201 - -1414286549 868185983 -1356133589 -1077936257 - - -0.5218553543090820 0.4111092388629913 - - <_> - - 0 -1 102 -84148365 -2093417722 -1204850272 564290299 - -67121221 -1342177350 -1309195902 -776734797 - - -0.4920000731945038 0.4326725304126740 - - <_> - - 0 -1 88 -25694458 67104495 -290216278 -168563037 2083877442 - 1702788383 -144191964 -234882162 - - -0.4494568109512329 0.4448510706424713 - - <_> - - 0 -1 59 -857980836 904682741 -1612267521 232279415 - 1550862252 -574825221 -357380888 -4579409 - - -0.5180826783180237 0.3888972699642181 - - <_> - - 0 -1 27 -98549440 -137838400 494928389 -246013630 939541351 - -1196072350 -620603549 2137216273 - - -0.6081240773200989 0.3333222270011902 - - <_> - 8 - -0.6743940711021423 - - - <_> - - 0 -1 29 -150995201 2071191945 -1302151626 536934335 - -1059008937 914128709 1147328110 -268369925 - - -0.1790193915367127 0.6605972051620483 - - <_> - - 0 -1 128 -134509479 1610575703 -1342177289 1861484541 - -1107833788 1577058173 -333558568 -136319041 - - -0.3681024610996246 0.5139749646186829 - - <_> - - 0 -1 70 -1 1060154476 -1090984524 -630918524 -539492875 - 779616255 -839568424 -321 - - -0.3217232525348663 0.6171553134918213 - - <_> - - 0 -1 4 -269562385 -285029906 -791084350 -17923776 235286671 - 1275504943 1344390399 -966276889 - - -0.4373284578323364 0.4358185231685638 - - <_> - - 0 -1 76 17825984 -747628419 595427229 1474759671 575672208 - -1684005538 872217086 -1155858277 - - -0.4404836893081665 0.4601220190525055 - - <_> - - 0 -1 124 -336593039 1873735591 -822231622 -355795238 - -470820869 -1997537409 -1057132384 -1015285005 - - -0.4294152259826660 0.4452161788940430 - - <_> - - 0 -1 54 -834212130 -593694721 -322142257 -364892500 - -951029539 -302125121 -1615106053 -79249765 - - -0.3973052501678467 0.4854526817798615 - - <_> - - 0 -1 95 1342144479 2147431935 -33554561 -47873 -855685912 -1 - 1988052447 536827383 - - -0.7054683566093445 0.2697997391223908 - - <_> - 9 - -1.2042298316955566 - - - <_> - - 0 -1 39 1431368960 -183437936 -537002499 -137497097 - 1560590321 -84611081 -2097193 -513 - - -0.5905947685241699 0.5101932883262634 - - <_> - - 0 -1 120 -1645259691 2105491231 2130706431 
1458995007 - -8567536 -42483883 -33780003 -21004417 - - -0.4449204802513123 0.4490709304809570 - - <_> - - 0 -1 89 -612381022 -505806938 -362027516 -452985106 - 275854917 1920431639 -12600561 -134221825 - - -0.4693818688392639 0.4061094820499420 - - <_> - - 0 -1 14 -805573153 -161 -554172679 -530519488 -16779441 - 2000682871 -33604275 -150997129 - - -0.3600351214408875 0.5056326985359192 - - <_> - - 0 -1 67 6192 435166195 1467449341 2046691505 -1608493775 - -4755729 -1083162625 -71365637 - - -0.4459891915321350 0.4132415652275085 - - <_> - - 0 -1 86 -41689215 -3281034 1853357967 -420712635 -415924289 - -270209208 -1088293113 -825311232 - - -0.4466069042682648 0.4135067760944367 - - <_> - - 0 -1 80 -117391116 -42203396 2080374461 -188709 -542008165 - -356831940 -1091125345 -1073796897 - - -0.3394956290721893 0.5658645033836365 - - <_> - - 0 -1 75 -276830049 1378714472 -1342181951 757272098 - 1073740607 -282199241 -415761549 170896931 - - -0.5346512198448181 0.3584479391574860 - - <_> - - 0 -1 55 -796075825 -123166849 2113667055 -217530421 - -1107432194 -16385 -806359809 -391188771 - - -0.4379335641860962 0.4123645126819611 - - <_> - 10 - -0.8402050137519836 - - - <_> - - 0 -1 71 -890246622 15525883 -487690486 47116238 -1212319899 - -1291847681 -68159890 -469829921 - - -0.2670986354351044 0.6014143228530884 - - <_> - - 0 -1 31 -1361180685 -1898008841 -1090588811 -285410071 - -1074016265 -840443905 2147221487 -262145 - - -0.4149844348430634 0.4670888185501099 - - <_> - - 0 -1 40 1426190596 1899364271 2142731795 -142607505 - -508232452 -21563393 -41960001 -65 - - -0.4985891580581665 0.3719584941864014 - - <_> - - 0 -1 109 -201337965 10543906 -236498096 -746195597 - 1974565825 -15204415 921907633 -190058309 - - -0.4568729996681213 0.3965812027454376 - - <_> - - 0 -1 130 -595026732 -656401928 -268649235 -571490699 - -440600392 -133131 -358810952 -2004088646 - - -0.4770836830139160 0.3862601518630981 - - <_> - - 0 -1 66 941674740 -1107882114 1332789109 -67691015 - -1360463693 -1556612430 -609108546 733546933 - - -0.4877715110778809 0.3778986334800720 - - <_> - - 0 -1 49 -17114945 -240061474 1552871558 -82775604 -932393844 - -1308544889 -532635478 -99042357 - - -0.3721654713153839 0.4994400143623352 - - <_> - - 0 -1 133 -655906006 1405502603 -939205164 1884929228 - -498859222 559417357 -1928559445 -286264385 - - -0.3934195041656494 0.4769641458988190 - - <_> - - 0 -1 0 -335837777 1860677295 -90 -1946186226 931096183 - 251612987 2013265917 -671232197 - - -0.4323300719261169 0.4342164099216461 - - <_> - - 0 -1 103 37769424 -137772680 374692301 2002666345 -536176194 - -1644484728 807009019 1069089930 - - -0.4993278682231903 0.3665378093719482 - - <_> - 9 - -1.1974394321441650 - - - <_> - - 0 -1 43 -5505 2147462911 2143265466 -4511070 -16450 -257 - -201348440 -71333206 - - -0.3310225307941437 0.5624626278877258 - - <_> - - 0 -1 90 -136842268 -499330741 2015250980 -87107126 - -641665744 -788524639 -1147864792 -134892563 - - -0.5266560912132263 0.3704403042793274 - - <_> - - 0 -1 104 -146800880 -1780368555 2111170033 -140904684 - -16777551 -1946681885 -1646463595 -839131947 - - -0.4171888828277588 0.4540435671806335 - - <_> - - 0 -1 85 -832054034 -981663763 -301990281 -578814081 - -932319000 -1997406723 -33555201 -69206017 - - -0.4556705355644226 0.3704262077808380 - - <_> - - 0 -1 24 -118492417 -1209026825 1119023838 -1334313353 - 1112948738 -297319313 1378887291 -139469193 - - -0.4182529747486115 0.4267231225967407 - - <_> - - 0 -1 78 -1714382628 -2353704 -112094959 -549613092 - -1567058760 
-1718550464 -342315012 -1074972227 - - -0.3625369668006897 0.4684656262397766 - - <_> - - 0 -1 5 -85219702 316836394 -33279 1904970288 2117267315 - -260901769 -621461759 -88607770 - - -0.4742925167083740 0.3689507246017456 - - <_> - - 0 -1 11 -294654041 -353603585 -1641159686 -50331921 - -2080899877 1145569279 -143132713 -152044037 - - -0.3666271567344666 0.4580127298831940 - - <_> - - 0 -1 32 1887453658 -638545712 -1877976819 -34320972 - -1071067983 -661345416 -583338277 1060190561 - - -0.4567637443542481 0.3894708156585693 - - <_> - 9 - -0.5733128190040588 - - - <_> - - 0 -1 122 -994063296 1088745462 -318837116 -319881377 - 1102566613 1165490103 -121679694 -134744129 - - -0.4055117964744568 0.5487945079803467 - - <_> - - 0 -1 68 -285233233 -538992907 1811935199 -369234005 -529 - -20593 -20505 -1561401854 - - -0.3787897229194641 0.4532003402709961 - - <_> - - 0 -1 58 -1335245632 1968917183 1940861695 536816369 - -1226071367 -570908176 457026619 1000020667 - - -0.4258328974246979 0.4202791750431061 - - <_> - - 0 -1 94 -1360318719 -1979797897 -50435249 -18646473 - -608879292 -805306691 -269304244 -17840167 - - -0.4561023116111755 0.4002747833728790 - - <_> - - 0 -1 87 2062765935 -16449 -1275080721 -16406 45764335 - -1090552065 -772846337 -570464322 - - -0.4314672648906708 0.4086346626281738 - - <_> - - 0 -1 127 -536896021 1080817663 -738234288 -965478709 - -2082767969 1290855887 1993822934 -990381609 - - -0.4174543321132660 0.4249868988990784 - - <_> - - 0 -1 3 -818943025 168730891 -293610428 -79249354 669224671 - 621166734 1086506807 1473768907 - - -0.4321364760398865 0.4090838730335236 - - <_> - - 0 -1 79 -68895696 -67107736 -1414315879 -841676168 - -619843344 -1180610531 -1081990469 1043203389 - - -0.5018386244773865 0.3702533841133118 - - <_> - - 0 -1 116 -54002134 -543485719 -2124882422 -1437445858 - -115617074 -1195787391 -1096024366 -2140472445 - - -0.5037505626678467 0.3564981222152710 - - <_> - 9 - -0.4892596900463104 - - - <_> - - 0 -1 132 -67113211 2003808111 1862135111 846461923 -2752 - 2002237273 -273154752 1937223539 - - -0.2448196411132813 0.5689709186553955 - - <_> - - 0 -1 62 1179423888 -78064940 -611839555 -539167899 - -1289358360 -1650810108 -892540499 -1432827684 - - -0.4633283913135529 0.3587929606437683 - - <_> - - 0 -1 23 -285212705 -78450761 -656212031 -264050110 -27787425 - -1334349961 -547662981 -135796924 - - -0.3731099069118500 0.4290455579757690 - - <_> - - 0 -1 77 341863476 403702016 -550588417 1600194541 - -1080690735 951127993 -1388580949 -1153717473 - - -0.3658909499645233 0.4556473195552826 - - <_> - - 0 -1 22 -586880702 -204831512 -100644596 -39319550 - -1191150794 705692513 457203315 -75806957 - - -0.5214384198188782 0.3221037387847900 - - <_> - - 0 -1 72 -416546870 545911370 -673716192 -775559454 - -264113598 139424 -183369982 -204474641 - - -0.4289036989212036 0.4004956185817719 - - <_> - - 0 -1 50 -1026505020 -589692154 -1740499937 -1563770497 - 1348491006 -60710713 -1109853489 -633909413 - - -0.4621542394161224 0.3832748532295227 - - <_> - - 0 -1 108 -1448872304 -477895040 -1778390608 -772418127 - -1789923416 -1612057181 -805306693 -1415842113 - - -0.3711548447608948 0.4612701535224915 - - <_> - - 0 -1 92 407905424 -582449988 52654751 -1294472 -285103725 - -74633006 1871559083 1057955850 - - -0.5180652141571045 0.3205870389938355 - - <_> - 10 - -0.5911940932273865 - - - <_> - - 0 -1 81 4112 -1259563825 -846671428 -100902460 1838164148 - -74153752 -90653988 -1074263896 - - -0.2592592537403107 0.5873016119003296 - - <_> - - 0 -1 1 -285216785 -823206977 
-1085589 -1081346 1207959293 - 1157103471 2097133565 -2097169 - - -0.3801195919513702 0.4718827307224274 - - <_> - - 0 -1 121 -12465 -536875169 2147478367 2130706303 -37765492 - -866124467 -318782328 -1392509185 - - -0.3509117066860199 0.5094807147979736 - - <_> - - 0 -1 38 2147449663 -20741 -16794757 1945873146 -16710 -1 - -8406341 -67663041 - - -0.4068757295608521 0.4130136370658875 - - <_> - - 0 -1 17 -155191713 866117231 1651407483 548272812 -479201468 - -447742449 1354229504 -261884429 - - -0.4557141065597534 0.3539792001247406 - - <_> - - 0 -1 100 -225319378 -251682065 -492783986 -792341777 - -1287261695 1393643841 -11274182 -213909521 - - -0.4117803275585175 0.4118592441082001 - - <_> - - 0 -1 63 -382220122 -2002072729 -51404800 -371201558 - -923011069 -2135301457 -2066104743 -1042557441 - - -0.4008397758007050 0.4034757018089294 - - <_> - - 0 -1 101 -627353764 -48295149 1581203952 -436258614 - -105268268 -1435893445 -638126888 -1061107126 - - -0.5694189667701721 0.2964762747287750 - - <_> - - 0 -1 118 -8399181 1058107691 -621022752 -251003468 -12582915 - -574619739 -994397789 -1648362021 - - -0.3195341229438782 0.5294018983840942 - - <_> - - 0 -1 92 -348343812 -1078389516 1717960437 364735981 - -1783841602 -4883137 -457572354 -1076950384 - - -0.3365339040756226 0.5067458748817444 - - <_> - 10 - -0.7612916231155396 - - - <_> - - 0 -1 10 -1976661318 -287957604 -1659497122 -782068 43591089 - -453637880 1435470000 -1077438561 - - -0.4204545319080353 0.5165745615959168 - - <_> - - 0 -1 131 -67110925 14874979 -142633168 -1338923040 - 2046713291 -2067933195 1473503712 -789579837 - - -0.3762553930282593 0.4075302779674530 - - <_> - - 0 -1 83 -272814301 -1577073 -1118685 -305156120 -1052289 - -1073813756 -538971154 -355523038 - - -0.4253497421741486 0.3728055357933044 - - <_> - - 0 -1 135 -2233 -214486242 -538514758 573747007 -159390971 - 1994225489 -973738098 -203424005 - - -0.3601998090744019 0.4563256204128265 - - <_> - - 0 -1 115 -261031688 -1330369299 -641860609 1029570301 - -1306461192 -1196149518 -1529767778 683139823 - - -0.4034293889999390 0.4160816967487335 - - <_> - - 0 -1 64 -572993608 -34042628 -417865 -111109 -1433365268 - -19869715 -1920939864 -1279457063 - - -0.3620899617671967 0.4594142735004425 - - <_> - - 0 -1 36 -626275097 -615256993 1651946018 805366393 - 2016559730 -430780849 -799868165 -16580645 - - -0.3903816640377045 0.4381459355354309 - - <_> - - 0 -1 93 1354797300 -1090957603 1976418270 -1342502178 - -1851873892 -1194637077 -1153521668 -1108399474 - - -0.3591445386409760 0.4624078869819641 - - <_> - - 0 -1 91 68157712 1211368313 -304759523 1063017136 798797750 - -275513546 648167355 -1145357350 - - -0.4297670423984528 0.4023293554782867 - - <_> - - 0 -1 107 -546318240 -1628569602 -163577944 -537002306 - -545456389 -1325465645 -380446736 -1058473386 - - -0.5727006793022156 0.2995934784412384 - - <_> - - 0 0 3 5 - <_> - - 0 0 4 2 - <_> - - 0 0 6 3 - <_> - - 0 1 2 3 - <_> - - 0 1 3 3 - <_> - - 0 1 3 7 - <_> - - 0 4 3 3 - <_> - - 0 11 3 4 - <_> - - 0 12 8 4 - <_> - - 0 14 4 3 - <_> - - 1 0 5 3 - <_> - - 1 1 2 2 - <_> - - 1 3 3 1 - <_> - - 1 7 4 4 - <_> - - 1 12 2 2 - <_> - - 1 13 4 1 - <_> - - 1 14 4 3 - <_> - - 1 17 3 2 - <_> - - 2 0 2 3 - <_> - - 2 1 2 2 - <_> - - 2 2 4 6 - <_> - - 2 3 4 4 - <_> - - 2 7 2 1 - <_> - - 2 11 2 3 - <_> - - 2 17 3 2 - <_> - - 3 0 2 2 - <_> - - 3 1 7 3 - <_> - - 3 7 2 1 - <_> - - 3 7 2 4 - <_> - - 3 18 2 2 - <_> - - 4 0 2 3 - <_> - - 4 3 2 1 - <_> - - 4 6 2 1 - <_> - - 4 6 2 5 - <_> - - 4 7 5 2 - <_> - - 4 8 4 3 - <_> - - 4 18 2 2 - <_> - 
- 5 0 2 2 - <_> - - 5 3 4 4 - <_> - - 5 6 2 5 - <_> - - 5 9 2 2 - <_> - - 5 10 2 2 - <_> - - 6 3 4 4 - <_> - - 6 4 4 3 - <_> - - 6 5 2 3 - <_> - - 6 5 2 5 - <_> - - 6 5 4 3 - <_> - - 6 6 4 2 - <_> - - 6 6 4 4 - <_> - - 6 18 1 2 - <_> - - 6 21 2 1 - <_> - - 7 0 3 7 - <_> - - 7 4 2 3 - <_> - - 7 9 5 1 - <_> - - 7 21 2 1 - <_> - - 8 0 1 4 - <_> - - 8 5 2 2 - <_> - - 8 5 3 2 - <_> - - 8 17 3 1 - <_> - - 8 18 1 2 - <_> - - 9 0 5 3 - <_> - - 9 2 2 6 - <_> - - 9 5 1 1 - <_> - - 9 11 1 1 - <_> - - 9 16 1 1 - <_> - - 9 16 2 1 - <_> - - 9 17 1 1 - <_> - - 9 18 1 1 - <_> - - 10 5 1 2 - <_> - - 10 5 3 3 - <_> - - 10 7 1 5 - <_> - - 10 8 1 1 - <_> - - 10 9 1 1 - <_> - - 10 10 1 1 - <_> - - 10 10 1 2 - <_> - - 10 14 3 3 - <_> - - 10 15 1 1 - <_> - - 10 15 2 1 - <_> - - 10 16 1 1 - <_> - - 10 16 2 1 - <_> - - 10 17 1 1 - <_> - - 10 21 1 1 - <_> - - 11 3 2 2 - <_> - - 11 5 1 2 - <_> - - 11 5 3 3 - <_> - - 11 5 4 6 - <_> - - 11 6 1 1 - <_> - - 11 7 2 2 - <_> - - 11 8 1 2 - <_> - - 11 10 1 1 - <_> - - 11 10 1 2 - <_> - - 11 15 1 1 - <_> - - 11 17 1 1 - <_> - - 11 18 1 1 - <_> - - 12 0 2 2 - <_> - - 12 1 2 5 - <_> - - 12 2 4 1 - <_> - - 12 3 1 3 - <_> - - 12 7 3 4 - <_> - - 12 10 3 2 - <_> - - 12 11 1 1 - <_> - - 12 12 3 2 - <_> - - 12 14 4 3 - <_> - - 12 17 1 1 - <_> - - 12 21 2 1 - <_> - - 13 6 2 5 - <_> - - 13 7 3 5 - <_> - - 13 11 3 2 - <_> - - 13 17 2 2 - <_> - - 13 17 3 2 - <_> - - 13 18 1 2 - <_> - - 13 18 2 2 - <_> - - 14 0 2 2 - <_> - - 14 1 1 3 - <_> - - 14 2 3 2 - <_> - - 14 7 2 1 - <_> - - 14 13 2 1 - <_> - - 14 13 3 3 - <_> - - 14 17 2 2 - <_> - - 15 0 2 2 - <_> - - 15 0 2 3 - <_> - - 15 4 3 2 - <_> - - 15 4 3 6 - <_> - - 15 6 3 2 - <_> - - 15 11 3 4 - <_> - - 15 13 3 2 - <_> - - 15 17 2 2 - <_> - - 15 17 3 2 - <_> - - 16 1 2 3 - <_> - - 16 3 2 4 - <_> - - 16 6 1 1 - <_> - - 16 16 2 2 - <_> - - 17 1 2 2 - <_> - - 17 1 2 5 - <_> - - 17 12 2 2 - <_> - - 18 0 2 2 - + + + + + BOOST + LBP + 24 + 24 + + GAB + 0.9950000047683716 + 0.5000000000000000 + 0.9500000000000000 + 1 + 100 + + 256 + 20 + + + <_> + 3 + -0.7520892024040222 + + + <_> + + 0 -1 46 -67130709 -21569 -1426120013 -1275125205 -21585 + -16385 587145899 -24005 + + -0.6543210148811340 0.8888888955116272 + + <_> + + 0 -1 13 -163512766 -769593758 -10027009 -262145 -514457854 + -193593353 -524289 -1 + + -0.7739216089248657 0.7278633713722229 + + <_> + + 0 -1 2 -363936790 -893203669 -1337948010 -136907894 + 1088782736 -134217726 -741544961 -1590337 + + -0.7068563103675842 0.6761534214019775 + + <_> + 4 + -0.4872078299522400 + + + <_> + + 0 -1 84 2147483647 1946124287 -536870913 2147450879 + 738132490 1061101567 243204619 2147446655 + + -0.8083735704421997 0.7685696482658386 + + <_> + + 0 -1 21 2147483647 263176079 1879048191 254749487 1879048191 + -134252545 -268435457 801111999 + + -0.7698410153388977 0.6592915654182434 + + <_> + + 0 -1 106 -98110272 1610939566 -285484400 -850010381 + -189334372 -1671954433 -571026695 -262145 + + -0.7506558895111084 0.5444605946540833 + + <_> + + 0 -1 48 -798690576 -131075 1095771153 -237144073 -65569 -1 + -216727745 -69206049 + + -0.7775990366935730 0.5465461611747742 + + <_> + 4 + -1.1592328548431396 + + + <_> + + 0 -1 47 -21585 -20549 -100818262 -738254174 -20561 -36865 + -151016790 -134238549 + + -0.5601882934570313 0.7743113040924072 + + <_> + + 0 -1 12 -286003217 183435247 -268994614 -421330945 + -402686081 1090387966 -286785545 -402653185 + + -0.6124526262283325 0.6978127956390381 + + <_> + + 0 -1 26 -50347012 970882927 -50463492 -1253377 -134218251 + -50364513 -33619992 -172490753 + + 
-0.6114496588706970 0.6537628173828125 + + <_> + + 0 -1 8 -273 -135266321 1877977738 -2088243418 -134217987 + 2146926575 -18910642 1095231247 + + -0.6854077577590942 0.5403239130973816 + + <_> + 5 + -0.7562355995178223 + + + <_> + + 0 -1 96 -1273 1870659519 -20971602 -67633153 -134250731 + 2004875127 -250 -150995969 + + -0.4051094949245453 0.7584033608436585 + + <_> + + 0 -1 33 -868162224 -76810262 -4262145 -257 1465211989 + -268959873 -2656269 -524289 + + -0.7388162612915039 0.5340843200683594 + + <_> + + 0 -1 57 -12817 -49 -541103378 -152950 -38993 -20481 -1153876 + -72478976 + + -0.6582943797111511 0.5339496731758118 + + <_> + + 0 -1 125 -269484161 -452984961 -319816180 -1594032130 -2111 + -990117891 -488975296 -520947741 + + -0.5981323719024658 0.5323504805564880 + + <_> + + 0 -1 53 557787431 670265215 -1342193665 -1075892225 + 1998528318 1056964607 -33570977 -1 + + -0.6498787999153137 0.4913350641727448 + + <_> + 5 + -0.8085358142852783 + + + <_> + + 0 -1 60 -536873708 880195381 -16842788 -20971521 -176687276 + -168427659 -16777260 -33554626 + + -0.5278195738792419 0.6946372389793396 + + <_> + + 0 -1 7 -1 -62981529 -1090591130 805330978 -8388827 -41945787 + -39577 -531118985 + + -0.5206505060195923 0.6329920291900635 + + <_> + + 0 -1 98 -725287348 1347747543 -852489 -16809993 1489881036 + -167903241 -1 -1 + + -0.7516061067581177 0.4232024252414703 + + <_> + + 0 -1 44 -32777 1006582562 -65 935312171 -8388609 -1078198273 + -1 733886267 + + -0.7639313936233521 0.4123568832874298 + + <_> + + 0 -1 24 -85474705 2138828511 -1036436754 817625855 + 1123369029 -58796809 -1013468481 -194513409 + + -0.5123769044876099 0.5791834592819214 + + <_> + 5 + -0.5549971461296082 + + + <_> + + 0 -1 42 -17409 -20481 -268457797 -134239493 -17473 -1 -21829 + -21846 + + -0.3763174116611481 0.7298233509063721 + + <_> + + 0 -1 6 -805310737 -2098262358 -269504725 682502698 + 2147483519 1740574719 -1090519233 -268472385 + + -0.5352765917778015 0.5659480094909668 + + <_> + + 0 -1 61 -67109678 -6145 -8 -87884584 -20481 -1073762305 + -50856216 -16849696 + + -0.5678374171257019 0.4961479902267456 + + <_> + + 0 -1 123 -138428633 1002418167 -1359008245 -1908670465 + -1346685918 910098423 -1359010520 -1346371657 + + -0.5706262588500977 0.4572288393974304 + + <_> + + 0 -1 9 -89138513 -4196353 1256531674 -1330665426 1216308261 + -36190633 33498198 -151796633 + + -0.5344601869583130 0.4672054052352905 + + <_> + 5 + -0.8776460289955139 + + + <_> + + 0 -1 105 1073769576 206601725 -34013449 -33554433 -789514004 + -101384321 -690225153 -264193 + + -0.7700348496437073 0.5943940877914429 + + <_> + + 0 -1 30 -1432340997 -823623681 -49153 -34291724 -269484035 + -1342767105 -1078198273 -1277955 + + -0.5043668746948242 0.6151274442672730 + + <_> + + 0 -1 35 -1067385040 -195758209 -436748425 -134217731 + -50855988 -129 -1 -1 + + -0.6808040738105774 0.4667325913906097 + + <_> + + 0 -1 119 832534325 -34111555 -26050561 -423659521 -268468364 + 2105014143 -2114244 -17367185 + + -0.4927591383457184 0.5401885509490967 + + <_> + + 0 -1 82 -1089439888 -1080524865 2143059967 -1114121 + -1140949004 -3 -2361356 -739516 + + -0.6445107460021973 0.4227822124958038 + + <_> + 6 + -1.1139287948608398 + + + <_> + + 0 -1 52 -1074071553 -1074003969 -1 -1280135430 -5324817 -1 + -335548482 582134442 + + -0.5307556986808777 0.6258179545402527 + + <_> + + 0 -1 99 -706937396 -705364068 -540016724 -570495027 + -570630659 -587857963 -33628164 -35848193 + + -0.5227634310722351 0.5049746036529541 + + <_> + + 0 -1 18 -2035630093 42119158 -268503053 -1671444 
261017599 + 1325432815 1954394111 -805306449 + + -0.4983572661876679 0.5106441378593445 + + <_> + + 0 -1 111 -282529488 -1558073088 1426018736 -170526448 + -546832487 -5113037 -34243375 -570427929 + + -0.4990860521793366 0.5060507059097290 + + <_> + + 0 -1 92 1016332500 -606301707 915094269 -1080086049 + -1837027144 -1361600280 2147318747 1067975613 + + -0.5695009231567383 0.4460467398166657 + + <_> + + 0 -1 51 -656420166 -15413034 -141599534 -603435836 + 1505950458 -787556946 -79823438 -1326199134 + + -0.6590405106544495 0.3616424500942230 + + <_> + 7 + -0.8243625760078430 + + + <_> + + 0 -1 28 -901591776 -201916417 -262 -67371009 -143312112 + -524289 -41943178 -1 + + -0.4972776770591736 0.6027074456214905 + + <_> + + 0 -1 112 -4507851 -411340929 -268437513 -67502145 -17350859 + -32901 -71344315 -29377 + + -0.4383158981800079 0.5966237187385559 + + <_> + + 0 -1 69 -75894785 -117379438 -239063587 -12538500 1485072126 + 2076233213 2123118847 801906927 + + -0.6386105418205261 0.3977999985218048 + + <_> + + 0 -1 19 -823480413 786628589 -16876049 -1364262914 242165211 + 1315930109 -696268833 -455082829 + + -0.5512794256210327 0.4282079637050629 + + <_> + + 0 -1 73 -521411968 6746762 -1396236286 -2038436114 + -185612509 57669627 -143132877 -1041235973 + + -0.6418755054473877 0.3549866080284119 + + <_> + + 0 -1 126 -478153869 1076028979 -1645895615 1365298272 + -557859073 -339771473 1442574528 -1058802061 + + -0.4841901361942291 0.4668019413948059 + + <_> + + 0 -1 45 -246350404 -1650402048 -1610612745 -788400696 + 1467604861 -2787397 1476263935 -4481349 + + -0.5855734348297119 0.3879135847091675 + + <_> + 7 + -1.2237116098403931 + + + <_> + + 0 -1 114 -24819 1572863935 -16809993 -67108865 2146778388 + 1433927541 -268608444 -34865205 + + -0.2518476545810700 0.7088654041290283 + + <_> + + 0 -1 97 -1841359 -134271049 -32769 -5767369 -1116675 -2185 + -8231 -33603327 + + -0.4303432404994965 0.5283288359642029 + + <_> + + 0 -1 25 -1359507589 -1360593090 -1073778729 -269553812 + -809512977 1744707583 -41959433 -134758978 + + -0.4259553551673889 0.5440809130668640 + + <_> + + 0 -1 34 729753407 -134270989 -1140907329 -235200777 + 658456383 2147467263 -1140900929 -16385 + + -0.5605589151382446 0.4220733344554901 + + <_> + + 0 -1 134 -310380553 -420675595 -193005472 -353568129 + 1205338070 -990380036 887604324 -420544526 + + -0.5192656517028809 0.4399855434894562 + + <_> + + 0 -1 16 -1427119361 1978920959 -287119734 -487068946 + 114759245 -540578051 -707510259 -671660453 + + -0.5013077259063721 0.4570254683494568 + + <_> + + 0 -1 74 -738463762 -889949281 -328301948 -121832450 + -1142658284 -1863576559 2146417353 -263185 + + -0.4631414115428925 0.4790246188640595 + + <_> + 7 + -0.5544230937957764 + + + <_> + + 0 -1 113 -76228780 -65538 -1 -67174401 -148007 -33 -221796 + -272842924 + + -0.3949716091156006 0.6082032322883606 + + <_> + + 0 -1 110 369147696 -1625232112 2138570036 -1189900 790708019 + -1212613127 799948719 -4456483 + + -0.4855885505676270 0.4785369932651520 + + <_> + + 0 -1 37 784215839 -290015241 536832799 -402984963 + -1342414991 -838864897 -176769 -268456129 + + -0.4620285332202911 0.4989669024944305 + + <_> + + 0 -1 41 -486418688 -171915327 -340294900 -21938 -519766032 + -772751172 -73096060 -585322623 + + -0.6420643329620361 0.3624351918697357 + + <_> + + 0 -1 117 -33554953 -475332625 -1423463824 -2077230421 + -4849669 -2080505925 -219032928 -1071915349 + + -0.4820112884044647 0.4632140696048737 + + <_> + + 0 -1 65 -834130468 -134217476 -1349314083 -1073803559 + -619913764 -1449131844 
-1386890321 -1979118423 + + -0.4465552568435669 0.5061788558959961 + + <_> + + 0 -1 56 -285249779 1912569855 -16530 -1731022870 -1161904146 + -1342177297 -268439634 -1464078708 + + -0.5190586447715759 0.4441480338573456 + + <_> + 7 + -0.7161560654640198 + + + <_> + + 0 -1 20 1246232575 1078001186 -10027057 60102 -277348353 + -43646987 -1210581153 1195769615 + + -0.4323809444904327 0.5663768053054810 + + <_> + + 0 -1 15 -778583572 -612921106 -578775890 -4036478 + -1946580497 -1164766570 -1986687009 -12103599 + + -0.4588732719421387 0.4547033011913300 + + <_> + + 0 -1 129 -1073759445 2013231743 -1363169553 -1082459201 + -1414286549 868185983 -1356133589 -1077936257 + + -0.5218553543090820 0.4111092388629913 + + <_> + + 0 -1 102 -84148365 -2093417722 -1204850272 564290299 + -67121221 -1342177350 -1309195902 -776734797 + + -0.4920000731945038 0.4326725304126740 + + <_> + + 0 -1 88 -25694458 67104495 -290216278 -168563037 2083877442 + 1702788383 -144191964 -234882162 + + -0.4494568109512329 0.4448510706424713 + + <_> + + 0 -1 59 -857980836 904682741 -1612267521 232279415 + 1550862252 -574825221 -357380888 -4579409 + + -0.5180826783180237 0.3888972699642181 + + <_> + + 0 -1 27 -98549440 -137838400 494928389 -246013630 939541351 + -1196072350 -620603549 2137216273 + + -0.6081240773200989 0.3333222270011902 + + <_> + 8 + -0.6743940711021423 + + + <_> + + 0 -1 29 -150995201 2071191945 -1302151626 536934335 + -1059008937 914128709 1147328110 -268369925 + + -0.1790193915367127 0.6605972051620483 + + <_> + + 0 -1 128 -134509479 1610575703 -1342177289 1861484541 + -1107833788 1577058173 -333558568 -136319041 + + -0.3681024610996246 0.5139749646186829 + + <_> + + 0 -1 70 -1 1060154476 -1090984524 -630918524 -539492875 + 779616255 -839568424 -321 + + -0.3217232525348663 0.6171553134918213 + + <_> + + 0 -1 4 -269562385 -285029906 -791084350 -17923776 235286671 + 1275504943 1344390399 -966276889 + + -0.4373284578323364 0.4358185231685638 + + <_> + + 0 -1 76 17825984 -747628419 595427229 1474759671 575672208 + -1684005538 872217086 -1155858277 + + -0.4404836893081665 0.4601220190525055 + + <_> + + 0 -1 124 -336593039 1873735591 -822231622 -355795238 + -470820869 -1997537409 -1057132384 -1015285005 + + -0.4294152259826660 0.4452161788940430 + + <_> + + 0 -1 54 -834212130 -593694721 -322142257 -364892500 + -951029539 -302125121 -1615106053 -79249765 + + -0.3973052501678467 0.4854526817798615 + + <_> + + 0 -1 95 1342144479 2147431935 -33554561 -47873 -855685912 -1 + 1988052447 536827383 + + -0.7054683566093445 0.2697997391223908 + + <_> + 9 + -1.2042298316955566 + + + <_> + + 0 -1 39 1431368960 -183437936 -537002499 -137497097 + 1560590321 -84611081 -2097193 -513 + + -0.5905947685241699 0.5101932883262634 + + <_> + + 0 -1 120 -1645259691 2105491231 2130706431 1458995007 + -8567536 -42483883 -33780003 -21004417 + + -0.4449204802513123 0.4490709304809570 + + <_> + + 0 -1 89 -612381022 -505806938 -362027516 -452985106 + 275854917 1920431639 -12600561 -134221825 + + -0.4693818688392639 0.4061094820499420 + + <_> + + 0 -1 14 -805573153 -161 -554172679 -530519488 -16779441 + 2000682871 -33604275 -150997129 + + -0.3600351214408875 0.5056326985359192 + + <_> + + 0 -1 67 6192 435166195 1467449341 2046691505 -1608493775 + -4755729 -1083162625 -71365637 + + -0.4459891915321350 0.4132415652275085 + + <_> + + 0 -1 86 -41689215 -3281034 1853357967 -420712635 -415924289 + -270209208 -1088293113 -825311232 + + -0.4466069042682648 0.4135067760944367 + + <_> + + 0 -1 80 -117391116 -42203396 2080374461 -188709 -542008165 + 
-356831940 -1091125345 -1073796897 + + -0.3394956290721893 0.5658645033836365 + + <_> + + 0 -1 75 -276830049 1378714472 -1342181951 757272098 + 1073740607 -282199241 -415761549 170896931 + + -0.5346512198448181 0.3584479391574860 + + <_> + + 0 -1 55 -796075825 -123166849 2113667055 -217530421 + -1107432194 -16385 -806359809 -391188771 + + -0.4379335641860962 0.4123645126819611 + + <_> + 10 + -0.8402050137519836 + + + <_> + + 0 -1 71 -890246622 15525883 -487690486 47116238 -1212319899 + -1291847681 -68159890 -469829921 + + -0.2670986354351044 0.6014143228530884 + + <_> + + 0 -1 31 -1361180685 -1898008841 -1090588811 -285410071 + -1074016265 -840443905 2147221487 -262145 + + -0.4149844348430634 0.4670888185501099 + + <_> + + 0 -1 40 1426190596 1899364271 2142731795 -142607505 + -508232452 -21563393 -41960001 -65 + + -0.4985891580581665 0.3719584941864014 + + <_> + + 0 -1 109 -201337965 10543906 -236498096 -746195597 + 1974565825 -15204415 921907633 -190058309 + + -0.4568729996681213 0.3965812027454376 + + <_> + + 0 -1 130 -595026732 -656401928 -268649235 -571490699 + -440600392 -133131 -358810952 -2004088646 + + -0.4770836830139160 0.3862601518630981 + + <_> + + 0 -1 66 941674740 -1107882114 1332789109 -67691015 + -1360463693 -1556612430 -609108546 733546933 + + -0.4877715110778809 0.3778986334800720 + + <_> + + 0 -1 49 -17114945 -240061474 1552871558 -82775604 -932393844 + -1308544889 -532635478 -99042357 + + -0.3721654713153839 0.4994400143623352 + + <_> + + 0 -1 133 -655906006 1405502603 -939205164 1884929228 + -498859222 559417357 -1928559445 -286264385 + + -0.3934195041656494 0.4769641458988190 + + <_> + + 0 -1 0 -335837777 1860677295 -90 -1946186226 931096183 + 251612987 2013265917 -671232197 + + -0.4323300719261169 0.4342164099216461 + + <_> + + 0 -1 103 37769424 -137772680 374692301 2002666345 -536176194 + -1644484728 807009019 1069089930 + + -0.4993278682231903 0.3665378093719482 + + <_> + 9 + -1.1974394321441650 + + + <_> + + 0 -1 43 -5505 2147462911 2143265466 -4511070 -16450 -257 + -201348440 -71333206 + + -0.3310225307941437 0.5624626278877258 + + <_> + + 0 -1 90 -136842268 -499330741 2015250980 -87107126 + -641665744 -788524639 -1147864792 -134892563 + + -0.5266560912132263 0.3704403042793274 + + <_> + + 0 -1 104 -146800880 -1780368555 2111170033 -140904684 + -16777551 -1946681885 -1646463595 -839131947 + + -0.4171888828277588 0.4540435671806335 + + <_> + + 0 -1 85 -832054034 -981663763 -301990281 -578814081 + -932319000 -1997406723 -33555201 -69206017 + + -0.4556705355644226 0.3704262077808380 + + <_> + + 0 -1 24 -118492417 -1209026825 1119023838 -1334313353 + 1112948738 -297319313 1378887291 -139469193 + + -0.4182529747486115 0.4267231225967407 + + <_> + + 0 -1 78 -1714382628 -2353704 -112094959 -549613092 + -1567058760 -1718550464 -342315012 -1074972227 + + -0.3625369668006897 0.4684656262397766 + + <_> + + 0 -1 5 -85219702 316836394 -33279 1904970288 2117267315 + -260901769 -621461759 -88607770 + + -0.4742925167083740 0.3689507246017456 + + <_> + + 0 -1 11 -294654041 -353603585 -1641159686 -50331921 + -2080899877 1145569279 -143132713 -152044037 + + -0.3666271567344666 0.4580127298831940 + + <_> + + 0 -1 32 1887453658 -638545712 -1877976819 -34320972 + -1071067983 -661345416 -583338277 1060190561 + + -0.4567637443542481 0.3894708156585693 + + <_> + 9 + -0.5733128190040588 + + + <_> + + 0 -1 122 -994063296 1088745462 -318837116 -319881377 + 1102566613 1165490103 -121679694 -134744129 + + -0.4055117964744568 0.5487945079803467 + + <_> + + 0 -1 68 -285233233 -538992907 
1811935199 -369234005 -529 + -20593 -20505 -1561401854 + + -0.3787897229194641 0.4532003402709961 + + <_> + + 0 -1 58 -1335245632 1968917183 1940861695 536816369 + -1226071367 -570908176 457026619 1000020667 + + -0.4258328974246979 0.4202791750431061 + + <_> + + 0 -1 94 -1360318719 -1979797897 -50435249 -18646473 + -608879292 -805306691 -269304244 -17840167 + + -0.4561023116111755 0.4002747833728790 + + <_> + + 0 -1 87 2062765935 -16449 -1275080721 -16406 45764335 + -1090552065 -772846337 -570464322 + + -0.4314672648906708 0.4086346626281738 + + <_> + + 0 -1 127 -536896021 1080817663 -738234288 -965478709 + -2082767969 1290855887 1993822934 -990381609 + + -0.4174543321132660 0.4249868988990784 + + <_> + + 0 -1 3 -818943025 168730891 -293610428 -79249354 669224671 + 621166734 1086506807 1473768907 + + -0.4321364760398865 0.4090838730335236 + + <_> + + 0 -1 79 -68895696 -67107736 -1414315879 -841676168 + -619843344 -1180610531 -1081990469 1043203389 + + -0.5018386244773865 0.3702533841133118 + + <_> + + 0 -1 116 -54002134 -543485719 -2124882422 -1437445858 + -115617074 -1195787391 -1096024366 -2140472445 + + -0.5037505626678467 0.3564981222152710 + + <_> + 9 + -0.4892596900463104 + + + <_> + + 0 -1 132 -67113211 2003808111 1862135111 846461923 -2752 + 2002237273 -273154752 1937223539 + + -0.2448196411132813 0.5689709186553955 + + <_> + + 0 -1 62 1179423888 -78064940 -611839555 -539167899 + -1289358360 -1650810108 -892540499 -1432827684 + + -0.4633283913135529 0.3587929606437683 + + <_> + + 0 -1 23 -285212705 -78450761 -656212031 -264050110 -27787425 + -1334349961 -547662981 -135796924 + + -0.3731099069118500 0.4290455579757690 + + <_> + + 0 -1 77 341863476 403702016 -550588417 1600194541 + -1080690735 951127993 -1388580949 -1153717473 + + -0.3658909499645233 0.4556473195552826 + + <_> + + 0 -1 22 -586880702 -204831512 -100644596 -39319550 + -1191150794 705692513 457203315 -75806957 + + -0.5214384198188782 0.3221037387847900 + + <_> + + 0 -1 72 -416546870 545911370 -673716192 -775559454 + -264113598 139424 -183369982 -204474641 + + -0.4289036989212036 0.4004956185817719 + + <_> + + 0 -1 50 -1026505020 -589692154 -1740499937 -1563770497 + 1348491006 -60710713 -1109853489 -633909413 + + -0.4621542394161224 0.3832748532295227 + + <_> + + 0 -1 108 -1448872304 -477895040 -1778390608 -772418127 + -1789923416 -1612057181 -805306693 -1415842113 + + -0.3711548447608948 0.4612701535224915 + + <_> + + 0 -1 92 407905424 -582449988 52654751 -1294472 -285103725 + -74633006 1871559083 1057955850 + + -0.5180652141571045 0.3205870389938355 + + <_> + 10 + -0.5911940932273865 + + + <_> + + 0 -1 81 4112 -1259563825 -846671428 -100902460 1838164148 + -74153752 -90653988 -1074263896 + + -0.2592592537403107 0.5873016119003296 + + <_> + + 0 -1 1 -285216785 -823206977 -1085589 -1081346 1207959293 + 1157103471 2097133565 -2097169 + + -0.3801195919513702 0.4718827307224274 + + <_> + + 0 -1 121 -12465 -536875169 2147478367 2130706303 -37765492 + -866124467 -318782328 -1392509185 + + -0.3509117066860199 0.5094807147979736 + + <_> + + 0 -1 38 2147449663 -20741 -16794757 1945873146 -16710 -1 + -8406341 -67663041 + + -0.4068757295608521 0.4130136370658875 + + <_> + + 0 -1 17 -155191713 866117231 1651407483 548272812 -479201468 + -447742449 1354229504 -261884429 + + -0.4557141065597534 0.3539792001247406 + + <_> + + 0 -1 100 -225319378 -251682065 -492783986 -792341777 + -1287261695 1393643841 -11274182 -213909521 + + -0.4117803275585175 0.4118592441082001 + + <_> + + 0 -1 63 -382220122 -2002072729 -51404800 -371201558 + 
-923011069 -2135301457 -2066104743 -1042557441 + + -0.4008397758007050 0.4034757018089294 + + <_> + + 0 -1 101 -627353764 -48295149 1581203952 -436258614 + -105268268 -1435893445 -638126888 -1061107126 + + -0.5694189667701721 0.2964762747287750 + + <_> + + 0 -1 118 -8399181 1058107691 -621022752 -251003468 -12582915 + -574619739 -994397789 -1648362021 + + -0.3195341229438782 0.5294018983840942 + + <_> + + 0 -1 92 -348343812 -1078389516 1717960437 364735981 + -1783841602 -4883137 -457572354 -1076950384 + + -0.3365339040756226 0.5067458748817444 + + <_> + 10 + -0.7612916231155396 + + + <_> + + 0 -1 10 -1976661318 -287957604 -1659497122 -782068 43591089 + -453637880 1435470000 -1077438561 + + -0.4204545319080353 0.5165745615959168 + + <_> + + 0 -1 131 -67110925 14874979 -142633168 -1338923040 + 2046713291 -2067933195 1473503712 -789579837 + + -0.3762553930282593 0.4075302779674530 + + <_> + + 0 -1 83 -272814301 -1577073 -1118685 -305156120 -1052289 + -1073813756 -538971154 -355523038 + + -0.4253497421741486 0.3728055357933044 + + <_> + + 0 -1 135 -2233 -214486242 -538514758 573747007 -159390971 + 1994225489 -973738098 -203424005 + + -0.3601998090744019 0.4563256204128265 + + <_> + + 0 -1 115 -261031688 -1330369299 -641860609 1029570301 + -1306461192 -1196149518 -1529767778 683139823 + + -0.4034293889999390 0.4160816967487335 + + <_> + + 0 -1 64 -572993608 -34042628 -417865 -111109 -1433365268 + -19869715 -1920939864 -1279457063 + + -0.3620899617671967 0.4594142735004425 + + <_> + + 0 -1 36 -626275097 -615256993 1651946018 805366393 + 2016559730 -430780849 -799868165 -16580645 + + -0.3903816640377045 0.4381459355354309 + + <_> + + 0 -1 93 1354797300 -1090957603 1976418270 -1342502178 + -1851873892 -1194637077 -1153521668 -1108399474 + + -0.3591445386409760 0.4624078869819641 + + <_> + + 0 -1 91 68157712 1211368313 -304759523 1063017136 798797750 + -275513546 648167355 -1145357350 + + -0.4297670423984528 0.4023293554782867 + + <_> + + 0 -1 107 -546318240 -1628569602 -163577944 -537002306 + -545456389 -1325465645 -380446736 -1058473386 + + -0.5727006793022156 0.2995934784412384 + + <_> + + 0 0 3 5 + <_> + + 0 0 4 2 + <_> + + 0 0 6 3 + <_> + + 0 1 2 3 + <_> + + 0 1 3 3 + <_> + + 0 1 3 7 + <_> + + 0 4 3 3 + <_> + + 0 11 3 4 + <_> + + 0 12 8 4 + <_> + + 0 14 4 3 + <_> + + 1 0 5 3 + <_> + + 1 1 2 2 + <_> + + 1 3 3 1 + <_> + + 1 7 4 4 + <_> + + 1 12 2 2 + <_> + + 1 13 4 1 + <_> + + 1 14 4 3 + <_> + + 1 17 3 2 + <_> + + 2 0 2 3 + <_> + + 2 1 2 2 + <_> + + 2 2 4 6 + <_> + + 2 3 4 4 + <_> + + 2 7 2 1 + <_> + + 2 11 2 3 + <_> + + 2 17 3 2 + <_> + + 3 0 2 2 + <_> + + 3 1 7 3 + <_> + + 3 7 2 1 + <_> + + 3 7 2 4 + <_> + + 3 18 2 2 + <_> + + 4 0 2 3 + <_> + + 4 3 2 1 + <_> + + 4 6 2 1 + <_> + + 4 6 2 5 + <_> + + 4 7 5 2 + <_> + + 4 8 4 3 + <_> + + 4 18 2 2 + <_> + + 5 0 2 2 + <_> + + 5 3 4 4 + <_> + + 5 6 2 5 + <_> + + 5 9 2 2 + <_> + + 5 10 2 2 + <_> + + 6 3 4 4 + <_> + + 6 4 4 3 + <_> + + 6 5 2 3 + <_> + + 6 5 2 5 + <_> + + 6 5 4 3 + <_> + + 6 6 4 2 + <_> + + 6 6 4 4 + <_> + + 6 18 1 2 + <_> + + 6 21 2 1 + <_> + + 7 0 3 7 + <_> + + 7 4 2 3 + <_> + + 7 9 5 1 + <_> + + 7 21 2 1 + <_> + + 8 0 1 4 + <_> + + 8 5 2 2 + <_> + + 8 5 3 2 + <_> + + 8 17 3 1 + <_> + + 8 18 1 2 + <_> + + 9 0 5 3 + <_> + + 9 2 2 6 + <_> + + 9 5 1 1 + <_> + + 9 11 1 1 + <_> + + 9 16 1 1 + <_> + + 9 16 2 1 + <_> + + 9 17 1 1 + <_> + + 9 18 1 1 + <_> + + 10 5 1 2 + <_> + + 10 5 3 3 + <_> + + 10 7 1 5 + <_> + + 10 8 1 1 + <_> + + 10 9 1 1 + <_> + + 10 10 1 1 + <_> + + 10 10 1 2 + <_> + + 10 14 3 3 + <_> + + 10 15 1 1 + <_> + + 10 15 2 1 + 
<_> + + 10 16 1 1 + <_> + + 10 16 2 1 + <_> + + 10 17 1 1 + <_> + + 10 21 1 1 + <_> + + 11 3 2 2 + <_> + + 11 5 1 2 + <_> + + 11 5 3 3 + <_> + + 11 5 4 6 + <_> + + 11 6 1 1 + <_> + + 11 7 2 2 + <_> + + 11 8 1 2 + <_> + + 11 10 1 1 + <_> + + 11 10 1 2 + <_> + + 11 15 1 1 + <_> + + 11 17 1 1 + <_> + + 11 18 1 1 + <_> + + 12 0 2 2 + <_> + + 12 1 2 5 + <_> + + 12 2 4 1 + <_> + + 12 3 1 3 + <_> + + 12 7 3 4 + <_> + + 12 10 3 2 + <_> + + 12 11 1 1 + <_> + + 12 12 3 2 + <_> + + 12 14 4 3 + <_> + + 12 17 1 1 + <_> + + 12 21 2 1 + <_> + + 13 6 2 5 + <_> + + 13 7 3 5 + <_> + + 13 11 3 2 + <_> + + 13 17 2 2 + <_> + + 13 17 3 2 + <_> + + 13 18 1 2 + <_> + + 13 18 2 2 + <_> + + 14 0 2 2 + <_> + + 14 1 1 3 + <_> + + 14 2 3 2 + <_> + + 14 7 2 1 + <_> + + 14 13 2 1 + <_> + + 14 13 3 3 + <_> + + 14 17 2 2 + <_> + + 15 0 2 2 + <_> + + 15 0 2 3 + <_> + + 15 4 3 2 + <_> + + 15 4 3 6 + <_> + + 15 6 3 2 + <_> + + 15 11 3 4 + <_> + + 15 13 3 2 + <_> + + 15 17 2 2 + <_> + + 15 17 3 2 + <_> + + 16 1 2 3 + <_> + + 16 3 2 4 + <_> + + 16 6 1 1 + <_> + + 16 16 2 2 + <_> + + 17 1 2 2 + <_> + + 17 1 2 5 + <_> + + 17 12 2 2 + <_> + + 18 0 2 2 + diff --git a/library/linux32/libopencv_calib3d.so b/library/linux32/libopencv_calib3d.so new file mode 100644 index 0000000..37c62ef --- /dev/null +++ b/library/linux32/libopencv_calib3d.so @@ -0,0 +1 @@ +libopencv_calib3d.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_calib3d.so.2.4 b/library/linux32/libopencv_calib3d.so.2.4 new file mode 100644 index 0000000..9819e07 --- /dev/null +++ b/library/linux32/libopencv_calib3d.so.2.4 @@ -0,0 +1 @@ +libopencv_calib3d.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_calib3d.so.2.4.5 b/library/linux32/libopencv_calib3d.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_calib3d.so.2.4.5 rename to library/linux32/libopencv_calib3d.so.2.4.5 diff --git a/library/linux32/libopencv_contrib.so b/library/linux32/libopencv_contrib.so new file mode 100644 index 0000000..d8a80d5 --- /dev/null +++ b/library/linux32/libopencv_contrib.so @@ -0,0 +1 @@ +libopencv_contrib.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_contrib.so.2.4 b/library/linux32/libopencv_contrib.so.2.4 new file mode 100644 index 0000000..3332855 --- /dev/null +++ b/library/linux32/libopencv_contrib.so.2.4 @@ -0,0 +1 @@ +libopencv_contrib.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_contrib.so.2.4.5 b/library/linux32/libopencv_contrib.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_contrib.so.2.4.5 rename to library/linux32/libopencv_contrib.so.2.4.5 diff --git a/library/linux32/libopencv_core.so b/library/linux32/libopencv_core.so new file mode 100644 index 0000000..4a68931 --- /dev/null +++ b/library/linux32/libopencv_core.so @@ -0,0 +1 @@ +libopencv_core.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_core.so.2.4 b/library/linux32/libopencv_core.so.2.4 new file mode 100644 index 0000000..ae2ae7b --- /dev/null +++ b/library/linux32/libopencv_core.so.2.4 @@ -0,0 +1 @@ +libopencv_core.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_core.so.2.4.5 b/library/linux32/libopencv_core.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_core.so.2.4.5 rename to library/linux32/libopencv_core.so.2.4.5 diff --git a/library/linux32/libopencv_features2d.so b/library/linux32/libopencv_features2d.so new file 
mode 100644 index 0000000..171141c --- /dev/null +++ b/library/linux32/libopencv_features2d.so @@ -0,0 +1 @@ +libopencv_features2d.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_features2d.so.2.4 b/library/linux32/libopencv_features2d.so.2.4 new file mode 100644 index 0000000..5cd3acb --- /dev/null +++ b/library/linux32/libopencv_features2d.so.2.4 @@ -0,0 +1 @@ +libopencv_features2d.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_features2d.so.2.4.5 b/library/linux32/libopencv_features2d.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_features2d.so.2.4.5 rename to library/linux32/libopencv_features2d.so.2.4.5 diff --git a/library/linux32/libopencv_flann.so b/library/linux32/libopencv_flann.so new file mode 100644 index 0000000..818d581 --- /dev/null +++ b/library/linux32/libopencv_flann.so @@ -0,0 +1 @@ +libopencv_flann.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_flann.so.2.4 b/library/linux32/libopencv_flann.so.2.4 new file mode 100644 index 0000000..fd7593e --- /dev/null +++ b/library/linux32/libopencv_flann.so.2.4 @@ -0,0 +1 @@ +libopencv_flann.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_flann.so.2.4.5 b/library/linux32/libopencv_flann.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_flann.so.2.4.5 rename to library/linux32/libopencv_flann.so.2.4.5 diff --git a/library/linux32/libopencv_gpu.so b/library/linux32/libopencv_gpu.so new file mode 100644 index 0000000..61edaa4 --- /dev/null +++ b/library/linux32/libopencv_gpu.so @@ -0,0 +1 @@ +libopencv_gpu.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_gpu.so.2.4 b/library/linux32/libopencv_gpu.so.2.4 new file mode 100644 index 0000000..a72f295 --- /dev/null +++ b/library/linux32/libopencv_gpu.so.2.4 @@ -0,0 +1 @@ +libopencv_gpu.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_gpu.so.2.4.5 b/library/linux32/libopencv_gpu.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_gpu.so.2.4.5 rename to library/linux32/libopencv_gpu.so.2.4.5 diff --git a/library/linux32/libopencv_highgui.so b/library/linux32/libopencv_highgui.so new file mode 100644 index 0000000..d95a21f --- /dev/null +++ b/library/linux32/libopencv_highgui.so @@ -0,0 +1 @@ +libopencv_highgui.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_highgui.so.2.4 b/library/linux32/libopencv_highgui.so.2.4 new file mode 100644 index 0000000..773f303 --- /dev/null +++ b/library/linux32/libopencv_highgui.so.2.4 @@ -0,0 +1 @@ +libopencv_highgui.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_highgui.so.2.4.5 b/library/linux32/libopencv_highgui.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_highgui.so.2.4.5 rename to library/linux32/libopencv_highgui.so.2.4.5 diff --git a/library/linux32/libopencv_imgproc.so b/library/linux32/libopencv_imgproc.so new file mode 100644 index 0000000..70e4328 --- /dev/null +++ b/library/linux32/libopencv_imgproc.so @@ -0,0 +1 @@ +libopencv_imgproc.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_imgproc.so.2.4 b/library/linux32/libopencv_imgproc.so.2.4 new file mode 100644 index 0000000..e8d4579 --- /dev/null +++ b/library/linux32/libopencv_imgproc.so.2.4 @@ -0,0 +1 @@ +libopencv_imgproc.so.2.4.5 \ No newline at end of file diff --git 
a/lib/linux32/libopencv_imgproc.so.2.4.5 b/library/linux32/libopencv_imgproc.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_imgproc.so.2.4.5 rename to library/linux32/libopencv_imgproc.so.2.4.5 diff --git a/lib/linux32/libopencv_java245.so b/library/linux32/libopencv_java245.so old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_java245.so rename to library/linux32/libopencv_java245.so diff --git a/library/linux32/libopencv_legacy.so b/library/linux32/libopencv_legacy.so new file mode 100644 index 0000000..1afd5e1 --- /dev/null +++ b/library/linux32/libopencv_legacy.so @@ -0,0 +1 @@ +libopencv_legacy.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_legacy.so.2.4 b/library/linux32/libopencv_legacy.so.2.4 new file mode 100644 index 0000000..0213de4 --- /dev/null +++ b/library/linux32/libopencv_legacy.so.2.4 @@ -0,0 +1 @@ +libopencv_legacy.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_legacy.so.2.4.5 b/library/linux32/libopencv_legacy.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_legacy.so.2.4.5 rename to library/linux32/libopencv_legacy.so.2.4.5 diff --git a/library/linux32/libopencv_ml.so b/library/linux32/libopencv_ml.so new file mode 100644 index 0000000..4e71450 --- /dev/null +++ b/library/linux32/libopencv_ml.so @@ -0,0 +1 @@ +libopencv_ml.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_ml.so.2.4 b/library/linux32/libopencv_ml.so.2.4 new file mode 100644 index 0000000..338dffa --- /dev/null +++ b/library/linux32/libopencv_ml.so.2.4 @@ -0,0 +1 @@ +libopencv_ml.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_ml.so.2.4.5 b/library/linux32/libopencv_ml.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_ml.so.2.4.5 rename to library/linux32/libopencv_ml.so.2.4.5 diff --git a/library/linux32/libopencv_nonfree.so b/library/linux32/libopencv_nonfree.so new file mode 100644 index 0000000..73c1613 --- /dev/null +++ b/library/linux32/libopencv_nonfree.so @@ -0,0 +1 @@ +libopencv_nonfree.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_nonfree.so.2.4 b/library/linux32/libopencv_nonfree.so.2.4 new file mode 100644 index 0000000..2d6c369 --- /dev/null +++ b/library/linux32/libopencv_nonfree.so.2.4 @@ -0,0 +1 @@ +libopencv_nonfree.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_nonfree.so.2.4.5 b/library/linux32/libopencv_nonfree.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_nonfree.so.2.4.5 rename to library/linux32/libopencv_nonfree.so.2.4.5 diff --git a/library/linux32/libopencv_objdetect.so b/library/linux32/libopencv_objdetect.so new file mode 100644 index 0000000..3c4cef9 --- /dev/null +++ b/library/linux32/libopencv_objdetect.so @@ -0,0 +1 @@ +libopencv_objdetect.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_objdetect.so.2.4 b/library/linux32/libopencv_objdetect.so.2.4 new file mode 100644 index 0000000..2be60de --- /dev/null +++ b/library/linux32/libopencv_objdetect.so.2.4 @@ -0,0 +1 @@ +libopencv_objdetect.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_objdetect.so.2.4.5 b/library/linux32/libopencv_objdetect.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_objdetect.so.2.4.5 rename to 
library/linux32/libopencv_objdetect.so.2.4.5 diff --git a/library/linux32/libopencv_photo.so b/library/linux32/libopencv_photo.so new file mode 100644 index 0000000..387bc42 --- /dev/null +++ b/library/linux32/libopencv_photo.so @@ -0,0 +1 @@ +libopencv_photo.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_photo.so.2.4 b/library/linux32/libopencv_photo.so.2.4 new file mode 100644 index 0000000..45b8eb2 --- /dev/null +++ b/library/linux32/libopencv_photo.so.2.4 @@ -0,0 +1 @@ +libopencv_photo.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_photo.so.2.4.5 b/library/linux32/libopencv_photo.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_photo.so.2.4.5 rename to library/linux32/libopencv_photo.so.2.4.5 diff --git a/library/linux32/libopencv_superres.so b/library/linux32/libopencv_superres.so new file mode 100644 index 0000000..dbad36c --- /dev/null +++ b/library/linux32/libopencv_superres.so @@ -0,0 +1 @@ +libopencv_superres.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_superres.so.2.4 b/library/linux32/libopencv_superres.so.2.4 new file mode 100644 index 0000000..42dc315 --- /dev/null +++ b/library/linux32/libopencv_superres.so.2.4 @@ -0,0 +1 @@ +libopencv_superres.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_superres.so.2.4.5 b/library/linux32/libopencv_superres.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_superres.so.2.4.5 rename to library/linux32/libopencv_superres.so.2.4.5 diff --git a/library/linux32/libopencv_ts.so b/library/linux32/libopencv_ts.so new file mode 100644 index 0000000..88f5375 --- /dev/null +++ b/library/linux32/libopencv_ts.so @@ -0,0 +1 @@ +libopencv_ts.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_ts.so.2.4 b/library/linux32/libopencv_ts.so.2.4 new file mode 100644 index 0000000..391bebc --- /dev/null +++ b/library/linux32/libopencv_ts.so.2.4 @@ -0,0 +1 @@ +libopencv_ts.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_ts.so.2.4.5 b/library/linux32/libopencv_ts.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_ts.so.2.4.5 rename to library/linux32/libopencv_ts.so.2.4.5 diff --git a/library/linux32/libopencv_video.so b/library/linux32/libopencv_video.so new file mode 100644 index 0000000..d5ddd6c --- /dev/null +++ b/library/linux32/libopencv_video.so @@ -0,0 +1 @@ +libopencv_video.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_video.so.2.4 b/library/linux32/libopencv_video.so.2.4 new file mode 100644 index 0000000..0e319f2 --- /dev/null +++ b/library/linux32/libopencv_video.so.2.4 @@ -0,0 +1 @@ +libopencv_video.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_video.so.2.4.5 b/library/linux32/libopencv_video.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_video.so.2.4.5 rename to library/linux32/libopencv_video.so.2.4.5 diff --git a/library/linux32/libopencv_videostab.so b/library/linux32/libopencv_videostab.so new file mode 100644 index 0000000..faeb668 --- /dev/null +++ b/library/linux32/libopencv_videostab.so @@ -0,0 +1 @@ +libopencv_videostab.so.2.4 \ No newline at end of file diff --git a/library/linux32/libopencv_videostab.so.2.4 b/library/linux32/libopencv_videostab.so.2.4 new file mode 100644 index 0000000..85a3c08 --- /dev/null +++ 
b/library/linux32/libopencv_videostab.so.2.4 @@ -0,0 +1 @@ +libopencv_videostab.so.2.4.5 \ No newline at end of file diff --git a/lib/linux32/libopencv_videostab.so.2.4.5 b/library/linux32/libopencv_videostab.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux32/libopencv_videostab.so.2.4.5 rename to library/linux32/libopencv_videostab.so.2.4.5 diff --git a/library/linux64/libopencv_calib3d.so b/library/linux64/libopencv_calib3d.so new file mode 100644 index 0000000..37c62ef --- /dev/null +++ b/library/linux64/libopencv_calib3d.so @@ -0,0 +1 @@ +libopencv_calib3d.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_calib3d.so.2.4 b/library/linux64/libopencv_calib3d.so.2.4 new file mode 100644 index 0000000..9819e07 --- /dev/null +++ b/library/linux64/libopencv_calib3d.so.2.4 @@ -0,0 +1 @@ +libopencv_calib3d.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_calib3d.so.2.4.5 b/library/linux64/libopencv_calib3d.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_calib3d.so.2.4.5 rename to library/linux64/libopencv_calib3d.so.2.4.5 diff --git a/library/linux64/libopencv_contrib.so b/library/linux64/libopencv_contrib.so new file mode 100644 index 0000000..d8a80d5 --- /dev/null +++ b/library/linux64/libopencv_contrib.so @@ -0,0 +1 @@ +libopencv_contrib.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_contrib.so.2.4 b/library/linux64/libopencv_contrib.so.2.4 new file mode 100644 index 0000000..3332855 --- /dev/null +++ b/library/linux64/libopencv_contrib.so.2.4 @@ -0,0 +1 @@ +libopencv_contrib.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_contrib.so.2.4.5 b/library/linux64/libopencv_contrib.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_contrib.so.2.4.5 rename to library/linux64/libopencv_contrib.so.2.4.5 diff --git a/library/linux64/libopencv_core.so b/library/linux64/libopencv_core.so new file mode 100644 index 0000000..4a68931 --- /dev/null +++ b/library/linux64/libopencv_core.so @@ -0,0 +1 @@ +libopencv_core.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_core.so.2.4 b/library/linux64/libopencv_core.so.2.4 new file mode 100644 index 0000000..ae2ae7b --- /dev/null +++ b/library/linux64/libopencv_core.so.2.4 @@ -0,0 +1 @@ +libopencv_core.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_core.so.2.4.5 b/library/linux64/libopencv_core.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_core.so.2.4.5 rename to library/linux64/libopencv_core.so.2.4.5 diff --git a/library/linux64/libopencv_features2d.so b/library/linux64/libopencv_features2d.so new file mode 100644 index 0000000..171141c --- /dev/null +++ b/library/linux64/libopencv_features2d.so @@ -0,0 +1 @@ +libopencv_features2d.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_features2d.so.2.4 b/library/linux64/libopencv_features2d.so.2.4 new file mode 100644 index 0000000..5cd3acb --- /dev/null +++ b/library/linux64/libopencv_features2d.so.2.4 @@ -0,0 +1 @@ +libopencv_features2d.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_features2d.so.2.4.5 b/library/linux64/libopencv_features2d.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_features2d.so.2.4.5 rename to library/linux64/libopencv_features2d.so.2.4.5 diff --git 
a/library/linux64/libopencv_flann.so b/library/linux64/libopencv_flann.so new file mode 100644 index 0000000..818d581 --- /dev/null +++ b/library/linux64/libopencv_flann.so @@ -0,0 +1 @@ +libopencv_flann.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_flann.so.2.4 b/library/linux64/libopencv_flann.so.2.4 new file mode 100644 index 0000000..fd7593e --- /dev/null +++ b/library/linux64/libopencv_flann.so.2.4 @@ -0,0 +1 @@ +libopencv_flann.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_flann.so.2.4.5 b/library/linux64/libopencv_flann.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_flann.so.2.4.5 rename to library/linux64/libopencv_flann.so.2.4.5 diff --git a/library/linux64/libopencv_gpu.so b/library/linux64/libopencv_gpu.so new file mode 100644 index 0000000..61edaa4 --- /dev/null +++ b/library/linux64/libopencv_gpu.so @@ -0,0 +1 @@ +libopencv_gpu.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_gpu.so.2.4 b/library/linux64/libopencv_gpu.so.2.4 new file mode 100644 index 0000000..a72f295 --- /dev/null +++ b/library/linux64/libopencv_gpu.so.2.4 @@ -0,0 +1 @@ +libopencv_gpu.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_gpu.so.2.4.5 b/library/linux64/libopencv_gpu.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_gpu.so.2.4.5 rename to library/linux64/libopencv_gpu.so.2.4.5 diff --git a/library/linux64/libopencv_highgui.so b/library/linux64/libopencv_highgui.so new file mode 100644 index 0000000..d95a21f --- /dev/null +++ b/library/linux64/libopencv_highgui.so @@ -0,0 +1 @@ +libopencv_highgui.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_highgui.so.2.4 b/library/linux64/libopencv_highgui.so.2.4 new file mode 100644 index 0000000..773f303 --- /dev/null +++ b/library/linux64/libopencv_highgui.so.2.4 @@ -0,0 +1 @@ +libopencv_highgui.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_highgui.so.2.4.5 b/library/linux64/libopencv_highgui.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_highgui.so.2.4.5 rename to library/linux64/libopencv_highgui.so.2.4.5 diff --git a/library/linux64/libopencv_imgproc.so b/library/linux64/libopencv_imgproc.so new file mode 100644 index 0000000..70e4328 --- /dev/null +++ b/library/linux64/libopencv_imgproc.so @@ -0,0 +1 @@ +libopencv_imgproc.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_imgproc.so.2.4 b/library/linux64/libopencv_imgproc.so.2.4 new file mode 100644 index 0000000..e8d4579 --- /dev/null +++ b/library/linux64/libopencv_imgproc.so.2.4 @@ -0,0 +1 @@ +libopencv_imgproc.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_imgproc.so.2.4.5 b/library/linux64/libopencv_imgproc.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_imgproc.so.2.4.5 rename to library/linux64/libopencv_imgproc.so.2.4.5 diff --git a/lib/linux64/libopencv_java245.so b/library/linux64/libopencv_java245.so old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_java245.so rename to library/linux64/libopencv_java245.so diff --git a/library/linux64/libopencv_legacy.so b/library/linux64/libopencv_legacy.so new file mode 100644 index 0000000..1afd5e1 --- /dev/null +++ b/library/linux64/libopencv_legacy.so @@ -0,0 +1 @@ +libopencv_legacy.so.2.4 \ No newline at end of file diff --git 
a/library/linux64/libopencv_legacy.so.2.4 b/library/linux64/libopencv_legacy.so.2.4 new file mode 100644 index 0000000..0213de4 --- /dev/null +++ b/library/linux64/libopencv_legacy.so.2.4 @@ -0,0 +1 @@ +libopencv_legacy.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_legacy.so.2.4.5 b/library/linux64/libopencv_legacy.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_legacy.so.2.4.5 rename to library/linux64/libopencv_legacy.so.2.4.5 diff --git a/library/linux64/libopencv_ml.so b/library/linux64/libopencv_ml.so new file mode 100644 index 0000000..4e71450 --- /dev/null +++ b/library/linux64/libopencv_ml.so @@ -0,0 +1 @@ +libopencv_ml.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_ml.so.2.4 b/library/linux64/libopencv_ml.so.2.4 new file mode 100644 index 0000000..338dffa --- /dev/null +++ b/library/linux64/libopencv_ml.so.2.4 @@ -0,0 +1 @@ +libopencv_ml.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_ml.so.2.4.5 b/library/linux64/libopencv_ml.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_ml.so.2.4.5 rename to library/linux64/libopencv_ml.so.2.4.5 diff --git a/library/linux64/libopencv_nonfree.so b/library/linux64/libopencv_nonfree.so new file mode 100644 index 0000000..73c1613 --- /dev/null +++ b/library/linux64/libopencv_nonfree.so @@ -0,0 +1 @@ +libopencv_nonfree.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_nonfree.so.2.4 b/library/linux64/libopencv_nonfree.so.2.4 new file mode 100644 index 0000000..2d6c369 --- /dev/null +++ b/library/linux64/libopencv_nonfree.so.2.4 @@ -0,0 +1 @@ +libopencv_nonfree.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_nonfree.so.2.4.5 b/library/linux64/libopencv_nonfree.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_nonfree.so.2.4.5 rename to library/linux64/libopencv_nonfree.so.2.4.5 diff --git a/library/linux64/libopencv_objdetect.so b/library/linux64/libopencv_objdetect.so new file mode 100644 index 0000000..3c4cef9 --- /dev/null +++ b/library/linux64/libopencv_objdetect.so @@ -0,0 +1 @@ +libopencv_objdetect.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_objdetect.so.2.4 b/library/linux64/libopencv_objdetect.so.2.4 new file mode 100644 index 0000000..2be60de --- /dev/null +++ b/library/linux64/libopencv_objdetect.so.2.4 @@ -0,0 +1 @@ +libopencv_objdetect.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_objdetect.so.2.4.5 b/library/linux64/libopencv_objdetect.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_objdetect.so.2.4.5 rename to library/linux64/libopencv_objdetect.so.2.4.5 diff --git a/library/linux64/libopencv_photo.so b/library/linux64/libopencv_photo.so new file mode 100644 index 0000000..387bc42 --- /dev/null +++ b/library/linux64/libopencv_photo.so @@ -0,0 +1 @@ +libopencv_photo.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_photo.so.2.4 b/library/linux64/libopencv_photo.so.2.4 new file mode 100644 index 0000000..45b8eb2 --- /dev/null +++ b/library/linux64/libopencv_photo.so.2.4 @@ -0,0 +1 @@ +libopencv_photo.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_photo.so.2.4.5 b/library/linux64/libopencv_photo.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_photo.so.2.4.5 rename to 
library/linux64/libopencv_photo.so.2.4.5 diff --git a/library/linux64/libopencv_stitching.so b/library/linux64/libopencv_stitching.so new file mode 100644 index 0000000..10b36f8 --- /dev/null +++ b/library/linux64/libopencv_stitching.so @@ -0,0 +1 @@ +libopencv_stitching.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_stitching.so.2.4 b/library/linux64/libopencv_stitching.so.2.4 new file mode 100644 index 0000000..2cf2908 --- /dev/null +++ b/library/linux64/libopencv_stitching.so.2.4 @@ -0,0 +1 @@ +libopencv_stitching.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_stitching.so.2.4.5 b/library/linux64/libopencv_stitching.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_stitching.so.2.4.5 rename to library/linux64/libopencv_stitching.so.2.4.5 diff --git a/library/linux64/libopencv_superres.so b/library/linux64/libopencv_superres.so new file mode 100644 index 0000000..dbad36c --- /dev/null +++ b/library/linux64/libopencv_superres.so @@ -0,0 +1 @@ +libopencv_superres.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_superres.so.2.4 b/library/linux64/libopencv_superres.so.2.4 new file mode 100644 index 0000000..42dc315 --- /dev/null +++ b/library/linux64/libopencv_superres.so.2.4 @@ -0,0 +1 @@ +libopencv_superres.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_superres.so.2.4.5 b/library/linux64/libopencv_superres.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_superres.so.2.4.5 rename to library/linux64/libopencv_superres.so.2.4.5 diff --git a/library/linux64/libopencv_ts.so b/library/linux64/libopencv_ts.so new file mode 100644 index 0000000..88f5375 --- /dev/null +++ b/library/linux64/libopencv_ts.so @@ -0,0 +1 @@ +libopencv_ts.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_ts.so.2.4 b/library/linux64/libopencv_ts.so.2.4 new file mode 100644 index 0000000..391bebc --- /dev/null +++ b/library/linux64/libopencv_ts.so.2.4 @@ -0,0 +1 @@ +libopencv_ts.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_ts.so.2.4.5 b/library/linux64/libopencv_ts.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_ts.so.2.4.5 rename to library/linux64/libopencv_ts.so.2.4.5 diff --git a/library/linux64/libopencv_video.so b/library/linux64/libopencv_video.so new file mode 100644 index 0000000..d5ddd6c --- /dev/null +++ b/library/linux64/libopencv_video.so @@ -0,0 +1 @@ +libopencv_video.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_video.so.2.4 b/library/linux64/libopencv_video.so.2.4 new file mode 100644 index 0000000..0e319f2 --- /dev/null +++ b/library/linux64/libopencv_video.so.2.4 @@ -0,0 +1 @@ +libopencv_video.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_video.so.2.4.5 b/library/linux64/libopencv_video.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_video.so.2.4.5 rename to library/linux64/libopencv_video.so.2.4.5 diff --git a/library/linux64/libopencv_videostab.so b/library/linux64/libopencv_videostab.so new file mode 100644 index 0000000..faeb668 --- /dev/null +++ b/library/linux64/libopencv_videostab.so @@ -0,0 +1 @@ +libopencv_videostab.so.2.4 \ No newline at end of file diff --git a/library/linux64/libopencv_videostab.so.2.4 b/library/linux64/libopencv_videostab.so.2.4 new file mode 100644 index 0000000..85a3c08 --- 
/dev/null +++ b/library/linux64/libopencv_videostab.so.2.4 @@ -0,0 +1 @@ +libopencv_videostab.so.2.4.5 \ No newline at end of file diff --git a/lib/linux64/libopencv_videostab.so.2.4.5 b/library/linux64/libopencv_videostab.so.2.4.5 old mode 100755 new mode 100644 similarity index 100% rename from lib/linux64/libopencv_videostab.so.2.4.5 rename to library/linux64/libopencv_videostab.so.2.4.5 diff --git a/lib/macosx64/libopencv_calib3d.2.4.5.dylib b/library/macosx64/libopencv_calib3d.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_calib3d.2.4.5.dylib rename to library/macosx64/libopencv_calib3d.2.4.5.dylib diff --git a/lib/macosx64/libopencv_calib3d.2.4.dylib b/library/macosx64/libopencv_calib3d.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_calib3d.2.4.dylib rename to library/macosx64/libopencv_calib3d.2.4.dylib diff --git a/lib/macosx64/libopencv_calib3d.dylib b/library/macosx64/libopencv_calib3d.dylib similarity index 100% rename from lib/macosx64/libopencv_calib3d.dylib rename to library/macosx64/libopencv_calib3d.dylib diff --git a/lib/macosx64/libopencv_contrib.2.4.5.dylib b/library/macosx64/libopencv_contrib.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_contrib.2.4.5.dylib rename to library/macosx64/libopencv_contrib.2.4.5.dylib diff --git a/lib/macosx64/libopencv_contrib.2.4.dylib b/library/macosx64/libopencv_contrib.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_contrib.2.4.dylib rename to library/macosx64/libopencv_contrib.2.4.dylib diff --git a/lib/macosx64/libopencv_contrib.dylib b/library/macosx64/libopencv_contrib.dylib similarity index 100% rename from lib/macosx64/libopencv_contrib.dylib rename to library/macosx64/libopencv_contrib.dylib diff --git a/lib/macosx64/libopencv_core.2.4.5.dylib b/library/macosx64/libopencv_core.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_core.2.4.5.dylib rename to library/macosx64/libopencv_core.2.4.5.dylib diff --git a/lib/macosx64/libopencv_core.2.4.dylib b/library/macosx64/libopencv_core.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_core.2.4.dylib rename to library/macosx64/libopencv_core.2.4.dylib diff --git a/lib/macosx64/libopencv_core.dylib b/library/macosx64/libopencv_core.dylib similarity index 100% rename from lib/macosx64/libopencv_core.dylib rename to library/macosx64/libopencv_core.dylib diff --git a/lib/macosx64/libopencv_features2d.2.4.5.dylib b/library/macosx64/libopencv_features2d.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_features2d.2.4.5.dylib rename to library/macosx64/libopencv_features2d.2.4.5.dylib diff --git a/lib/macosx64/libopencv_features2d.2.4.dylib b/library/macosx64/libopencv_features2d.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_features2d.2.4.dylib rename to library/macosx64/libopencv_features2d.2.4.dylib diff --git a/lib/macosx64/libopencv_features2d.dylib b/library/macosx64/libopencv_features2d.dylib similarity index 100% rename from lib/macosx64/libopencv_features2d.dylib rename to library/macosx64/libopencv_features2d.dylib diff --git a/lib/macosx64/libopencv_flann.2.4.5.dylib b/library/macosx64/libopencv_flann.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_flann.2.4.5.dylib rename to library/macosx64/libopencv_flann.2.4.5.dylib diff --git a/lib/macosx64/libopencv_flann.2.4.dylib b/library/macosx64/libopencv_flann.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_flann.2.4.dylib rename to 
library/macosx64/libopencv_flann.2.4.dylib diff --git a/lib/macosx64/libopencv_flann.dylib b/library/macosx64/libopencv_flann.dylib similarity index 100% rename from lib/macosx64/libopencv_flann.dylib rename to library/macosx64/libopencv_flann.dylib diff --git a/lib/macosx64/libopencv_gpu.2.4.5.dylib b/library/macosx64/libopencv_gpu.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_gpu.2.4.5.dylib rename to library/macosx64/libopencv_gpu.2.4.5.dylib diff --git a/lib/macosx64/libopencv_gpu.2.4.dylib b/library/macosx64/libopencv_gpu.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_gpu.2.4.dylib rename to library/macosx64/libopencv_gpu.2.4.dylib diff --git a/lib/macosx64/libopencv_gpu.dylib b/library/macosx64/libopencv_gpu.dylib similarity index 100% rename from lib/macosx64/libopencv_gpu.dylib rename to library/macosx64/libopencv_gpu.dylib diff --git a/lib/macosx64/libopencv_haartraining_engine.a b/library/macosx64/libopencv_haartraining_engine.a similarity index 100% rename from lib/macosx64/libopencv_haartraining_engine.a rename to library/macosx64/libopencv_haartraining_engine.a diff --git a/lib/macosx64/libopencv_highgui.2.4.5.dylib b/library/macosx64/libopencv_highgui.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_highgui.2.4.5.dylib rename to library/macosx64/libopencv_highgui.2.4.5.dylib diff --git a/lib/macosx64/libopencv_highgui.2.4.dylib b/library/macosx64/libopencv_highgui.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_highgui.2.4.dylib rename to library/macosx64/libopencv_highgui.2.4.dylib diff --git a/lib/macosx64/libopencv_highgui.dylib b/library/macosx64/libopencv_highgui.dylib similarity index 100% rename from lib/macosx64/libopencv_highgui.dylib rename to library/macosx64/libopencv_highgui.dylib diff --git a/lib/macosx64/libopencv_imgproc.2.4.5.dylib b/library/macosx64/libopencv_imgproc.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_imgproc.2.4.5.dylib rename to library/macosx64/libopencv_imgproc.2.4.5.dylib diff --git a/lib/macosx64/libopencv_imgproc.2.4.dylib b/library/macosx64/libopencv_imgproc.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_imgproc.2.4.dylib rename to library/macosx64/libopencv_imgproc.2.4.dylib diff --git a/lib/macosx64/libopencv_imgproc.dylib b/library/macosx64/libopencv_imgproc.dylib similarity index 100% rename from lib/macosx64/libopencv_imgproc.dylib rename to library/macosx64/libopencv_imgproc.dylib diff --git a/lib/macosx64/libopencv_java245.dylib b/library/macosx64/libopencv_java245.dylib similarity index 100% rename from lib/macosx64/libopencv_java245.dylib rename to library/macosx64/libopencv_java245.dylib diff --git a/lib/macosx64/libopencv_legacy.2.4.5.dylib b/library/macosx64/libopencv_legacy.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_legacy.2.4.5.dylib rename to library/macosx64/libopencv_legacy.2.4.5.dylib diff --git a/lib/macosx64/libopencv_legacy.2.4.dylib b/library/macosx64/libopencv_legacy.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_legacy.2.4.dylib rename to library/macosx64/libopencv_legacy.2.4.dylib diff --git a/lib/macosx64/libopencv_legacy.dylib b/library/macosx64/libopencv_legacy.dylib similarity index 100% rename from lib/macosx64/libopencv_legacy.dylib rename to library/macosx64/libopencv_legacy.dylib diff --git a/lib/macosx64/libopencv_ml.2.4.5.dylib b/library/macosx64/libopencv_ml.2.4.5.dylib similarity index 100% rename from 
lib/macosx64/libopencv_ml.2.4.5.dylib rename to library/macosx64/libopencv_ml.2.4.5.dylib diff --git a/lib/macosx64/libopencv_ml.2.4.dylib b/library/macosx64/libopencv_ml.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_ml.2.4.dylib rename to library/macosx64/libopencv_ml.2.4.dylib diff --git a/lib/macosx64/libopencv_ml.dylib b/library/macosx64/libopencv_ml.dylib similarity index 100% rename from lib/macosx64/libopencv_ml.dylib rename to library/macosx64/libopencv_ml.dylib diff --git a/lib/macosx64/libopencv_nonfree.2.4.5.dylib b/library/macosx64/libopencv_nonfree.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_nonfree.2.4.5.dylib rename to library/macosx64/libopencv_nonfree.2.4.5.dylib diff --git a/lib/macosx64/libopencv_nonfree.2.4.dylib b/library/macosx64/libopencv_nonfree.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_nonfree.2.4.dylib rename to library/macosx64/libopencv_nonfree.2.4.dylib diff --git a/lib/macosx64/libopencv_nonfree.dylib b/library/macosx64/libopencv_nonfree.dylib similarity index 100% rename from lib/macosx64/libopencv_nonfree.dylib rename to library/macosx64/libopencv_nonfree.dylib diff --git a/lib/macosx64/libopencv_objdetect.2.4.5.dylib b/library/macosx64/libopencv_objdetect.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_objdetect.2.4.5.dylib rename to library/macosx64/libopencv_objdetect.2.4.5.dylib diff --git a/lib/macosx64/libopencv_objdetect.2.4.dylib b/library/macosx64/libopencv_objdetect.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_objdetect.2.4.dylib rename to library/macosx64/libopencv_objdetect.2.4.dylib diff --git a/lib/macosx64/libopencv_objdetect.dylib b/library/macosx64/libopencv_objdetect.dylib similarity index 100% rename from lib/macosx64/libopencv_objdetect.dylib rename to library/macosx64/libopencv_objdetect.dylib diff --git a/lib/macosx64/libopencv_ocl.2.4.5.dylib b/library/macosx64/libopencv_ocl.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_ocl.2.4.5.dylib rename to library/macosx64/libopencv_ocl.2.4.5.dylib diff --git a/lib/macosx64/libopencv_ocl.2.4.dylib b/library/macosx64/libopencv_ocl.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_ocl.2.4.dylib rename to library/macosx64/libopencv_ocl.2.4.dylib diff --git a/lib/macosx64/libopencv_ocl.dylib b/library/macosx64/libopencv_ocl.dylib similarity index 100% rename from lib/macosx64/libopencv_ocl.dylib rename to library/macosx64/libopencv_ocl.dylib diff --git a/lib/macosx64/libopencv_photo.2.4.5.dylib b/library/macosx64/libopencv_photo.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_photo.2.4.5.dylib rename to library/macosx64/libopencv_photo.2.4.5.dylib diff --git a/lib/macosx64/libopencv_photo.2.4.dylib b/library/macosx64/libopencv_photo.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_photo.2.4.dylib rename to library/macosx64/libopencv_photo.2.4.dylib diff --git a/lib/macosx64/libopencv_photo.dylib b/library/macosx64/libopencv_photo.dylib similarity index 100% rename from lib/macosx64/libopencv_photo.dylib rename to library/macosx64/libopencv_photo.dylib diff --git a/lib/macosx64/libopencv_stitching.2.4.5.dylib b/library/macosx64/libopencv_stitching.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_stitching.2.4.5.dylib rename to library/macosx64/libopencv_stitching.2.4.5.dylib diff --git a/lib/macosx64/libopencv_stitching.2.4.dylib b/library/macosx64/libopencv_stitching.2.4.dylib similarity 
index 100% rename from lib/macosx64/libopencv_stitching.2.4.dylib rename to library/macosx64/libopencv_stitching.2.4.dylib diff --git a/lib/macosx64/libopencv_stitching.dylib b/library/macosx64/libopencv_stitching.dylib similarity index 100% rename from lib/macosx64/libopencv_stitching.dylib rename to library/macosx64/libopencv_stitching.dylib diff --git a/lib/macosx64/libopencv_superres.2.4.5.dylib b/library/macosx64/libopencv_superres.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_superres.2.4.5.dylib rename to library/macosx64/libopencv_superres.2.4.5.dylib diff --git a/lib/macosx64/libopencv_superres.2.4.dylib b/library/macosx64/libopencv_superres.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_superres.2.4.dylib rename to library/macosx64/libopencv_superres.2.4.dylib diff --git a/lib/macosx64/libopencv_superres.dylib b/library/macosx64/libopencv_superres.dylib similarity index 100% rename from lib/macosx64/libopencv_superres.dylib rename to library/macosx64/libopencv_superres.dylib diff --git a/lib/macosx64/libopencv_ts.2.4.5.dylib b/library/macosx64/libopencv_ts.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_ts.2.4.5.dylib rename to library/macosx64/libopencv_ts.2.4.5.dylib diff --git a/lib/macosx64/libopencv_ts.2.4.dylib b/library/macosx64/libopencv_ts.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_ts.2.4.dylib rename to library/macosx64/libopencv_ts.2.4.dylib diff --git a/lib/macosx64/libopencv_ts.dylib b/library/macosx64/libopencv_ts.dylib similarity index 100% rename from lib/macosx64/libopencv_ts.dylib rename to library/macosx64/libopencv_ts.dylib diff --git a/lib/macosx64/libopencv_video.2.4.5.dylib b/library/macosx64/libopencv_video.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_video.2.4.5.dylib rename to library/macosx64/libopencv_video.2.4.5.dylib diff --git a/lib/macosx64/libopencv_video.2.4.dylib b/library/macosx64/libopencv_video.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_video.2.4.dylib rename to library/macosx64/libopencv_video.2.4.dylib diff --git a/lib/macosx64/libopencv_video.dylib b/library/macosx64/libopencv_video.dylib similarity index 100% rename from lib/macosx64/libopencv_video.dylib rename to library/macosx64/libopencv_video.dylib diff --git a/lib/macosx64/libopencv_videostab.2.4.5.dylib b/library/macosx64/libopencv_videostab.2.4.5.dylib similarity index 100% rename from lib/macosx64/libopencv_videostab.2.4.5.dylib rename to library/macosx64/libopencv_videostab.2.4.5.dylib diff --git a/lib/macosx64/libopencv_videostab.2.4.dylib b/library/macosx64/libopencv_videostab.2.4.dylib similarity index 100% rename from lib/macosx64/libopencv_videostab.2.4.dylib rename to library/macosx64/libopencv_videostab.2.4.dylib diff --git a/lib/macosx64/libopencv_videostab.dylib b/library/macosx64/libopencv_videostab.dylib similarity index 100% rename from lib/macosx64/libopencv_videostab.dylib rename to library/macosx64/libopencv_videostab.dylib diff --git a/lib/opencv-245.jar b/library/opencv-245.jar similarity index 100% rename from lib/opencv-245.jar rename to library/opencv-245.jar diff --git a/library/opencv_processing.jar b/library/opencv_processing.jar new file mode 100644 index 0000000..ef01952 Binary files /dev/null and b/library/opencv_processing.jar differ diff --git a/lib/windows32/opencv_java245.dll b/library/windows32/opencv_java245.dll similarity index 100% rename from lib/windows32/opencv_java245.dll rename to 
library/windows32/opencv_java245.dll
diff --git a/lib/windows64/opencv_java245.dll b/library/windows64/opencv_java245.dll
similarity index 100%
rename from lib/windows64/opencv_java245.dll
rename to library/windows64/opencv_java245.dll
diff --git a/src/gab/opencv/Flow.java b/src/gab/opencv/Flow.java
index a9998fd..b05b72d 100644
--- a/src/gab/opencv/Flow.java
+++ b/src/gab/opencv/Flow.java
@@ -59,7 +59,7 @@ public PVector getTotalFlowInRegion(int x, int y, int w, int h) {
 
 	public PVector getAverageFlowInRegion(int x, int y, int w, int h) {
 		PVector total = getTotalFlowInRegion(x, y, w, h);
-		return new PVector(total.x/(w*h), total.y/(w*h));
+		return new PVector(total.x/(flow.width() * flow.height()), total.y/(flow.width()*flow.height()));
 	}
 
 	public PVector getTotalFlow() {
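The Flow.java hunk above changes how getAverageFlowInRegion() normalizes its result: the summed flow is now divided by the area of the whole flow field (flow.width() * flow.height()) rather than by the area of the requested region (w*h), so the returned vector is scaled against the full frame. A minimal sketch that exercises the changed method, assuming the Processing video library's Capture as the input source; the camera size, the sampled region, and the 10x display scale are illustrative choices, not part of this patch:

    import gab.opencv.*;
    import processing.video.*;

    OpenCV opencv;
    Capture cam;

    void setup() {
      size(320, 240);
      cam = new Capture(this, 320, 240);
      opencv = new OpenCV(this, 320, 240);
      cam.start();
    }

    void draw() {
      if (cam.available()) {
        cam.read();
      }
      opencv.loadImage(cam);
      opencv.calculateOpticalFlow();
      image(cam, 0, 0);
      opencv.drawOpticalFlow();
      // average motion over the left half of the frame
      PVector avg = opencv.getAverageFlowInRegion(0, 0, width/2, height);
      stroke(255, 0, 0);
      line(width/4, height/2, width/4 + avg.x*10, height/2 + avg.y*10);
    }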
diff --git a/src/gab/opencv/OpenCV.java b/src/gab/opencv/OpenCV.java
index e1e4ebb..68f7b22 100644
--- a/src/gab/opencv/OpenCV.java
+++ b/src/gab/opencv/OpenCV.java
@@ -1,9 +1,9 @@
 /**
- * ##library.name##
- * ##library.sentence##
- * ##library.url##
+ * OpenCV for Processing
+ * Computer vision with OpenCV.
+ * https://github.com/atduskgreg/opencv-processing
  *
- * Copyright ##copyright## ##author##
+ * Copyright (c) 2013 Greg Borenstein http://gregborenstein.com
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -20,9 +20,9 @@
  * Free Software Foundation, Inc., 59 Temple Place, Suite 330,
  * Boston, MA 02111-1307 USA
  *
- * @author      ##author##
- * @modified    ##date##
- * @version     ##library.prettyVersion## (##library.version##)
+ * @author      Greg Borenstein http://gregborenstein.com
+ * @modified    12/08/2014
+ * @version     0.5.2 (13)
  */
@@ -53,6 +53,7 @@
 import org.opencv.core.MatOfRect;
 import org.opencv.core.MatOfPoint;
 import org.opencv.core.MatOfPoint2f;
+import org.opencv.core.MatOfPoint3f;
 import org.opencv.core.MatOfInt;
 import org.opencv.core.MatOfFloat;
 import org.opencv.core.Rect;
@@ -69,74 +70,70 @@
 import processing.core.*;
 
 /**
- * OpenCV is the main class for using OpenCV for Processing. Most of the documentation is found here.
+ * This is a template class and can be used to start a new processing library or tool.
+ * Make sure you rename this class as well as the name of the example package 'template'
+ * to your own library or tool naming convention.
  *
- * OpenCV for Processing is a computer vision library for the Processing creative coding toolkit.
- * It's based on OpenCV, which is widely used throughout industry and academic research. OpenCV for
- * Processing provides friendly, Processing-style functions for doing all of the most common tasks
- * in computer vision: loading images, filtering them, detecting faces, finding contours, background
- * subtraction, optical flow, calculating histograms etc. OpenCV also provides access to all native
- * OpenCV data types and functions. So advanced users can do anything described in the OpenCV java
- * documentation: http://docs.opencv.org/java/
- *
- * A text is also underway to provide a narrative introduction to computer vision for beginners using
- * OpenCV for Processing: https://github.com/atduskgreg/opencv-processing-book/blob/master/book/toc.md
+ * @example Hello
  *
+ * (the tag @example followed by the name of an example included in folder 'examples' will
+ * automatically include the example in the javadoc.)
+ *
 */
 public class OpenCV {
-
-    PApplet parent;
-
-    public int width;
-    public int height;
-
-    private int roiWidth;
-    private int roiHeight;
-
-    public Mat matBGRA;
-    public Mat matR, matG, matB, matA;
-    public Mat matHSV;
-    public Mat matH, matS, matV;
-    public Mat matGray;
-    public Mat matROI;
-    public Mat nonROImat; // so that releaseROI() can return to color/gray as appropriate
-
-    private boolean useColor;
-    private boolean useROI;
-    public int colorSpace;
-
-    private PImage outputImage;
-    private PImage inputImage;
-
-    private boolean nativeLoaded;
-    private boolean isArm = false;
-
-    public CascadeClassifier classifier;
-    BackgroundSubtractorMOG backgroundSubtractor;
-    public Flow flow;
+
+    PApplet parent;
+
+    public int width;
+    public int height;
+
+    private int roiWidth;
+    private int roiHeight;
+
+    public Mat matBGRA;
+    public Mat matR, matG, matB, matA;
+    public Mat matHSV;
+    public Mat matH, matS, matV;
+    public Mat matGray;
+    public Mat matROI;
+    public Mat nonROImat; // so that releaseROI() can return to color/gray as appropriate
+
+    private boolean useColor;
+    private boolean useROI;
+    public int colorSpace;
+
+    private PImage outputImage;
+    private PImage inputImage;
+
+    private boolean nativeLoaded;
+    private boolean isArm = false;
+
+    public CascadeClassifier classifier;
+    BackgroundSubtractorMOG backgroundSubtractor;
+    public Flow flow;
 
-    public final static String VERSION = "##library.prettyVersion##";
-    public final static String CASCADE_FRONTALFACE = "haarcascade_frontalface_alt.xml";
-    public final static String CASCADE_PEDESTRIANS = "hogcascade_pedestrians.xml";
-    public final static String CASCADE_EYE = "haarcascade_eye.xml";
-    public final static String CASCADE_CLOCK = "haarcascade_clock.xml";
-    public final static String CASCADE_NOSE = "haarcascade_mcs_nose.xml";
-    public final static String CASCADE_MOUTH = "haarcascade_mcs_mouth.xml";
-    public final static String CASCADE_UPPERBODY = "haarcascade_upperbody.xml";
-    public final static String CASCADE_LOWERBODY = "haarcascade_lowerbody.xml";
-    public final static String CASCADE_FULLBODY = "haarcascade_fullbody.xml";
-    public final static String CASCADE_PEDESTRIAN = "hogcascade_pedestrians.xml";
+    public final static String VERSION = "0.5.2";
+    public final static String CASCADE_FRONTALFACE = "haarcascade_frontalface_alt.xml";
+    public final static String CASCADE_PEDESTRIANS = "hogcascade_pedestrians.xml";
+    public final static String CASCADE_EYE = "haarcascade_eye.xml";
+    public final static String CASCADE_CLOCK = "haarcascade_clock.xml";
+    public final static String CASCADE_NOSE = "haarcascade_mcs_nose.xml";
+    public final static String CASCADE_MOUTH = "haarcascade_mcs_mouth.xml";
+    public final static String CASCADE_UPPERBODY = "haarcascade_upperbody.xml";
+    public final static String CASCADE_LOWERBODY = "haarcascade_lowerbody.xml";
+    public final static String CASCADE_FULLBODY = "haarcascade_fullbody.xml";
+    public final static String CASCADE_PEDESTRIAN = "hogcascade_pedestrians.xml";
 
-    public final static String CASCADE_RIGHT_EAR = "haarcascade_mcs_rightear.xml";
-    public final static String CASCADE_PROFILEFACE = "haarcascade_profileface.xml";
-
-    // used for both Scharr edge detection orientation
-    // and flip(). Values are set for flip, arbitrary from POV of Scharr
-    public final static int HORIZONTAL = 1;
-    public final static int VERTICAL = 0;
-    public final static int BOTH = -1;
-
+    public final static String CASCADE_RIGHT_EAR = "haarcascade_mcs_rightear.xml";
+    public final static String CASCADE_PROFILEFACE = "haarcascade_profileface.xml";
+
+    // used for both Scharr edge detection orientation
+    // and flip(). Values are set for flip, arbitrary from POV of Scharr
+    public final static int HORIZONTAL = 1;
+    public final static int VERTICAL = 0;
+    public final static int BOTH = -1;
+
 /**
@@ -147,9 +144,9 @@ public class OpenCV {
 	 * @param pathToImg - A String with a path to the image to be loaded
 	 */
 	public OpenCV(PApplet theParent, String pathToImg){
-        initNative();
-        useColor = false;
-        loadFromString(theParent, pathToImg);
+        initNative();
+        useColor = false;
+        loadFromString(theParent, pathToImg);
 	}
 
 	/**
@@ -161,19 +158,19 @@ public OpenCV(PApplet theParent, String pathToImg){
 	 * @param useColor - (Optional) Set to true if you want to use the color version of the image for processing.
 	 */
 	public OpenCV(PApplet theParent, String pathToImg, boolean useColor){
-        initNative();
-        this.useColor = useColor;
-        if(useColor){
-            useColor(); // have to set the color space.
-        }
-        loadFromString(theParent, pathToImg);
+        initNative();
+        this.useColor = useColor;
+        if(useColor){
+            useColor(); // have to set the color space.
+        }
+        loadFromString(theParent, pathToImg);
 	}
 
 	private void loadFromString(PApplet theParent, String pathToImg){
-        parent = theParent;
-        PImage imageToLoad = parent.loadImage(pathToImg);
-        init(imageToLoad.width, imageToLoad.height);
-        loadImage(imageToLoad);
+        parent = theParent;
+        PImage imageToLoad = parent.loadImage(pathToImg);
+        init(imageToLoad.width, imageToLoad.height);
+        loadImage(imageToLoad);
 	}
 
 	/**
@@ -181,14 +178,14 @@ private void loadFromString(PApplet theParent, String pathToImg){
 	 * The image's pixels will be copied and prepared for processing.
 	 *
 	 * @param theParent
- *          A PApplet representing the user sketch, i.e "this"
+ *          A PApplet representing the user sketch, i.e. "this"
 	 * @param img
- *          A PImage to be loaded
+ *          A PImage to be loaded
 	 */
 	public OpenCV(PApplet theParent, PImage img){
-        initNative();
-        useColor = false;
-        loadFromPImage(theParent, img);
+        initNative();
+        useColor = false;
+        loadFromPImage(theParent, img);
 	}
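The constructor hunks above cover the library's two image-loading entry points. A short sketch contrasting them, loading from a path (grayscale processing by default) versus wrapping an existing PImage with the useColor flag; "test.jpg" is a placeholder asset name, not a file shipped with the library:

    import gab.opencv.*;

    OpenCV ocvGray;
    OpenCV ocvColor;

    void setup() {
      size(640, 240);
      // path variant: pixels are copied and processed as grayscale
      ocvGray = new OpenCV(this, "test.jpg");
      // PImage variant: keep the color mats around for color processing
      PImage img = loadImage("test.jpg");
      ocvColor = new OpenCV(this, img, true);
      image(ocvGray.getSnapshot(), 0, 0);
      image(ocvColor.getSnapshot(), 320, 0);
    }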
 /**
@@ -196,25 +193,25 @@ public OpenCV(PApplet theParent, PImage img){
 	 * The image's pixels will be copied and prepared for processing.
 	 *
 	 * @param theParent
- *          A PApplet representing the user sketch, i.e "this"
+ *          A PApplet representing the user sketch, i.e. "this"
 	 * @param img
- *          A PImage to be loaded
+ *          A PImage to be loaded
 	 * @param useColor
- *          (Optional) Set to true if you want to use the color version of the image for processing.
+ *          (Optional) Set to true if you want to use the color version of the image for processing.
 	 */
 	public OpenCV(PApplet theParent, PImage img, boolean useColor){
-        initNative();
-        this.useColor = useColor;
-        if(useColor){
-            useColor();
-        }
-        loadFromPImage(theParent, img);
+        initNative();
+        this.useColor = useColor;
+        if(useColor){
+            useColor();
+        }
+        loadFromPImage(theParent, img);
 	}
 
 	private void loadFromPImage(PApplet theParent, PImage img){
-        parent = theParent;
-        init(img.width, img.height);
-        loadImage(img);
+        parent = theParent;
+        init(img.width, img.height);
+        loadImage(img);
 	}
 
 	/**
@@ -228,7 +225,7 @@ private void loadFromPImage(PApplet theParent, PImage img){
 	 *
 	 */
 	public void useColor(){
-        useColor(PApplet.RGB);
+        useColor(PApplet.RGB);
 	}
 
 	/**
@@ -236,11 +233,11 @@ public void useColor(){
 	 * Get the colorSpace of the current color image. Will be either RGB or HSB.
 	 *
 	 * @return
- * 
+ * 
 	 *          The color space of the color mats. Either PApplet.RGB or PApplet.HSB
 	 */
 	public int getColorSpace(){
-        return colorSpace;
+        return colorSpace;
 	}
 
 	/**
@@ -251,39 +248,39 @@ public int getColorSpace(){
 	 *
 	 *
 	 * @param colorSpace
- *          The color space of the image to be processed. Either RGB or HSB.
+ *          The color space of the image to be processed. Either RGB or HSB.
 	 */
 	public void useColor(int colorSpace){
-        useColor = true;
-        if(colorSpace != PApplet.RGB && colorSpace != PApplet.HSB){
-            PApplet.println("ERROR: color space must be either RGB or HSB");
-        } else {
-            this.colorSpace = colorSpace;
-        }
-        
-        if(this.colorSpace == PApplet.HSB){
-            populateHSV();
-        }
+        useColor = true;
+        if(colorSpace != PApplet.RGB && colorSpace != PApplet.HSB){
+            PApplet.println("ERROR: color space must be either RGB or HSB");
+        } else {
+            this.colorSpace = colorSpace;
+        }
+        
+        if(this.colorSpace == PApplet.HSB){
+            populateHSV();
+        }
 	}
 
 	private void populateHSV(){
-        matHSV = imitate(matBGRA);
-        Imgproc.cvtColor(matBGRA, matHSV, Imgproc.COLOR_BGR2HSV);
-        ArrayList channels = new ArrayList();
-        Core.split(matHSV, channels);
-        
-        matH = channels.get(0);
-        matS = channels.get(1);
-        matV = channels.get(2);
+        matHSV = imitate(matBGRA);
+        Imgproc.cvtColor(matBGRA, matHSV, Imgproc.COLOR_BGR2HSV);
+        ArrayList channels = new ArrayList();
+        Core.split(matHSV, channels);
+        
+        matH = channels.get(0);
+        matS = channels.get(1);
+        matV = channels.get(2);
 	}
 
 	private void populateBGRA(){
-        ArrayList channels = new ArrayList();
-        Core.split(matBGRA, channels);
-        matB = channels.get(0);
-        matG = channels.get(1);
-        matR = channels.get(2);
-        matA = channels.get(3);
+        ArrayList channels = new ArrayList();
+        Core.split(matBGRA, channels);
+        matB = channels.get(0);
+        matG = channels.get(1);
+        matR = channels.get(2);
+        matA = channels.get(3);
 	}
 
 	/**
@@ -293,7 +290,7 @@ private void populateBGRA(){
 	 *
 	 */
 	public void useGray(){
-        useColor = false;
+        useColor = false;
 	}
 
 	/**
@@ -302,24 +299,24 @@ public void useGray(){
 	 * or the grayscale version.
 	 *
 	 * @return
- *          True if OpenCV is currently using the color version of the image.
+ *          True if OpenCV is currently using the color version of the image.
 	 */
 	public boolean getUseColor(){
-        return useColor;
+        return useColor;
 	}
 
 	private Mat getCurrentMat(){
-        if(useROI){
-            return matROI;
-            
-        } else{
-            
-            if(useColor){
-                return matBGRA;
-            } else{
-                return matGray;
-            }
-        }
+        if(useROI){
+            return matROI;
+            
+        } else{
+            
+            if(useColor){
+                return matBGRA;
+            } else{
+                return matGray;
+            }
+        }
 	}
 
 	/**
@@ -328,40 +325,40 @@ private Mat getCurrentMat(){
 	 *
 	 * See copy(PImage img).
 	 *
 	 * @param theParent
- *          A PApplet representing the user sketch, i.e "this"
+ *          A PApplet representing the user sketch, i.e. "this"
 	 * @param width
- *          int
+ *          int
 	 * @param height
- *          int
+ *          int
 	 */
-    public OpenCV(PApplet theParent, int width, int height) {
-        initNative();
-        parent = theParent;
-        init(width, height);
-    }
+    public OpenCV(PApplet theParent, int width, int height) {
+        initNative();
+        parent = theParent;
+        init(width, height);
+    }
 
 	private void init(int w, int h){
-        width = w;
-        height = h;
-        welcome();
-        setupWorkingImages();
-        setupFlow();
-        
-        matR = new Mat(height, width, CvType.CV_8UC1);
-        matG = new Mat(height, width, CvType.CV_8UC1);
-        matB = new Mat(height, width, CvType.CV_8UC1);
-        matA = new Mat(height, width, CvType.CV_8UC1);
-        matGray = new Mat(height, width, CvType.CV_8UC1);
-        
-        matBGRA = new Mat(height, width, CvType.CV_8UC4);
+        width = w;
+        height = h;
+        welcome();
+        setupWorkingImages();
+        setupFlow();
+        
+        matR = new Mat(height, width, CvType.CV_8UC1);
+        matG = new Mat(height, width, CvType.CV_8UC1);
+        matB = new Mat(height, width, CvType.CV_8UC1);
+        matA = new Mat(height, width, CvType.CV_8UC1);
+        matGray = new Mat(height, width, CvType.CV_8UC1);
+        
+        matBGRA = new Mat(height, width, CvType.CV_8UC4);
 	}
 
 	private void setupFlow(){
-        flow = new Flow(parent);
+        flow = new Flow(parent);
 	}
 
 	private void setupWorkingImages(){
-        outputImage = parent.createImage(width,height, PConstants.ARGB);
+        outputImage = parent.createImage(width,height, PConstants.ARGB);
 	}
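The color-space plumbing re-indented above — useColor(int) and populateHSV() — converts the BGRA buffer to HSV and splits it into the matH, matS and matV channel mats. A sketch of the HSB switch, assuming a hypothetical "flowers.jpg" asset; the fixed window size is arbitrary:

    import gab.opencv.*;

    OpenCV opencv;

    void setup() {
      size(640, 480);
      PImage img = loadImage("flowers.jpg");
      opencv = new OpenCV(this, img, true);   // keep the color version
      opencv.useColor(HSB);                   // fills matH, matS, matV
      println("color space: " + opencv.getColorSpace());
      image(opencv.getSnapshot(), 0, 0);
    }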
 	private String getLibPath() {
@@ -393,55 +390,55 @@ private String getLibPath() {
 	}
 
 	private void initNative(){
-        if(!nativeLoaded){
-            int bitsJVM = PApplet.parseInt(System.getProperty("sun.arch.data.model"));
-            
-            String osArch = System.getProperty("os.arch");
-            
-            String nativeLibPath = getLibPath();
-            
-            String path = null;
+        if(!nativeLoaded){
+            int bitsJVM = PApplet.parseInt(System.getProperty("sun.arch.data.model"));
+            
+            String osArch = System.getProperty("os.arch");
+            
+            String nativeLibPath = getLibPath();
+            
+            String path = null;
 
-            // determine the path to the platform-specific opencv libs
-            if (PApplet.platform == PConstants.WINDOWS) { //platform Windows
-                path = nativeLibPath + "windows" + bitsJVM;
-            }
-            if (PApplet.platform == PConstants.MACOSX) { //platform Mac
-                path = nativeLibPath + "macosx" + bitsJVM;
-            }
-            if (PApplet.platform == PConstants.LINUX) { //platform Linux
-                // attempt to detect arm architecture - is it fair to assume linux for ARM devices?
-                isArm = osArch.contains("arm");
-                path = isArm ? nativeLibPath + "arm7" : nativeLibPath + "linux" + bitsJVM;
-            }
-            
-            // ensure the determined path exists
-            try {
-                File libDir = new File(path);
-                if (libDir.exists()) {
-                    nativeLibPath = path;
-                }
-            } catch (NullPointerException e) {
-                // platform couldn't be determined
-                System.err.println("Cannot load local version of opencv_java245 : Linux 32/64, arm7, Windows 32 bits or Mac Os 64 bits are only avaible");
-                e.printStackTrace();
-            }
-            
-            // this check might be redundant now...
-            if((PApplet.platform == PConstants.MACOSX && bitsJVM == 64) || (PApplet.platform == PConstants.WINDOWS) || (PApplet.platform == PConstants.LINUX)){
-                try {
-                    addLibraryPath(nativeLibPath);
-                } catch (Exception e) {
-                    e.printStackTrace();
-                }
-                System.loadLibrary("opencv_java245");
-            }
-            else{
-                System.err.println("Cannot load local version of opencv_java245 : Linux 32/64, Windows 32 bits or Mac Os 64 bits are only avaible");
-            }
-            
-            nativeLoaded = true;
-        }
+            // determine the path to the platform-specific opencv libs
+            if (PApplet.platform == PConstants.WINDOWS) { //platform Windows
+                path = nativeLibPath + "windows" + bitsJVM;
+            }
+            if (PApplet.platform == PConstants.MACOSX) { //platform Mac
+                path = nativeLibPath + "macosx" + bitsJVM;
+            }
+            if (PApplet.platform == PConstants.LINUX) { //platform Linux
+                // attempt to detect arm architecture - is it fair to assume linux for ARM devices?
+                isArm = osArch.contains("arm");
+                path = isArm ? nativeLibPath + "arm7" : nativeLibPath + "linux" + bitsJVM;
+            }
+            
+            // ensure the determined path exists
+            try {
+                File libDir = new File(path);
+                if (libDir.exists()) {
+                    nativeLibPath = path;
+                }
+            } catch (NullPointerException e) {
+                // platform couldn't be determined
+                System.err.println("Cannot load local version of opencv_java245 : Linux 32/64, arm7, Windows 32 bits or Mac Os 64 bits are only available");
+                e.printStackTrace();
+            }
+            
+            // this check might be redundant now...
+            if((PApplet.platform == PConstants.MACOSX && bitsJVM == 64) || (PApplet.platform == PConstants.WINDOWS) || (PApplet.platform == PConstants.LINUX)){
+                try {
+                    addLibraryPath(nativeLibPath);
+                } catch (Exception e) {
+                    e.printStackTrace();
+                }
+                System.loadLibrary("opencv_java245");
+            }
+            else{
+                System.err.println("Cannot load local version of opencv_java245 : Linux 32/64, Windows 32 bits or Mac Os 64 bits are only available");
+            }
+            
+            nativeLoaded = true;
+        }
 	}
@@ -452,16 +449,16 @@ private void addLibraryPath(String path) throws Exception {
 	// which conflict with the arm-specific libs. To fix this, we remove the linux32 segments from the path.
 	//
 	// Alternatively, we could do one of the following:
-    // A) prepend to the path instead of append, forcing our libs to be used
-    // B) rename the libopencv_java245 in the arm7 dir and add logic to load it instead above in System.loadLibrary(...)
+    // A) prepend to the path instead of append, forcing our libs to be used
+    // B) rename the libopencv_java245 in the arm7 dir and add logic to load it instead above in System.loadLibrary(...)
 	if (isArm) {
-        if (originalPath.indexOf("linux32") != -1) {
-            originalPath = originalPath.replaceAll(":[^:]*?linux32", "");
-        }
+        if (originalPath.indexOf("linux32") != -1) {
+            originalPath = originalPath.replaceAll(":[^:]*?linux32", "");
+        }
 	}
 
-    System.setProperty("java.library.path", originalPath +System.getProperty("path.separator")+ path);
+    System.setProperty("java.library.path", originalPath +System.getProperty("path.separator")+ path);
 
 	//set sys_paths to null
 	final Field sysPathsField = ClassLoader.class.getDeclaredField("sys_paths");
@@ -469,706 +466,766 @@ private void addLibraryPath(String path) throws Exception {
 	sysPathsField.setAccessible(true);
 	sysPathsField.set(null, null);
 }
-    /**
-     * Load a cascade file for face or object detection.
-     * Expects one of:
-     * 
-     * 
-	 * OpenCV.CASCADE_FRONTALFACE
-	 * OpenCV.CASCADE_PEDESTRIANS
-	 * OpenCV.CASCADE_EYE			
-	 * OpenCV.CASCADE_CLOCK		
-	 * OpenCV.CASCADE_NOSE 		
-	 * OpenCV.CASCADE_MOUTH		
-	 * OpenCV.CASCADE_UPPERBODY 	
-	 * OpenCV.CASCADE_LOWERBODY 	
-	 * OpenCV.CASCADE_FULLBODY 	
-	 * OpenCV.CASCADE_PEDESTRIANS
-	 * OpenCV.CASCADE_RIGHT_EAR 	
-	 * OpenCV.CASCADE_PROFILEFACE
-	 * 
-	 * 
-	 * To pass your own cascade file, provide an absolute path and a second
-	 * argument of true, thusly:
-	 * 
-	 * 
-	 * opencv.loadCascade("/path/to/my/custom/cascade.xml", true)
-	 * 
-	 * 
-	 * (NB: ant build scripts copy the data folder outside of the
-	 * jar so that this will work.)
-	 * 
-	 * @param cascadeFileName
-	 *          The name of the cascade file to be loaded form within OpenCV for Processing.
-	 *          Must be one of the constants provided by this library
-	 */
-	public void loadCascade(String cascadeFileName){
+    /**
+     * Load a cascade file for face or object detection.
+     * Expects one of:
+     * 
+     * 
+     * OpenCV.CASCADE_FRONTALFACE
+     * OpenCV.CASCADE_PEDESTRIANS
+     * OpenCV.CASCADE_EYE           
+     * OpenCV.CASCADE_CLOCK     
+     * OpenCV.CASCADE_NOSE      
+     * OpenCV.CASCADE_MOUTH     
+     * OpenCV.CASCADE_UPPERBODY     
+     * OpenCV.CASCADE_LOWERBODY     
+     * OpenCV.CASCADE_FULLBODY  
+     * OpenCV.CASCADE_PEDESTRIANS
+     * OpenCV.CASCADE_RIGHT_EAR     
+     * OpenCV.CASCADE_PROFILEFACE
+     * 
+     * 
+     * To pass your own cascade file, provide an absolute path and a second
+     * argument of true, thusly:
+     * 
+     * opencv.loadCascade("/path/to/my/custom/cascade.xml", true)
+     * 
+     * 
+     * (NB: ant build scripts copy the data folder outside of the
+     * jar so that this will work.)
+     * 
+     * @param cascadeFileName
+     *          The name of the cascade file to be loaded from within OpenCV for Processing.
+     *          Must be one of the constants provided by this library
+     */
+    public void loadCascade(String cascadeFileName){
-        // localize path to cascade file to point at the library's data folder
-        String relativePath = "cascade-files/" + cascadeFileName;
-        String cascadePath = getLibPath();
-        cascadePath += relativePath;
-        
-        PApplet.println("Load cascade from: " + cascadePath);
+        // localize path to cascade file to point at the library's data folder
+        String relativePath = "cascade-files/" + cascadeFileName;
+        String cascadePath = getLibPath();
+        cascadePath += relativePath;
+        
+        //PApplet.println("Load cascade from: " + cascadePath);
-        classifier = new CascadeClassifier(cascadePath);
+        classifier = new CascadeClassifier(cascadePath);
 
         if(classifier.empty()){
-            PApplet.println("Cascade failed to load"); // raise exception here?
-        } else {
-            PApplet.println("Cascade loaded: " + cascadeFileName);
+            PApplet.println("Cascade failed to load"); // raise exception here?
+        }
+        /* else {
+            PApplet.println("Cascade loaded: " + cascadeFileName);
 		}
-	}
+        */
+    }
-	/**
-	 * Load a cascade file for face or object detection.
-	 * If absolute is true, cascadeFilePath must be an
-	 * absolute path to a cascade xml file. If it is false
-	 * then cascadeFilePath must be one of the options provided
-	 * by OpenCV for Processing as in the single-argument
-	 * version of this function.
-	 * 
-	 * @param cascadeFilePath
-	 *          A string. Either an absolute path to a cascade XML file or
-	 *          one of the constants provided by this library.
-	 * @param absolute
-	 *          Whether or not the cascadeFilePath is an absolute path to an XML file.
-	 */
-	public void loadCascade(String cascadeFilePath, boolean absolute){
-		if(absolute){
-			classifier = new CascadeClassifier(cascadeFilePath);
-			
-			if(classifier.empty()){
-				PApplet.println("Cascade failed to load"); // raise exception here?
-			} else {
-				PApplet.println("Cascade loaded from absolute path: " + cascadeFilePath);
-			}
-		} else {
-			loadCascade(cascadeFilePath);
-		}
-	}
-	
-	/**
-	 * Convert an array of OpenCV Rect objects into
-	 * an array of java.awt.Rectangle rectangles.
-	 * Especially useful when working with
-	 * classifier.detectMultiScale().
-	 * 
-	 * @param Rect[] rects
-	 * 
-	 * @return
-	 *          A Rectangle[] of java.awt.Rectangle
-	 */
-	public static Rectangle[] toProcessing(Rect[] rects){
-		Rectangle[] results = new Rectangle[rects.length];
-		for(int i = 0; i < rects.length; i++){
-			results[i] = new Rectangle(rects[i].x, rects[i].y, rects[i].width, rects[i].height);
-		}
-		return results;
-	}
-	
-	/**
-	 * Detect objects using the cascade classifier. loadCascade() must already
-	 * have been called to setup the classifier. See the OpenCV documentation
-	 * for details on the arguments: http://docs.opencv.org/java/org/opencv/objdetect/CascadeClassifier.html#detectMultiScale(org.opencv.core.Mat, org.opencv.core.MatOfRect, double, int, int, org.opencv.core.Size, org.opencv.core.Size)
-	 * 
-	 * A simpler version of detect() that doesn't need these arguments is also available.
-	 * 
-	 * @param scaleFactor
-	 * @param minNeighbors
-	 * @param flags
-	 * @param minSize
-	 * @param maxSize
-	 * @return
-	 *          An array of java.awt.Rectangle objects with the location, width, and height of each detected object.
- */ - public Rectangle[] detect(double scaleFactor , int minNeighbors , int flags, int minSize , int maxSize){ - Size minS = new Size(minSize, minSize); - Size maxS = new Size(maxSize, maxSize); - - MatOfRect detections = new MatOfRect(); - classifier.detectMultiScale(getCurrentMat(), detections, scaleFactor, minNeighbors, flags, minS, maxS ); + /** + * Load a cascade file for face or object detection. + * If absolute is true, cascadeFilePath must be an + * absolute path to a cascade xml file. If it is false + * then cascadeFilePath must be one of the options provided + * by OpenCV for Processing as in the single-argument + * version of this function. + * + * @param cascadeFilePath + * A string. Either an absolute path to a cascade XML file or + * one of the constants provided by this library. + * @param absolute + * Whether or not the cascadeFilePath is an absolute path to an XML file. + */ + public void loadCascade(String cascadeFilePath, boolean absolute){ + if(absolute){ + classifier = new CascadeClassifier(cascadeFilePath); + + if(classifier.empty()){ + PApplet.println("Cascade failed to load"); // raise exception here? + } + /* else { + PApplet.println("Cascade loaded from absolute path: " + cascadeFilePath); + } + */ + } else { + loadCascade(cascadeFilePath); + } + } + + /** + * Convert an array of OpenCV Rect objects into + * an array of java.awt.Rectangle rectangles. + * Especially useful when working with + * classifier.detectMultiScale(). + * + * @param Rect[] rects + * + * @return + * A Rectangle[] of java.awt.Rectangle + */ + public static Rectangle[] toProcessing(Rect[] rects){ + Rectangle[] results = new Rectangle[rects.length]; + for(int i = 0; i < rects.length; i++){ + results[i] = new Rectangle(rects[i].x, rects[i].y, rects[i].width, rects[i].height); + } + return results; + } + + /** + * Detect objects using the cascade classifier. loadCascade() must already + * have been called to setup the classifier. See the OpenCV documentation + * for details on the arguments: http://docs.opencv.org/java/org/opencv/objdetect/CascadeClassifier.html#detectMultiScale(org.opencv.core.Mat, org.opencv.core.MatOfRect, double, int, int, org.opencv.core.Size, org.opencv.core.Size) + * + * A simpler version of detect() that doesn't need these arguments is also available. + * + * @param scaleFactor + * @param minNeighbors + * @param flags + * @param minSize + * @param maxSize + * @return + * An array of java.awt.Rectangle objects with the location, width, and height of each detected object. + */ + public Rectangle[] detect(double scaleFactor , int minNeighbors , int flags, int minSize , int maxSize){ + Size minS = new Size(minSize, minSize); + Size maxS = new Size(maxSize, maxSize); + + MatOfRect detections = new MatOfRect(); + classifier.detectMultiScale(getCurrentMat(), detections, scaleFactor, minNeighbors, flags, minS, maxS ); - return OpenCV.toProcessing(detections.toArray()); - } - - /** - * Detect objects using the cascade classifier. loadCascade() must already - * have been called to setup the classifier. - * - * @return - * An array of java.awt.Rectnangle objects with the location, width, and height of each detected object. - */ - public Rectangle[] detect(){ - MatOfRect detections = new MatOfRect(); - classifier.detectMultiScale(getCurrentMat(), detections); - - return OpenCV.toProcessing(detections.toArray()); - } - - /** - * Setup background subtraction. 
After calling this function, - * updateBackground() must be called with each new frame - * you want to add to the running background subtraction calculation. - * - * For details on the arguments, see: - * http://docs.opencv.org/java/org/opencv/video/BackgroundSubtractorMOG.html#BackgroundSubtractorMOG(int, int, double) - * - * @param history - * @param nMixtures - * @param backgroundRatio - */ - public void startBackgroundSubtraction(int history, int nMixtures, double backgroundRatio){ - backgroundSubtractor = new BackgroundSubtractorMOG(history, nMixtures, backgroundRatio); - } - - /** - * Update the running background for background subtraction based on - * the current image loaded into OpenCV. startBackgroundSubtraction() - * must have been called before this to setup the background subtractor. - * - */ - public void updateBackground(){ - Mat foreground = imitate(getCurrentMat()); - backgroundSubtractor.apply(getCurrentMat(), foreground, 0.05); - setGray(foreground); - } - - /** - * Calculate the optical flow of the current image relative - * to a running series of images (typically frames from video). - * Optical flow is useful for detecting what parts of the image - * are moving and in what direction. - * - */ - public void calculateOpticalFlow(){ - flow.calculateOpticalFlow(getCurrentMat()); - } - - /* - * Get the total optical flow within a region of the image. - * Be sure to call calculateOpticalFlow() first. - * - */ - public PVector getTotalFlowInRegion(int x, int y, int w, int h) { - return flow.getTotalFlowInRegion(x, y, w, h); - } - - /* - * Get the average optical flow within a region of the image. - * Be sure to call calculateOpticalFlow() first. - * - */ - public PVector getAverageFlowInRegion(int x, int y, int w, int h) { - return flow.getAverageFlowInRegion(x,y,w,h); - } - - /* - * Get the total optical flow for the entire image. - * Be sure to call calculateOpticalFlow() first. - */ - public PVector getTotalFlow() { - return flow.getTotalFlow(); - } - - /* - * Get the average optical flow for the entire image. - * Be sure to call calculateOpticalFlow() first. - */ - public PVector getAverageFlow() { - return flow.getAverageFlow(); - } - - /* - * Get the optical flow at a single point in the image. - * Be sure to call calcuateOpticalFlow() first. - */ - public PVector getFlowAt(int x, int y){ - return flow.getFlowAt(x,y); - } - - /* - * Draw the optical flow. - * Be sure to call calcuateOpticalFlow() first. - */ - public void drawOpticalFlow(){ - flow.draw(); - } - - /** - * Flip the current image. - * - * @param direction - * One of: OpenCV.HORIZONTAL, OpenCV.VERTICAL, or OpenCV.BOTH - */ - public void flip(int direction){ - Core.flip(getCurrentMat(), getCurrentMat(), direction); - } - - /** - * - * Adjust the contrast of the image. Works on color or black and white images. - * - * @param amt - * Amount of contrast to apply. 0-1.0 reduces contrast. Above 1.0 increases contrast. - * - **/ - public void contrast(float amt){ - Scalar modifier; - if(useColor){ - modifier = new Scalar(amt,amt,amt,1); + return OpenCV.toProcessing(detections.toArray()); + } + + /** + * Detect objects using the cascade classifier. loadCascade() must already + * have been called to setup the classifier. + * + * @return + * An array of java.awt.Rectnangle objects with the location, width, and height of each detected object. 
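The detect() overloads in this hunk are the library's entry point for cascade-based object detection, and toProcessing() converts OpenCV's Rect results into java.awt.Rectangle for easy drawing. A minimal face-detection sketch against a still image — "team.jpg" is a hypothetical test image, and the green stroke is an arbitrary display choice:

    import gab.opencv.*;
    import java.awt.Rectangle;

    OpenCV opencv;

    void setup() {
      size(640, 480);
      PImage img = loadImage("team.jpg");
      opencv = new OpenCV(this, img);
      opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);
      Rectangle[] faces = opencv.detect();   // the no-argument overload
      image(img, 0, 0);
      noFill();
      stroke(0, 255, 0);
      for (Rectangle face : faces) {
        rect(face.x, face.y, face.width, face.height);
      }
    }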
+ + /** + * Set up background subtraction. After calling this function, + * updateBackground() must be called with each new frame + * you want to add to the running background subtraction calculation. + * + * For details on the arguments, see: + * http://docs.opencv.org/java/org/opencv/video/BackgroundSubtractorMOG.html#BackgroundSubtractorMOG(int, int, double) + * + * @param history + * @param nMixtures + * @param backgroundRatio + */ + public void startBackgroundSubtraction(int history, int nMixtures, double backgroundRatio){ + backgroundSubtractor = new BackgroundSubtractorMOG(history, nMixtures, backgroundRatio); + } + + /** + * Update the running background for background subtraction based on + * the current image loaded into OpenCV. startBackgroundSubtraction() + * must have been called before this to set up the background subtractor. + * + */ + public void updateBackground(){ + Mat foreground = imitate(getCurrentMat()); + backgroundSubtractor.apply(getCurrentMat(), foreground, 0.05); + setGray(foreground); + } + + /** + * Calculate the optical flow of the current image relative + * to a running series of images (typically frames from video). + * Optical flow is useful for detecting what parts of the image + * are moving and in what direction. + * + */ + public void calculateOpticalFlow(){ + flow.calculateOpticalFlow(getCurrentMat()); + } + + /* + * Get the total optical flow within a region of the image. + * Be sure to call calculateOpticalFlow() first. + * + */ + public PVector getTotalFlowInRegion(int x, int y, int w, int h) { + return flow.getTotalFlowInRegion(x, y, w, h); + } + + /* + * Get the average optical flow within a region of the image. + * Be sure to call calculateOpticalFlow() first. + * + */ + public PVector getAverageFlowInRegion(int x, int y, int w, int h) { + return flow.getAverageFlowInRegion(x,y,w,h); + } + + /* + * Get the total optical flow for the entire image. + * Be sure to call calculateOpticalFlow() first. + */ + public PVector getTotalFlow() { + return flow.getTotalFlow(); + } + + /* + * Get the average optical flow for the entire image. + * Be sure to call calculateOpticalFlow() first. + */ + public PVector getAverageFlow() { + return flow.getAverageFlow(); + } + + /* + * Get the optical flow at a single point in the image. + * Be sure to call calculateOpticalFlow() first. + */ + public PVector getFlowAt(int x, int y){ + return flow.getFlowAt(x,y); + } + + /* + * Draw the optical flow. + * Be sure to call calculateOpticalFlow() first. + */ + public void drawOpticalFlow(){ + flow.draw(); + }
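A sketch of the intended per-frame call pattern for the optical flow helpers above, assuming frames arrive as PImages (for example from Processing's video Capture, whose setup is omitted here):

// Hedged usage sketch: feed each new frame, then query or draw the flow.
opencv.loadImage(frame); // `frame` is an assumed PImage holding the new frame
opencv.calculateOpticalFlow();
PVector average = opencv.getAverageFlow(); // overall motion of the frame
opencv.drawOpticalFlow();
println("average flow: " + average.x + ", " + average.y);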
+ + /** + * Flip the current image. + * + * @param direction + * One of: OpenCV.HORIZONTAL, OpenCV.VERTICAL, or OpenCV.BOTH + */ + public void flip(int direction){ + Core.flip(getCurrentMat(), getCurrentMat(), direction); + } + + /** + * + * Adjust the contrast of the image. Works on color or black and white images. + * + * @param amt + * Amount of contrast to apply. 0-1.0 reduces contrast. Above 1.0 increases contrast. + * + **/ + public void contrast(float amt){ + Scalar modifier; + if(useColor){ + modifier = new Scalar(amt,amt,amt,1); - } else{ - modifier = new Scalar(amt); - } - - Core.multiply(getCurrentMat(), modifier, getCurrentMat()); - } - - /** - * Get the x-y location of the maximum value in the current image. - * - * @return - * A PVector with the location of the maximum value. - */ - public PVector max(){ - MinMaxLocResult r = Core.minMaxLoc(getCurrentMat()); - return OpenCV.pointToPVector(r.maxLoc); - } - - /** - * Get the x-y location of the minimum value in the current image. - * - * @return - * A PVector with the location of the minimum value. - */ - public PVector min(){ - MinMaxLocResult r = Core.minMaxLoc(getCurrentMat()); - return OpenCV.pointToPVector(r.minLoc); - } - - /** - * Helper function to convert an OpenCV Point into a Processing PVector - * - * @param p - * A Point - * @return - * A PVector - */ - public static PVector pointToPVector(Point p){ - return new PVector((float)p.x, (float)p.y); - } - - - /** - * Adjust the brightness of the image. Works on color or black and white images. - * - * @param amt - * The amount to brighten the image. Ranges -255 to 255. - * - **/ - public void brightness(int amt){ - Scalar modifier; - if(useColor){ - modifier = new Scalar(amt,amt,amt, 1); + } else{ + modifier = new Scalar(amt); + } + + Core.multiply(getCurrentMat(), modifier, getCurrentMat()); + } + + /** + * Get the x-y location of the maximum value in the current image. + * + * @return + * A PVector with the location of the maximum value. + */ + public PVector max(){ + MinMaxLocResult r = Core.minMaxLoc(getCurrentMat()); + return OpenCV.pointToPVector(r.maxLoc); + } + + /** + * Get the x-y location of the minimum value in the current image. + * + * @return + * A PVector with the location of the minimum value. + */ + public PVector min(){ + MinMaxLocResult r = Core.minMaxLoc(getCurrentMat()); + return OpenCV.pointToPVector(r.minLoc); + } + + /** + * Helper function to convert an OpenCV Point into a Processing PVector + * + * @param p + * A Point + * @return + * A PVector + */ + public static PVector pointToPVector(Point p){ + return new PVector((float)p.x, (float)p.y); + } + + + /** + * Adjust the brightness of the image. Works on color or black and white images. + * + * @param amt + * The amount to brighten the image. Ranges -255 to 255. + * + **/ + public void brightness(int amt){ + Scalar modifier; + if(useColor){ + modifier = new Scalar(amt,amt,amt, 1); - } else{ - modifier = new Scalar(amt); - } - - Core.add(getCurrentMat(), modifier, getCurrentMat()); - } - - /** - * Helper to create a new OpenCV Mat whose channels and - * bit-depth mask an existing Mat. - * - * @param m - * The Mat to match - * @return - * A new Mat - */ - public static Mat imitate(Mat m){ - return new Mat(m.height(), m.width(), m.type()); - } - - /** - * Calculate the difference between the current image - * loaded into OpenCV and a second image. The result is stored - * in the loaded image in OpenCV. Works on both color and grayscale - * images. - * - * @param img - * A PImage to diff against. + } else{ + modifier = new Scalar(amt); + } + + Core.add(getCurrentMat(), modifier, getCurrentMat()); + } + + /** + * Helper to create a new OpenCV Mat whose channels and + * bit-depth match an existing Mat. + * + * @param m + * The Mat to match + * @return + * A new Mat + */ + public static Mat imitate(Mat m){ + return new Mat(m.height(), m.width(), m.type()); + } + + /** + * Calculate the difference between the current image + * loaded into OpenCV and a second image. The result is stored + * in the loaded image in OpenCV. Works on both color and grayscale + * images. + * + * @param img + * A PImage to diff against.
+ */ + public void diff(PImage img){ + Mat imgMat = imitate(getColor()); + toCv(img, imgMat); - Mat dst = imitate(getCurrentMat()); + Mat dst = imitate(getCurrentMat()); - if(useColor){ - ARGBtoBGRA(imgMat, imgMat); - Core.absdiff(getCurrentMat(), imgMat, dst); - } else { - Core.absdiff(getCurrentMat(), OpenCV.gray(imgMat), dst); - } - - dst.assignTo(getCurrentMat()); - } - - /** - * A helper function that diffs two Mats using absdiff. - * Places the result back into mat1 - * - * @param mat1 - * The destination Mat - * @param mat2 - * The Mat to diff against - */ - public static void diff(Mat mat1, Mat mat2){ - Mat dst = imitate(mat1); - Core.absdiff(mat1, mat2, dst); - dst.assignTo(mat1); - } - - /** - * Apply a global threshold to an image. Produces a binary image - * with white pixels where the original image was above the threshold - * and black where it was below. - * - * @param threshold - * An int from 0-255. - */ - public void threshold(int threshold){ - Imgproc.threshold(getCurrentMat(), getCurrentMat(), threshold, 255, Imgproc.THRESH_BINARY); - } - - /** - * Apply an adaptive threshold to an image. Produces a binary image - * with white pixels where the original image was above the threshold - * and black where it was below. - * - * See: - * http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#adaptiveThreshold(org.opencv.core.Mat, org.opencv.core.Mat, double, int, int, int, double) - * - * @param blockSize - * The size of the pixel neighborhood to use. - * @param c - * A constant subtracted from the mean of each neighborhood. - */ - public void adaptiveThreshold(int blockSize, int c){ - try{ - Imgproc.adaptiveThreshold(getCurrentMat(), getCurrentMat(), 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, blockSize, c); - } catch(CvException e){ - PApplet.println("ERROR: adaptiveThreshold function only works on gray images."); - } - } - - /** - * Normalize the histogram of the image. This will spread the image's color - * spectrum over the full 0-255 range. Only works on grayscale images. - * - * - * See: http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#equalizeHist(org.opencv.core.Mat, org.opencv.core.Mat) - * - */ - public void equalizeHistogram(){ - try{ - Imgproc.equalizeHist(getCurrentMat(), getCurrentMat()); - } catch(CvException e){ - PApplet.println("ERROR: equalizeHistogram only works on a gray image."); - } - } - - /** - * Invert the image. - * See: http://docs.opencv.org/java/org/opencv/core/Core.html#bitwise_not(org.opencv.core.Mat, org.opencv.core.Mat) - * - */ - public void invert(){ - Core.bitwise_not(getCurrentMat(),getCurrentMat()); - } - - /** - * Dilate the image. Dilation is a morphological operation (i.e. it affects the shape) often used to - * close holes in contours. It expands white areas of the image. - * - * See: - * http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#dilate(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat) - * - */ - public void dilate(){ - Imgproc.dilate(getCurrentMat(), getCurrentMat(), new Mat()); - } - - /** - * Erode the image. Erosion is a morphological operation (i.e. it affects the shape) often used to - * close holes in contours. It contracts white areas of the image. - * - * See: - * http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#erode(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat) - * - */ - public void erode(){ - Imgproc.erode(getCurrentMat(), getCurrentMat(), new Mat()); - } - - /** - * Blur an image symetrically by a given number of pixels. 
- * - * @param blurSize - * int - the amount to blur by in x- and y-directions. - */ - public void blur(int blurSize){ - Imgproc.blur(getCurrentMat(), getCurrentMat(), new Size(blurSize, blurSize)); - } - - /** - * Blur an image assymetrically by a different number of pixels in x- and y-directions. - * - * @param blurW - * amount to blur in the x-direction - * @param blurH - * amount to blur in the y-direction - */ - public void blur(int blurW, int blurH){ - Imgproc.blur(getCurrentMat(), getCurrentMat(), new Size(blurW, blurH)); - } - - /** - * Find edges in the image using Canny edge detection. - * - * @param lowThreshold - * @param highThreshold - */ - public void findCannyEdges(int lowThreshold, int highThreshold){ - Imgproc.Canny(getCurrentMat(), getCurrentMat(), lowThreshold, highThreshold); - } - - public void findSobelEdges(int dx, int dy){ - Mat sobeled = new Mat(getCurrentMat().height(), getCurrentMat().width(), CvType.CV_32F); - Imgproc.Sobel(getCurrentMat(), sobeled, CvType.CV_32F, dx, dy); - sobeled.convertTo(getCurrentMat(), getCurrentMat().type()); - } - - public void findScharrEdges(int direction){ - if(direction == HORIZONTAL){ - Imgproc.Scharr(getCurrentMat(), getCurrentMat(), -1, 1, 0 ); - } - - if(direction == VERTICAL){ - Imgproc.Scharr(getCurrentMat(), getCurrentMat(), -1, 0, 1 ); - } - - if(direction == BOTH){ - Mat hMat = imitate(getCurrentMat()); - Mat vMat = imitate(getCurrentMat()); - Imgproc.Scharr(getCurrentMat(), hMat, -1, 1, 0 ); - Imgproc.Scharr(getCurrentMat(), vMat, -1, 0, 1 ); - Core.add(vMat,hMat, getCurrentMat()); - } - } - - public ArrayList findContours(){ - return findContours(true, false); - } - - public ArrayList findContours(boolean findHoles, boolean sort){ - ArrayList result = new ArrayList(); - - ArrayList contourMat = new ArrayList(); - try{ - int contourFindingMode = (findHoles ? Imgproc.RETR_LIST : Imgproc.RETR_EXTERNAL); - - Imgproc.findContours(getCurrentMat(), contourMat, new Mat(), contourFindingMode, Imgproc.CHAIN_APPROX_NONE); - } catch(CvException e){ - PApplet.println("ERROR: findContours only works with a gray image."); - } - for (MatOfPoint c : contourMat) { - result.add(new Contour(parent, c)); - } - - if(sort){ - Collections.sort(result, new ContourComparator()); - } - - return result; - } - - public ArrayList findLines(int threshold, double minLineLength, double maxLineGap){ - ArrayList result = new ArrayList(); - - Mat lineMat = new Mat(); - Imgproc.HoughLinesP(getCurrentMat(), lineMat, 1, PConstants.PI/180.0, threshold, minLineLength, maxLineGap); - for (int i = 0; i < lineMat.width(); i++) { - double[] coords = lineMat.get(0, i); - result.add(new Line(coords[0], coords[1], coords[2], coords[3])); - } - - return result; - } - - public ArrayList findChessboardCorners(int patternWidth, int patternHeight){ - MatOfPoint2f corners = new MatOfPoint2f(); - Calib3d.findChessboardCorners(getCurrentMat(), new Size(patternWidth,patternHeight), corners); - return matToPVectors(corners); - } - - /** - * - * @param mat - * The mat from which to calculate the histogram. Get this from getGray(), getR(), getG(), getB(), etc.. - * By default this will normalize the histogram (scale the values to 0.0-1.0). Pass false as the third argument to keep values unormalized. - * @param numBins - * The number of bins into which divide the histogram should be divided. - * @param normalize (optional) - * Whether or not to normalize the histogram (scale the values to 0.0-1.0). Defaults to true. - * @return - * A Histogram object that you can call draw() on. 
- */ - public Histogram findHistogram(Mat mat, int numBins){ - return findHistogram(mat, numBins, true); - } - - - public Histogram findHistogram(Mat mat, int numBins, boolean normalize){ - - MatOfInt channels = new MatOfInt(0); - MatOfInt histSize = new MatOfInt(numBins); - float[] r = {0f, 256f}; - MatOfFloat ranges = new MatOfFloat(r); - Mat hist = new Mat(); - - ArrayList images = new ArrayList(); - images.add(mat); + if(useColor){ + ARGBtoBGRA(imgMat, imgMat); + Core.absdiff(getCurrentMat(), imgMat, dst); + } else { + Core.absdiff(getCurrentMat(), OpenCV.gray(imgMat), dst); + } + + dst.assignTo(getCurrentMat()); + } + + /** + * A helper function that diffs two Mats using absdiff. + * Places the result back into mat1 + * + * @param mat1 + * The destination Mat + * @param mat2 + * The Mat to diff against + */ + public static void diff(Mat mat1, Mat mat2){ + Mat dst = imitate(mat1); + Core.absdiff(mat1, mat2, dst); + dst.assignTo(mat1); + } + + /** + * Apply a global threshold to an image. Produces a binary image + * with white pixels where the original image was above the threshold + * and black where it was below. + * + * @param threshold + * An int from 0-255. + */ + public void threshold(int threshold){ + Imgproc.threshold(getCurrentMat(), getCurrentMat(), threshold, 255, Imgproc.THRESH_BINARY); + } + + + /** + *

+ * Finds circles in a grayscale image using the Hough transform. + * + * The function finds circles in a grayscale image using a modification of the + * Hough transform. + * (The parameters below belong to the underlying Imgproc.HoughCircles call; + * findCircles() itself supplies fixed values for them.) + * @param image 8-bit, single-channel, grayscale input image. + * @param circles Output vector of found circles. Each vector is encoded as a + * 3-element floating-point vector (x, y, radius). + * @param method Detection method to use. Currently, the only implemented method + * is CV_HOUGH_GRADIENT, which is basically *21HT*, described in + * [Yuen90]. + * @param dp Inverse ratio of the accumulator resolution to the image + * resolution. For example, if dp=1, the accumulator has the same + * resolution as the input image. If dp=2, the accumulator has half + * as big width and height. + * @param minDist Minimum distance between the centers of the detected circles. + * If the parameter is too small, multiple neighbor circles may be falsely + * detected in addition to a true one. If it is too large, some circles may be + * missed. + * @param param1 First method-specific parameter. In case of CV_HOUGH_GRADIENT, + * it is the higher threshold of the two passed to the "Canny" edge detector + * (the lower one is twice smaller). + * @param param2 Second method-specific parameter. In case of CV_HOUGH_GRADIENT, + * it is the accumulator threshold for the circle centers at the detection + * stage. The smaller it is, the more false circles may be detected. Circles + * corresponding to the larger accumulator values will be returned first. + * @param minRadius Minimum circle radius. + * @param maxRadius Maximum circle radius. + * + * @see org.opencv.imgproc.Imgproc.HoughCircles + * @see org.opencv.imgproc.Imgproc#minEnclosingCircle + * @see org.opencv.imgproc.Imgproc#fitEllipse + */ + public ArrayList<ArrayList<Integer>> findCircles() + { + Mat circles = new Mat(0, 0, 0); // don't care: the Mat will be recreated in the HoughCircles function anyway + Imgproc.HoughCircles(getCurrentMat(), circles, Imgproc.CV_HOUGH_GRADIENT, 2, getCurrentMat().width()/4, 200, 100, 0, 0); + ArrayList<ArrayList<Integer>> arr = new ArrayList<ArrayList<Integer>>(); + + for(int y = 0; y < circles.cols(); y++) + { + double[] foo = circles.get(0, y); + ArrayList<Integer> circle = new ArrayList<Integer>(); + circle.add((int)foo[0]); // x + circle.add((int)foo[1]); // y + circle.add((int)foo[2]); // radius + arr.add(circle); + } + + return arr; + }
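A minimal sketch of calling the new findCircles(), printing the (x, y, radius) triples it returns. The file name is a hypothetical placeholder, and the gray() call reflects the note above that HoughCircles runs on a single-channel image:

// Hedged usage sketch for findCircles().
opencv.loadImage("coins.jpg"); // hypothetical file
opencv.gray(); // HoughCircles expects a grayscale input
ArrayList<ArrayList<Integer>> circles = opencv.findCircles();
for (ArrayList<Integer> c : circles) {
  println("circle at (" + c.get(0) + ", " + c.get(1) + ") with radius " + c.get(2));
}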
+ + /** + * Apply an adaptive threshold to an image. Produces a binary image + * with white pixels where the original image was above the threshold + * and black where it was below. + * + * See: + * http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#adaptiveThreshold(org.opencv.core.Mat, org.opencv.core.Mat, double, int, int, int, double) + * + * @param blockSize + * The size of the pixel neighborhood to use. + * @param c + * A constant subtracted from the mean of each neighborhood. + */ + public void adaptiveThreshold(int blockSize, int c){ + try{ + Imgproc.adaptiveThreshold(getCurrentMat(), getCurrentMat(), 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, blockSize, c); + } catch(CvException e){ + PApplet.println("ERROR: adaptiveThreshold function only works on gray images."); + } + } + + /** + * Normalize the histogram of the image. This will spread the image's color + * spectrum over the full 0-255 range. Only works on grayscale images. + * + * See: http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#equalizeHist(org.opencv.core.Mat, org.opencv.core.Mat) + * + */ + public void equalizeHistogram(){ + try{ + Imgproc.equalizeHist(getCurrentMat(), getCurrentMat()); + } catch(CvException e){ + PApplet.println("ERROR: equalizeHistogram only works on a gray image."); + } + } + + /** + * Invert the image. + * See: http://docs.opencv.org/java/org/opencv/core/Core.html#bitwise_not(org.opencv.core.Mat, org.opencv.core.Mat) + * + */ + public void invert(){ + Core.bitwise_not(getCurrentMat(),getCurrentMat()); + } + + /** + * Dilate the image. Dilation is a morphological operation (i.e. it affects the shape) often used to + * close holes in contours. It expands white areas of the image. + * + * See: + * http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#dilate(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat) + * + */ + public void dilate(){ + Imgproc.dilate(getCurrentMat(), getCurrentMat(), new Mat()); + } + + /** + * Erode the image. Erosion is a morphological operation (i.e. it affects the shape) often used to + * close holes in contours. It contracts white areas of the image. + * + * See: + * http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#erode(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat) + * + */ + public void erode(){ + Imgproc.erode(getCurrentMat(), getCurrentMat(), new Mat()); + } + + /** + * Blur an image symmetrically by a given number of pixels. + * + * @param blurSize + * int - the amount to blur by in x- and y-directions. + */ + public void blur(int blurSize){ + Imgproc.blur(getCurrentMat(), getCurrentMat(), new Size(blurSize, blurSize)); + } + + /** + * Blur an image asymmetrically by a different number of pixels in x- and y-directions. + * + * @param blurW + * amount to blur in the x-direction + * @param blurH + * amount to blur in the y-direction + */ + public void blur(int blurW, int blurH){ + Imgproc.blur(getCurrentMat(), getCurrentMat(), new Size(blurW, blurH)); + } + + /** + * Find edges in the image using Canny edge detection. + * + * @param lowThreshold + * @param highThreshold + */ + public void findCannyEdges(int lowThreshold, int highThreshold){ + Imgproc.Canny(getCurrentMat(), getCurrentMat(), lowThreshold, highThreshold); + } + + public void findSobelEdges(int dx, int dy){ + Mat sobeled = new Mat(getCurrentMat().height(), getCurrentMat().width(), CvType.CV_32F); + Imgproc.Sobel(getCurrentMat(), sobeled, CvType.CV_32F, dx, dy); + sobeled.convertTo(getCurrentMat(), getCurrentMat().type()); + } + + public void findScharrEdges(int direction){ + if(direction == HORIZONTAL){ + Imgproc.Scharr(getCurrentMat(), getCurrentMat(), -1, 1, 0 ); + } + + if(direction == VERTICAL){ + Imgproc.Scharr(getCurrentMat(), getCurrentMat(), -1, 0, 1 ); + } + + if(direction == BOTH){ + Mat hMat = imitate(getCurrentMat()); + Mat vMat = imitate(getCurrentMat()); + Imgproc.Scharr(getCurrentMat(), hMat, -1, 1, 0 ); + Imgproc.Scharr(getCurrentMat(), vMat, -1, 0, 1 ); + Core.add(vMat,hMat, getCurrentMat()); + } + }
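As a quick illustration of the edge detectors above (the Canny thresholds here are arbitrary example values, not recommendations):

// Hedged usage sketch: run one of the edge detectors on a gray image.
opencv.gray();
opencv.findCannyEdges(20, 75); // low and high hysteresis thresholds
image(opencv.getSnapshot(), 0, 0);
// alternatively: opencv.findSobelEdges(1, 0);
// or: opencv.findScharrEdges(OpenCV.HORIZONTAL);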
+ + public ArrayList<Contour> findContours(){ + return findContours(true, false); + } + + public ArrayList<Contour> findContours(boolean findHoles, boolean sort){ + ArrayList<Contour> result = new ArrayList<Contour>(); + + ArrayList<MatOfPoint> contourMat = new ArrayList<MatOfPoint>(); + try{ + int contourFindingMode = (findHoles ? Imgproc.RETR_LIST : Imgproc.RETR_EXTERNAL); + + Imgproc.findContours(getCurrentMat(), contourMat, new Mat(), contourFindingMode, Imgproc.CHAIN_APPROX_NONE); + } catch(CvException e){ + PApplet.println("ERROR: findContours only works with a gray image."); + } + for (MatOfPoint c : contourMat) { + result.add(new Contour(parent, c)); + } + + if(sort){ + Collections.sort(result, new ContourComparator()); + } + + return result; + } + + public ArrayList<Line> findLines(int threshold, double minLineLength, double maxLineGap){ + ArrayList<Line> result = new ArrayList<Line>(); + + Mat lineMat = new Mat(); + Imgproc.HoughLinesP(getCurrentMat(), lineMat, 1, PConstants.PI/180.0, threshold, minLineLength, maxLineGap); + for (int i = 0; i < lineMat.width(); i++) { + double[] coords = lineMat.get(0, i); + result.add(new Line(coords[0], coords[1], coords[2], coords[3])); + } + + return result; + } + + public ArrayList<PVector> findChessboardCorners(int patternWidth, int patternHeight){ + MatOfPoint2f corners = new MatOfPoint2f(); + Calib3d.findChessboardCorners(getCurrentMat(), new Size(patternWidth,patternHeight), corners); + return matToPVectors(corners); + }
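A short sketch of chaining a binary threshold into findContours(), since contours are normally found on a thresholded gray image (the threshold value is illustrative):

// Hedged usage sketch: binarize, then fetch external contours, sorted.
opencv.gray();
opencv.threshold(70);
ArrayList<Contour> contours = opencv.findContours(false, true); // no holes, sorted
for (Contour contour : contours) {
  contour.draw(); // Contour provides its own draw() helper
}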
+ + /** + * + * @param mat + * The mat from which to calculate the histogram. Get this from getGray(), getR(), getG(), getB(), etc. + * By default this will normalize the histogram (scale the values to 0.0-1.0). Pass false as the third argument to keep values unnormalized. + * @param numBins + * The number of bins into which the histogram should be divided. + * @param normalize (optional) + * Whether or not to normalize the histogram (scale the values to 0.0-1.0). Defaults to true. + * @return + * A Histogram object that you can call draw() on. + */ + public Histogram findHistogram(Mat mat, int numBins){ + return findHistogram(mat, numBins, true); + } + + public Histogram findHistogram(Mat mat, int numBins, boolean normalize){ + + MatOfInt channels = new MatOfInt(0); + MatOfInt histSize = new MatOfInt(numBins); + float[] r = {0f, 256f}; + MatOfFloat ranges = new MatOfFloat(r); + Mat hist = new Mat(); + + ArrayList<Mat> images = new ArrayList<Mat>(); + images.add(mat); - Imgproc.calcHist( images, channels, new Mat(), hist, histSize, ranges); - - if(normalize){ - Core.normalize(hist, hist); - } - - return new Histogram(parent, hist); - } - - /** - * - * Filter the image for values between a lower and upper bound. - * Converts the current image into a binary image with white where pixel - * values were within bounds and black elsewhere. - * - * @param lowerBound - * @param upperBound - */ - public void inRange(int lowerBound, int upperBound){ - Core.inRange(getCurrentMat(), new Scalar(lowerBound), new Scalar(upperBound), getCurrentMat()); - } - - /** - * - * @param src - * A Mat of type 8UC4 with channels arranged as BGRA. - * @return - * A Mat of type 8UC1 in grayscale. - */ - public static Mat gray(Mat src){ - Mat result = new Mat(src.height(), src.width(), CvType.CV_8UC1); - Imgproc.cvtColor(src, result, Imgproc.COLOR_BGRA2GRAY); - - return result; - } - - public void gray(){ - matGray = gray(matBGRA); - useGray(); //??? - } - - /** - * Set a Region of Interest within the image. Subsequent image processing - * functions will apply to this ROI rather than the full image. - * Full image will display be included in output. - * - * @return - * False if requested ROI exceed the bounds of the working image. - * True if ROI was successfully set. - */ - public boolean setROI(int x, int y, int w, int h){ - if(x < 0 || - x + w > width || - y < 0 || - y + h > height){ - return false; - } else{ - roiWidth = w; - roiHeight = h; - - if(useColor){ - nonROImat = matBGRA; - matROI = new Mat(matBGRA, new Rect(x, y, w, h)); - } else { - nonROImat = matGray; - matROI = new Mat(matGray, new Rect(x, y, w, h)); - } - useROI = true; - - return true; - } - } - - public void releaseROI(){ - useROI = false; - } + Imgproc.calcHist( images, channels, new Mat(), hist, histSize, ranges); + + if(normalize){ + Core.normalize(hist, hist); + } + + return new Histogram(parent, hist); + } + + /** + * + * Filter the image for values between a lower and upper bound. + * Converts the current image into a binary image with white where pixel + * values were within bounds and black elsewhere. + * + * @param lowerBound + * @param upperBound + */ + public void inRange(int lowerBound, int upperBound){ + Core.inRange(getCurrentMat(), new Scalar(lowerBound), new Scalar(upperBound), getCurrentMat()); + } + + /** + * + * @param src + * A Mat of type 8UC4 with channels arranged as BGRA. + * @return + * A Mat of type 8UC1 in grayscale. + */ + public static Mat gray(Mat src){ + Mat result = new Mat(src.height(), src.width(), CvType.CV_8UC1); + Imgproc.cvtColor(src, result, Imgproc.COLOR_BGRA2GRAY); + + return result; + } + + public void gray(){ + matGray = gray(matBGRA); + useGray(); //??? + } + + /** + * Set a Region of Interest within the image. Subsequent image processing + * functions will apply to this ROI rather than the full image. + * The full image will still be included in the output. + * + * @return + * False if the requested ROI exceeds the bounds of the working image. + * True if the ROI was successfully set. + */ + public boolean setROI(int x, int y, int w, int h){ + if(x < 0 || + x + w > width || + y < 0 || + y + h > height){ + return false; + } else{ + roiWidth = w; + roiHeight = h; + + if(useColor){ + nonROImat = matBGRA; + matROI = new Mat(matBGRA, new Rect(x, y, w, h)); + } else { + nonROImat = matGray; + matROI = new Mat(matGray, new Rect(x, y, w, h)); + } + useROI = true; + + return true; + } + } + + public void releaseROI(){ + useROI = false; + } - /** - * Load an image from a path. - * - * @param imgPath - * String with the path to the image - */ - public void loadImage(String imgPath){ - loadImage(parent.loadImage(imgPath)); - } - - // NOTE: We're not handling the signed/unsigned - // conversion. Is that any issue? - public void loadImage(PImage img){ - // FIXME: is there a better way to hold onto - // this? + /** + * Load an image from a path. + * + * @param imgPath + * String with the path to the image + */ + public void loadImage(String imgPath){ + loadImage(parent.loadImage(imgPath)); + } + + // NOTE: We're not handling the signed/unsigned + // conversion. Is that any issue? + public void loadImage(PImage img){ + // FIXME: is there a better way to hold onto + // this?
+ inputImage = img; + + toCv(img, matBGRA); + ARGBtoBGRA(matBGRA,matBGRA); + populateBGRA(); + + if(useColor){ + useColor(this.colorSpace); + } else { + gray(); + } + + } + + public static void ARGBtoBGRA(Mat rgba, Mat bgra){ + ArrayList channels = new ArrayList(); + Core.split(rgba, channels); - ArrayList reordered = new ArrayList(); - // Starts as ARGB. - // Make into BGRA. - - reordered.add(channels.get(3)); - reordered.add(channels.get(2)); - reordered.add(channels.get(1)); - reordered.add(channels.get(0)); - - Core.merge(reordered, bgra); - } - - - public int getSize(){ - return width * height; - } + ArrayList reordered = new ArrayList(); + // Starts as ARGB. + // Make into BGRA. + + reordered.add(channels.get(3)); + reordered.add(channels.get(2)); + reordered.add(channels.get(1)); + reordered.add(channels.get(0)); + + Core.merge(reordered, bgra); + } + + + public int getSize(){ + return width * height; + } /** * @@ -1177,12 +1234,12 @@ public int getSize(){ * pixel array. * * @param m - * An RGBA Mat we want converted + * An RGBA Mat we want converted * @return - * An int[] formatted to be the pixels of a PImage + * An int[] formatted to be the pixels of a PImage */ public int[] matToARGBPixels(Mat m){ - int pImageChannels = 4; + int pImageChannels = 4; int numPixels = m.width()*m.height(); int[] intPixels = new int[numPixels]; byte[] matPixels = new byte[numPixels*pImageChannels]; @@ -1193,185 +1250,183 @@ public int[] matToARGBPixels(Mat m){ } - /** - * Convert an OpenCV Mat object into a PImage - * to be used in other Processing code. - * Copies the Mat's pixel data into the PImage's pixel array. - * Iterates over each pixel in the Mat, i.e. expensive. - * - * (Mainly used internally by OpenCV. Inspired by toCv() - * from KyleMcDonald's ofxCv.) - * - * @param m - * A Mat you want converted - * @param img - * The PImage you want the Mat converted into. - */ - public void toPImage(Mat m, PImage img){ - img.loadPixels(); + /** + * Convert an OpenCV Mat object into a PImage + * to be used in other Processing code. + * Copies the Mat's pixel data into the PImage's pixel array. + * Iterates over each pixel in the Mat, i.e. expensive. + * + * (Mainly used internally by OpenCV. Inspired by toCv() + * from KyleMcDonald's ofxCv.) + * + * @param m + * A Mat you want converted + * @param img + * The PImage you want the Mat converted into. + */ + public void toPImage(Mat m, PImage img){ + img.loadPixels(); - if(m.channels() == 3){ - Mat m2 = new Mat(); - Imgproc.cvtColor(m, m2, Imgproc.COLOR_RGB2RGBA); + if(m.channels() == 3){ + Mat m2 = new Mat(); + Imgproc.cvtColor(m, m2, Imgproc.COLOR_RGB2RGBA); img.pixels = matToARGBPixels(m2); - } else if(m.channels() == 1){ - Mat m2 = new Mat(); - Imgproc.cvtColor(m, m2, Imgproc.COLOR_GRAY2RGBA); + } else if(m.channels() == 1){ + Mat m2 = new Mat(); + Imgproc.cvtColor(m, m2, Imgproc.COLOR_GRAY2RGBA); img.pixels = matToARGBPixels(m2); - } else if(m.channels() == 4){ + } else if(m.channels() == 4){ img.pixels = matToARGBPixels(m); - } - - img.updatePixels(); - } - - /** - * Convert a Processing PImage to an OpenCV Mat. - * (Inspired by Kyle McDonald's ofxCv's toOf()) - * - * @param img - * The PImage to convert. - * @param m - * The Mat to receive the image data. 
- */ - public static void toCv(PImage img, Mat m){ - BufferedImage image = (BufferedImage)img.getNative(); - int[] matPixels = ((DataBufferInt)image.getRaster().getDataBuffer()).getData(); - - ByteBuffer bb = ByteBuffer.allocate(matPixels.length * 4); - IntBuffer ib = bb.asIntBuffer(); - ib.put(matPixels); - - byte[] bvals = bb.array(); + } + + img.updatePixels(); + } + + /** + * Convert a Processing PImage to an OpenCV Mat. + * (Inspired by Kyle McDonald's ofxCv's toOf()) + * + * @param img + * The PImage to convert. + * @param m + * The Mat to receive the image data. + */ + public static void toCv(PImage img, Mat m){ + BufferedImage image = (BufferedImage)img.getNative(); + int[] matPixels = ((DataBufferInt)image.getRaster().getDataBuffer()).getData(); + + ByteBuffer bb = ByteBuffer.allocate(matPixels.length * 4); + IntBuffer ib = bb.asIntBuffer(); + ib.put(matPixels); + + byte[] bvals = bb.array(); - m.put(0,0, bvals); - } - - public static ArrayList matToPVectors(MatOfPoint mat){ - ArrayList result = new ArrayList(); - Point[] points = mat.toArray(); - for(int i = 0; i < points.length; i++){ - result.add(new PVector((float)points[i].x, (float)points[i].y)); - } - - return result; - } + m.put(0,0, bvals); + } + + public static ArrayList matToPVectors(MatOfPoint mat){ + ArrayList result = new ArrayList(); + Point[] points = mat.toArray(); + for(int i = 0; i < points.length; i++){ + result.add(new PVector((float)points[i].x, (float)points[i].y)); + } + + return result; + } - public static ArrayList matToPVectors(MatOfPoint2f mat){ - ArrayList result = new ArrayList(); - Point[] points = mat.toArray(); - for(int i = 0; i < points.length; i++){ - result.add(new PVector((float)points[i].x, (float)points[i].y)); - } - - return result; - } - - public String matToS(Mat mat){ - return CvType.typeToString(mat.type()); - } - - public PImage getInput(){ - return inputImage; - } - - public PImage getOutput(){ - if(useColor){ - toPImage(matBGRA, outputImage); - } else { - toPImage(matGray, outputImage); - } - - return outputImage; - } - - public PImage getSnapshot(){ - PImage result; - - if(useROI){ - result = getSnapshot(matROI); - } else { - if(useColor){ - if(colorSpace == PApplet.HSB){ - result = getSnapshot(matHSV); - } else { - result = getSnapshot(matBGRA); - } - } else { - result = getSnapshot(matGray); - } - } - return result; - } - - public PImage getSnapshot(Mat m){ - PImage result = parent.createImage(m.width(), m.height(), PApplet.ARGB); - toPImage(m, result); - return result; - } + public static ArrayList matToPVectors(MatOfPoint2f mat){ + ArrayList result = new ArrayList(); + Point[] points = mat.toArray(); + for(int i = 0; i < points.length; i++){ + result.add(new PVector((float)points[i].x, (float)points[i].y)); + } + + return result; + } + + public String matToS(Mat mat){ + return CvType.typeToString(mat.type()); + } + + public PImage getInput(){ + return inputImage; + } + + public PImage getOutput(){ + if(useColor){ + toPImage(matBGRA, outputImage); + } else { + toPImage(matGray, outputImage); + } + + return outputImage; + } + + public PImage getSnapshot(){ + PImage result; + + if(useColor){ + if(colorSpace == PApplet.HSB){ + result = getSnapshot(matHSV); + } else { + result = getSnapshot(matBGRA); + } + } else { + result = getSnapshot(matGray); + } + + return result; + } + + public PImage getSnapshot(Mat m){ + PImage result = parent.createImage(m.width(), m.height(), PApplet.ARGB); + toPImage(m, result); + return result; + } - public Mat getR(){ - return matR; - } - - public Mat 
getG(){ - return matG; - } - - public Mat getB(){ - return matB; - } - - public Mat getA(){ - return matA; - } - - public Mat getH(){ - return matH; - } - - public Mat getS(){ - return matS; - } - - public Mat getV(){ - return matV; - } - - public Mat getGray(){ - return matGray; - } - - public void setGray(Mat m){ - matGray = m; - useColor = false; - } - - public void setColor(Mat m){ - matBGRA = m; - useColor = true; - } - - public Mat getColor(){ - return matBGRA; - } - - public Mat getROI(){ - return matROI; - } + public Mat getR(){ + return matR; + } + + public Mat getG(){ + return matG; + } + + public Mat getB(){ + return matB; + } + + public Mat getA(){ + return matA; + } + + public Mat getH(){ + return matH; + } + + public Mat getS(){ + return matS; + } + + public Mat getV(){ + return matV; + } + + public Mat getGray(){ + return matGray; + } + + public void setGray(Mat m){ + matGray = m; + useColor = false; + } + + public void setColor(Mat m){ + matBGRA = m; + useColor = true; + } + + public Mat getColor(){ + return matBGRA; + } + + public Mat getROI(){ + return matROI; + } - private void welcome() { - System.out.println("##library.name## ##library.prettyVersion## by ##author##"); - System.out.println("Using Java OpenCV " + Core.VERSION); - } - - /** - * return the version of the library. - * - * @return String - */ - public static String version() { - return VERSION; - } + private void welcome() { + System.out.println("OpenCV for Processing by Evans Jahja http://github.com/charon77"); + System.out.println("Based on: OpenCV for Processing 0.5.2 by Greg Borenstein http://gregborenstein.com"); + System.out.println("Using Java OpenCV " + Core.VERSION); + } + + /** + * return the version of the library. + * + * @return String + */ + public static String version() { + return VERSION; + } } diff --git a/src/gab/opencv/package.bluej b/src/gab/opencv/package.bluej new file mode 100644 index 0000000..191dbda --- /dev/null +++ b/src/gab/opencv/package.bluej @@ -0,0 +1,99 @@ +#BlueJ package file +dependency1.from=OpenCV +dependency1.to=Flow +dependency1.type=UsesDependency +dependency2.from=OpenCV +dependency2.to=Contour +dependency2.type=UsesDependency +dependency3.from=OpenCV +dependency3.to=ContourComparator +dependency3.type=UsesDependency +dependency4.from=OpenCV +dependency4.to=Line +dependency4.type=UsesDependency +dependency5.from=OpenCV +dependency5.to=Histogram +dependency5.type=UsesDependency +dependency6.from=ContourComparator +dependency6.to=Contour +dependency6.type=UsesDependency +package.editor.height=400 +package.editor.width=553 +package.editor.x=305 +package.editor.y=161 +package.numDependencies=6 +package.numTargets=6 +package.showExtends=true +package.showUses=true +target1.editor.height=700 +target1.editor.width=900 +target1.editor.x=110 +target1.editor.y=90 +target1.height=50 +target1.name=Flow +target1.naviview.expanded=true +target1.showInterface=false +target1.type=ClassTarget +target1.width=80 +target1.x=160 +target1.y=70 +target2.editor.height=744 +target2.editor.width=1370 +target2.editor.x=-8 +target2.editor.y=-8 +target2.height=50 +target2.name=OpenCV +target2.naviview.expanded=true +target2.showInterface=false +target2.type=ClassTarget +target2.width=80 +target2.x=160 +target2.y=10 +target3.editor.height=700 +target3.editor.width=900 +target3.editor.x=110 +target3.editor.y=90 +target3.height=50 +target3.name=ContourComparator +target3.naviview.expanded=true +target3.showInterface=false +target3.type=ClassTarget +target3.width=140 +target3.x=10 +target3.y=90 
+target4.editor.height=700 +target4.editor.width=900 +target4.editor.x=110 +target4.editor.y=90 +target4.height=50 +target4.name=Line +target4.naviview.expanded=true +target4.showInterface=false +target4.type=ClassTarget +target4.width=80 +target4.x=160 +target4.y=130 +target5.editor.height=700 +target5.editor.width=900 +target5.editor.x=110 +target5.editor.y=90 +target5.height=50 +target5.name=Contour +target5.naviview.expanded=true +target5.showInterface=false +target5.type=ClassTarget +target5.width=80 +target5.x=100 +target5.y=190 +target6.editor.height=700 +target6.editor.width=900 +target6.editor.x=110 +target6.editor.y=90 +target6.height=50 +target6.name=Histogram +target6.naviview.expanded=true +target6.showInterface=false +target6.type=ClassTarget +target6.width=80 +target6.x=10 +target6.y=150 diff --git a/src/gab/package.bluej b/src/gab/package.bluej new file mode 100644 index 0000000..6c644ae --- /dev/null +++ b/src/gab/package.bluej @@ -0,0 +1,15 @@ +#BlueJ package file +package.editor.height=400 +package.editor.width=560 +package.editor.x=285 +package.editor.y=141 +package.numDependencies=0 +package.numTargets=1 +package.showExtends=true +package.showUses=true +target1.height=62 +target1.name=opencv +target1.type=PackageTarget +target1.width=80 +target1.x=160 +target1.y=10 diff --git a/src/org/opencv/calib3d/Calib3d.java b/src/org/opencv/calib3d/Calib3d.java new file mode 100644 index 0000000..4ef2ac3 --- /dev/null +++ b/src/org/opencv/calib3d/Calib3d.java @@ -0,0 +1,3010 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.calib3d; + +import java.util.ArrayList; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfDouble; +import org.opencv.core.MatOfPoint2f; +import org.opencv.core.MatOfPoint3f; +import org.opencv.core.Point; +import org.opencv.core.Rect; +import org.opencv.core.Size; +import org.opencv.core.TermCriteria; +import org.opencv.utils.Converters; + +public class Calib3d { + + private static final int + CV_LMEDS = 4, + CV_RANSAC = 8, + CV_FM_LMEDS = CV_LMEDS, + CV_FM_RANSAC = CV_RANSAC, + CV_FM_7POINT = 1, + CV_FM_8POINT = 2, + CV_CALIB_USE_INTRINSIC_GUESS = 1, + CV_CALIB_FIX_ASPECT_RATIO = 2, + CV_CALIB_FIX_PRINCIPAL_POINT = 4, + CV_CALIB_ZERO_TANGENT_DIST = 8, + CV_CALIB_FIX_FOCAL_LENGTH = 16, + CV_CALIB_FIX_K1 = 32, + CV_CALIB_FIX_K2 = 64, + CV_CALIB_FIX_K3 = 128, + CV_CALIB_FIX_K4 = 2048, + CV_CALIB_FIX_K5 = 4096, + CV_CALIB_FIX_K6 = 8192, + CV_CALIB_RATIONAL_MODEL = 16384, + CV_CALIB_FIX_INTRINSIC = 256, + CV_CALIB_SAME_FOCAL_LENGTH = 512, + CV_CALIB_ZERO_DISPARITY = 1024; + + + public static final int + CV_ITERATIVE = 0, + CV_EPNP = 1, + CV_P3P = 2, + LMEDS = CV_LMEDS, + RANSAC = CV_RANSAC, + ITERATIVE = CV_ITERATIVE, + EPNP = CV_EPNP, + P3P = CV_P3P, + CALIB_CB_ADAPTIVE_THRESH = 1, + CALIB_CB_NORMALIZE_IMAGE = 2, + CALIB_CB_FILTER_QUADS = 4, + CALIB_CB_FAST_CHECK = 8, + CALIB_CB_SYMMETRIC_GRID = 1, + CALIB_CB_ASYMMETRIC_GRID = 2, + CALIB_CB_CLUSTERING = 4, + CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS, + CALIB_FIX_ASPECT_RATIO = CV_CALIB_FIX_ASPECT_RATIO, + CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT, + CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST, + CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH, + CALIB_FIX_K1 = CV_CALIB_FIX_K1, + CALIB_FIX_K2 = CV_CALIB_FIX_K2, + CALIB_FIX_K3 = CV_CALIB_FIX_K3, + CALIB_FIX_K4 = CV_CALIB_FIX_K4, + CALIB_FIX_K5 = CV_CALIB_FIX_K5, + CALIB_FIX_K6 = CV_CALIB_FIX_K6, + CALIB_RATIONAL_MODEL = CV_CALIB_RATIONAL_MODEL, + 
CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC, + CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH, + CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY, + FM_7POINT = CV_FM_7POINT, + FM_8POINT = CV_FM_8POINT, + FM_LMEDS = CV_FM_LMEDS, + FM_RANSAC = CV_FM_RANSAC; + + + // + // C++: Vec3d RQDecomp3x3(Mat src, Mat& mtxR, Mat& mtxQ, Mat& Qx = Mat(), Mat& Qy = Mat(), Mat& Qz = Mat()) + // + +/** + *

+ * Computes an RQ decomposition of 3x3 matrices. + * + * The function computes an RQ decomposition using the given rotations. This + * function is used in "decomposeProjectionMatrix" to decompose the left 3x3 + * submatrix of a projection matrix into a camera and a rotation matrix. + * + * It optionally returns three rotation matrices, one for each axis, and the + * three Euler angles in degrees (as the return value) that could be used in + * OpenGL. Note, there is always more than one sequence of rotations about the + * three principal axes that results in the same orientation of an object, e.g. + * see [Slabaugh]. The returned three rotation matrices and the corresponding three Euler + * angles are only one of the possible solutions.
+ * + * @param src 3x3 input matrix. + * @param mtxR Output 3x3 upper-triangular matrix. + * @param mtxQ Output 3x3 orthogonal matrix. + * @param Qx Optional output 3x3 rotation matrix around x-axis. + * @param Qy Optional output 3x3 rotation matrix around y-axis. + * @param Qz Optional output 3x3 rotation matrix around z-axis. + * + * @see org.opencv.calib3d.Calib3d.RQDecomp3x3 + */ + public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx, Mat Qy, Mat Qz) + { + + double[] retVal = RQDecomp3x3_0(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj, Qx.nativeObj, Qy.nativeObj, Qz.nativeObj); + + return retVal; + } + +/** + *

+ * Computes an RQ decomposition of 3x3 matrices. + * + * The function computes an RQ decomposition using the given rotations. This + * function is used in "decomposeProjectionMatrix" to decompose the left 3x3 + * submatrix of a projection matrix into a camera and a rotation matrix. + * + * It optionally returns three rotation matrices, one for each axis, and the + * three Euler angles in degrees (as the return value) that could be used in + * OpenGL. Note, there is always more than one sequence of rotations about the + * three principal axes that results in the same orientation of an object, e.g. + * see [Slabaugh]. The returned three rotation matrices and the corresponding three Euler + * angles are only one of the possible solutions.
+ * + * @param src 3x3 input matrix. + * @param mtxR Output 3x3 upper-triangular matrix. + * @param mtxQ Output 3x3 orthogonal matrix. + * + * @see org.opencv.calib3d.Calib3d.RQDecomp3x3 + */ + public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ) + { + + double[] retVal = RQDecomp3x3_1(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj); + + return retVal; + } + + + // + // C++: void Rodrigues(Mat src, Mat& dst, Mat& jacobian = Mat()) + // + +/** + *

+ * Converts a rotation matrix to a rotation vector or vice versa. + * + * theta <- norm(r) + * r <- r/theta + * R = cos(theta) I + (1 - cos(theta)) r r^T + sin(theta) + * |0 -r_z r_y| + * |r_z 0 -r_x| + * |-r_y r_x 0| + * + * The inverse transformation can also be done easily, since + * + * sin(theta) + * |0 -r_z r_y| + * |r_z 0 -r_x| + * |-r_y r_x 0| + * = (R - R^T)/2 + * + * A rotation vector is a convenient and most compact representation of a + * rotation matrix (since any rotation matrix has just 3 degrees of freedom). + * The representation is used in the global 3D geometry optimization procedures + * like "calibrateCamera", "stereoCalibrate", or "solvePnP".
+ * + * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3). + * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), + * respectively. + * @param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a + * matrix of partial derivatives of the output array components with respect to + * the input array components. + * + * @see org.opencv.calib3d.Calib3d.Rodrigues + */ + public static void Rodrigues(Mat src, Mat dst, Mat jacobian) + { + + Rodrigues_0(src.nativeObj, dst.nativeObj, jacobian.nativeObj); + + return; + } + +/** + *

+ * Converts a rotation matrix to a rotation vector or vice versa. + * + * theta <- norm(r) + * r <- r/theta + * R = cos(theta) I + (1 - cos(theta)) r r^T + sin(theta) + * |0 -r_z r_y| + * |r_z 0 -r_x| + * |-r_y r_x 0| + * + * The inverse transformation can also be done easily, since + * + * sin(theta) + * |0 -r_z r_y| + * |r_z 0 -r_x| + * |-r_y r_x 0| + * = (R - R^T)/2 + * + * A rotation vector is a convenient and most compact representation of a + * rotation matrix (since any rotation matrix has just 3 degrees of freedom). + * The representation is used in the global 3D geometry optimization procedures + * like "calibrateCamera", "stereoCalibrate", or "solvePnP".
+ * + * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3). + * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), + * respectively. + * + * @see org.opencv.calib3d.Calib3d.Rodrigues + */ + public static void Rodrigues(Mat src, Mat dst) + { + + Rodrigues_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: double calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON)) + // + +/** + *

+ * Finds the camera intrinsic and extrinsic parameters from several views of a + * calibration pattern. + * + * The function estimates the intrinsic camera parameters and extrinsic + * parameters for each of the views. The algorithm is based on [Zhang2000] and + * [BouguetMCT]. The coordinates of 3D object points and their corresponding 2D + * projections in each view must be specified. That may be achieved by using an + * object with a known geometry and easily detectable feature points. + * Such an object is called a calibration rig or calibration pattern, and OpenCV + * has built-in support for a chessboard as a calibration rig (see + * "findChessboardCorners"). Currently, initialization of intrinsic parameters + * (when CV_CALIB_USE_INTRINSIC_GUESS is not set) is only + * implemented for planar calibration patterns (where Z-coordinates of the + * object points must be all zeros). 3D calibration rigs can also be used as + * long as initial cameraMatrix is provided. + * + * The algorithm performs the following steps: + * + * • Compute the initial intrinsic parameters (the option only available + * for planar calibration patterns) or read them from the input parameters. The + * distortion coefficients are all set to zeros initially unless some of + * CV_CALIB_FIX_K? are specified. + * • Estimate the initial camera pose as if the intrinsic parameters have + * been already known. This is done using "solvePnP". + * • Run the global Levenberg-Marquardt optimization algorithm to minimize + * the reprojection error, that is, the total sum of squared distances between + * the observed feature points imagePoints and the projected (using + * the current estimates for camera parameters and the poses) object points + * objectPoints. See "projectPoints" for details. + *

+ * The function returns the final re-projection error. + * + * Note: + * + * If you use a non-square (=non-NxN) grid and "findChessboardCorners" for + * calibration, and calibrateCamera returns bad values (zero + * distortion coefficients, an image center very far from (w/2-0.5,h/2-0.5), + * and/or large differences between f_x and f_y (ratios of + * 10:1 or more)), then you have probably used patternSize=cvSize(rows,cols) + * instead of using patternSize=cvSize(cols,rows) in + * "findChessboardCorners".
+ * + * @param objectPoints In the new interface it is a vector of vectors of + * calibration pattern points in the calibration pattern coordinate space. The + * outer vector contains as many elements as the number of the pattern views. If + * the same calibration pattern is shown in each view and it is fully visible, + * all the vectors will be the same. Although, it is possible to use partially + * occluded patterns, or even different patterns in different views. Then, the + * vectors will be different. The points are 3D, but since they are in a pattern + * coordinate system, then, if the rig is planar, it may make sense to put the + * model to a XY coordinate plane so that Z-coordinate of each input object + * point is 0. + * + *

+ * In the old interface all the vectors of object points from different views + * are concatenated together.
+ * @param imagePoints In the new interface it is a vector of vectors of the + * projections of calibration pattern points. imagePoints.size() + * and objectPoints.size() and imagePoints[i].size() + * must be equal to objectPoints[i].size() for each i. + * + *

+ * In the old interface all the vectors of object points from different views + * are concatenated together.
+ * @param imageSize Size of the image used only to initialize the intrinsic + * camera matrix. + * @param cameraMatrix Output 3x3 floating-point camera matrix A = + *

+ * |f_x 0 c_x| + * |0 f_y c_y| + * |0 0 1|. + * If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO + * are specified, some or all of fx, fy, cx, cy must be initialized + * before calling the function.
+ * @param distCoeffs Output vector of distortion coefficients (k_1, k_2, + * p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. + * @param rvecs Output vector of rotation vectors (see "Rodrigues") estimated + * for each pattern view. That is, each k-th rotation vector together with the + * corresponding k-th translation vector (see the next output parameter + * description) brings the calibration pattern from the model coordinate space + * (in which object points are specified) to the world coordinate space, that + * is, a real position of the calibration pattern in the k-th pattern view + * (k=0.. *M* -1). + * @param tvecs Output vector of translation vectors estimated for each pattern + * view. + * @param flags Different flags that may be zero or a combination of the + * following values: + *
+ * • CV_CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid + * initial values of fx, fy, cx, cy that are optimized further. + * Otherwise, (cx, cy) is initially set to the image center + * (imageSize is used), and focal distances are computed in a + * least-squares fashion. Note that if intrinsic parameters are known, there is + * no need to use this function just to estimate extrinsic parameters. Use + * "solvePnP" instead. + * • CV_CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during + * the global optimization. It stays at the center or at a different location + * specified when CV_CALIB_USE_INTRINSIC_GUESS is set too. + * • CV_CALIB_FIX_ASPECT_RATIO The function considers only fy + * as a free parameter. The ratio fx/fy stays the same as in the + * input cameraMatrix. When CV_CALIB_USE_INTRINSIC_GUESS + * is not set, the actual input values of fx and fy + * are ignored, only their ratio is computed and used further. + * • CV_CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients + * (p_1, p_2) are set to zeros and stay zero. + * • CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6 The corresponding radial + * distortion coefficient is not changed during the optimization. If + * CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the + * supplied distCoeffs matrix is used. Otherwise, it is set to 0. + * • CV_CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To + * provide backward compatibility, this extra flag should be explicitly + * specified to make the calibration function use the rational model and return + * 8 coefficients. If the flag is not set, the function computes and returns + * only 5 distortion coefficients. + *
+ * @param criteria Termination criteria for the iterative optimization + * algorithm. + * + * @see org.opencv.calib3d.Calib3d.calibrateCamera + * @see org.opencv.calib3d.Calib3d#initCameraMatrix2D + * @see org.opencv.calib3d.Calib3d#stereoCalibrate + * @see org.opencv.calib3d.Calib3d#findChessboardCorners + * @see org.opencv.calib3d.Calib3d#solvePnP + * @see org.opencv.imgproc.Imgproc#undistort + */ + public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags, TermCriteria criteria) + { + Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); + Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints); + Mat rvecs_mat = new Mat(); + Mat tvecs_mat = new Mat(); + double retVal = calibrateCamera_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon); + Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); + Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); + return retVal; + }
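A hedged sketch of calling this binding directly. The per-view point Mats would be accumulated elsewhere (for example from findChessboardCorners), the image size is a placeholder, and the no-flags overload assumed here is the shortest of the generated variants:

// Hedged usage sketch for Calib3d.calibrateCamera().
List<Mat> objectPoints = new ArrayList<Mat>(); // one MatOfPoint3f per view
List<Mat> imagePoints = new ArrayList<Mat>();  // one MatOfPoint2f per view
// ... fill both lists with corresponding pattern/corner points ...
Mat cameraMatrix = new Mat();
Mat distCoeffs = new Mat();
List<Mat> rvecs = new ArrayList<Mat>();
List<Mat> tvecs = new ArrayList<Mat>();
double rms = Calib3d.calibrateCamera(objectPoints, imagePoints,
    new Size(640, 480), cameraMatrix, distCoeffs, rvecs, tvecs);
// rms is the final re-projection error described above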

+ +/** + * Finds the camera intrinsic and extrinsic parameters from several views of a + * calibration pattern. + * + * The function estimates the intrinsic camera parameters and extrinsic + * parameters for each of the views. The algorithm is based on [Zhang2000] and + * [BouguetMCT]. The coordinates of 3D object points and their corresponding 2D + * projections in each view must be specified. That may be achieved by using an + * object with a known geometry and easily detectable feature points. + * Such an object is called a calibration rig or calibration pattern, and OpenCV + * has built-in support for a chessboard as a calibration rig (see + * "findChessboardCorners"). Currently, initialization of intrinsic parameters + * (when CV_CALIB_USE_INTRINSIC_GUESS is not set) is only + * implemented for planar calibration patterns (where Z-coordinates of the + * object points must be all zeros). 3D calibration rigs can also be used as + * long as initial cameraMatrix is provided. + * + * The algorithm performs the following steps: + * + * • Compute the initial intrinsic parameters (the option only available + * for planar calibration patterns) or read them from the input parameters. The + * distortion coefficients are all set to zeros initially unless some of + * CV_CALIB_FIX_K? are specified. + * • Estimate the initial camera pose as if the intrinsic parameters have + * been already known. This is done using "solvePnP". + * • Run the global Levenberg-Marquardt optimization algorithm to minimize + * the reprojection error, that is, the total sum of squared distances between + * the observed feature points imagePoints and the projected (using + * the current estimates for camera parameters and the poses) object points + * objectPoints. See "projectPoints" for details. + *

The function returns the final re-projection error.

+ * + *

Note:

+ * + *

If you use a non-square (=non-NxN) grid and "findChessboardCorners" for + * calibration, and calibrateCamera returns bad values (zero + * distortion coefficients, an image center very far from (w/2-0.5,h/2-0.5), + * and/or large differences between f_x and f_y (ratios of + * 10:1 or more)), then you have probably used patternSize=cvSize(rows,cols) + * instead of using patternSize=cvSize(cols,rows) in + * "findChessboardCorners".

+ * + * @param objectPoints In the new interface it is a vector of vectors of + * calibration pattern points in the calibration pattern coordinate space. The + * outer vector contains as many elements as the number of pattern views. If + * the same calibration pattern is shown in each view and it is fully visible, + * all the vectors will be the same. However, it is possible to use partially + * occluded patterns, or even different patterns in different views. Then, the + * vectors will be different. The points are 3D, but since they are in a pattern + * coordinate system, if the rig is planar, it may make sense to put the + * model onto the XY coordinate plane so that the Z-coordinate of each input object + * point is 0. + * + *

In the old interface all the vectors of object points from different views + * are concatenated together.

+ * @param imagePoints In the new interface it is a vector of vectors of the + * projections of calibration pattern points. imagePoints.size() + * and objectPoints.size() and imagePoints[i].size() + * must be equal to objectPoints[i].size() for each i. + * + *

In the old interface all the vectors of object points from different views + * are concatenated together.

+ * @param imageSize Size of the image used only to initialize the intrinsic + * camera matrix. + * @param cameraMatrix Output 3x3 floating-point camera matrix A = + *

|f_x   0   c_x|
|  0  f_y  c_y|
|  0    0    1|

If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.

+ * @param distCoeffs Output vector of distortion coefficients (k_1, k_2, + * p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. + * @param rvecs Output vector of rotation vectors (see "Rodrigues") estimated + * for each pattern view. That is, each k-th rotation vector together with the + * corresponding k-th translation vector (see the next output parameter + * description) brings the calibration pattern from the model coordinate space + * (in which object points are specified) to the world coordinate space, that + * is, a real position of the calibration pattern in the k-th pattern view + * (k=0.. *M* -1). + * @param tvecs Output vector of translation vectors estimated for each pattern + * view. + * @param flags Different flags that may be zero or a combination of the + * following values: + *
    + *
• CV_CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid + * initial values of fx, fy, cx, cy that are optimized further. + * Otherwise, (cx, cy) is initially set to the image center + * (imageSize is used), and focal distances are computed in a + * least-squares fashion. Note that, if the intrinsic parameters are known, there is + * no need to use this function just to estimate extrinsic parameters. Use + * "solvePnP" instead. + *
  • CV_CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during + * the global optimization. It stays at the center or at a different location + * specified when CV_CALIB_USE_INTRINSIC_GUESS is set too. + *
• CV_CALIB_FIX_ASPECT_RATIO The function considers only fy + * as a free parameter. The ratio fx/fy stays the same as in the + * input cameraMatrix. When CV_CALIB_USE_INTRINSIC_GUESS + * is not set, the actual input values of fx and fy + * are ignored; only their ratio is computed and used further. + *
  • CV_CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients + * (p_1, p_2) are set to zeros and stay zero. + *
  • CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6 The corresponding radial + * distortion coefficient is not changed during the optimization. If + * CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the + * supplied distCoeffs matrix is used. Otherwise, it is set to 0. + *
• CV_CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To + * provide backward compatibility, this extra flag should be explicitly + * specified to make the calibration function use the rational model and return + * 8 coefficients. If the flag is not set, the function computes and returns + * only 5 distortion coefficients. + *
+ * + * @see org.opencv.calib3d.Calib3d.calibrateCamera + * @see org.opencv.calib3d.Calib3d#initCameraMatrix2D + * @see org.opencv.calib3d.Calib3d#stereoCalibrate + * @see org.opencv.calib3d.Calib3d#findChessboardCorners + * @see org.opencv.calib3d.Calib3d#solvePnP + * @see org.opencv.imgproc.Imgproc#undistort + */ + public static double calibrateCamera(List objectPoints, List imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List rvecs, List tvecs, int flags) + { + Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); + Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints); + Mat rvecs_mat = new Mat(); + Mat tvecs_mat = new Mat(); + double retVal = calibrateCamera_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags); + Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); + Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); + return retVal; + } + +/** + *

Finds the camera intrinsic and extrinsic parameters from several views of a + * calibration pattern.

+ * + *

The function estimates the intrinsic camera parameters and extrinsic + * parameters for each of the views. The algorithm is based on [Zhang2000] and + * [BouguetMCT]. The coordinates of 3D object points and their corresponding 2D + * projections in each view must be specified. That may be achieved by using an + * object with a known geometry and easily detectable feature points. + * Such an object is called a calibration rig or calibration pattern, and OpenCV + * has built-in support for a chessboard as a calibration rig (see + * "findChessboardCorners"). Currently, initialization of intrinsic parameters + * (when CV_CALIB_USE_INTRINSIC_GUESS is not set) is only + * implemented for planar calibration patterns (where Z-coordinates of the + * object points must be all zeros). 3D calibration rigs can also be used as + * long as initial cameraMatrix is provided.

+ * + *

The algorithm performs the following steps:

+ *
    + *
  • Compute the initial intrinsic parameters (the option only available + * for planar calibration patterns) or read them from the input parameters. The + * distortion coefficients are all set to zeros initially unless some of + * CV_CALIB_FIX_K? are specified. + *
  • Estimate the initial camera pose as if the intrinsic parameters have + * been already known. This is done using "solvePnP". + *
  • Run the global Levenberg-Marquardt optimization algorithm to minimize + * the reprojection error, that is, the total sum of squared distances between + * the observed feature points imagePoints and the projected (using + * the current estimates for camera parameters and the poses) object points + * objectPoints. See "projectPoints" for details. + *
+ * + *

The function returns the final re-projection error.

+ * + *

Note:

+ * + *

If you use a non-square (=non-NxN) grid and "findChessboardCorners" for + * calibration, and calibrateCamera returns bad values (zero + * distortion coefficients, an image center very far from (w/2-0.5,h/2-0.5), + * and/or large differences between f_x and f_y (ratios of + * 10:1 or more)), then you have probably used patternSize=cvSize(rows,cols) + * instead of using patternSize=cvSize(cols,rows) in + * "findChessboardCorners".

+ * + * @param objectPoints In the new interface it is a vector of vectors of + * calibration pattern points in the calibration pattern coordinate space. The + * outer vector contains as many elements as the number of pattern views. If + * the same calibration pattern is shown in each view and it is fully visible, + * all the vectors will be the same. However, it is possible to use partially + * occluded patterns, or even different patterns in different views. Then, the + * vectors will be different. The points are 3D, but since they are in a pattern + * coordinate system, if the rig is planar, it may make sense to put the + * model onto the XY coordinate plane so that the Z-coordinate of each input object + * point is 0. + * + *

In the old interface all the vectors of object points from different views + * are concatenated together.

+ * @param imagePoints In the new interface it is a vector of vectors of the + * projections of calibration pattern points. imagePoints.size() + * and objectPoints.size() and imagePoints[i].size() + * must be equal to objectPoints[i].size() for each i. + * + *

In the old interface all the vectors of object points from different views + * are concatenated together.

+ * @param imageSize Size of the image used only to initialize the intrinsic + * camera matrix. + * @param cameraMatrix Output 3x3 floating-point camera matrix A = + *

|f_x   0   c_x|
|  0  f_y  c_y|
|  0    0    1|

If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.

+ * @param distCoeffs Output vector of distortion coefficients (k_1, k_2, + * p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. + * @param rvecs Output vector of rotation vectors (see "Rodrigues") estimated + * for each pattern view. That is, each k-th rotation vector together with the + * corresponding k-th translation vector (see the next output parameter + * description) brings the calibration pattern from the model coordinate space + * (in which object points are specified) to the world coordinate space, that + * is, a real position of the calibration pattern in the k-th pattern view + * (k=0.. *M* -1). + * @param tvecs Output vector of translation vectors estimated for each pattern + * view. + * + * @see org.opencv.calib3d.Calib3d.calibrateCamera + * @see org.opencv.calib3d.Calib3d#initCameraMatrix2D + * @see org.opencv.calib3d.Calib3d#stereoCalibrate + * @see org.opencv.calib3d.Calib3d#findChessboardCorners + * @see org.opencv.calib3d.Calib3d#solvePnP + * @see org.opencv.imgproc.Imgproc#undistort + */ + public static double calibrateCamera(List objectPoints, List imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List rvecs, List tvecs) + { + Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); + Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints); + Mat rvecs_mat = new Mat(); + Mat tvecs_mat = new Mat(); + double retVal = calibrateCamera_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj); + Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); + Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); + return retVal; + } + + + // + // C++: void calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength, Point2d& principalPoint, double& aspectRatio) + // + +/** + *

Computes useful camera characteristics from the camera matrix.

+ * + *

The function computes various useful camera characteristics from the + * previously estimated camera matrix.

+ * + * @param cameraMatrix Input camera matrix that can be estimated by + * "calibrateCamera" or "stereoCalibrate". + * @param imageSize Input image size in pixels. + * @param apertureWidth Physical width of the sensor. + * @param apertureHeight Physical height of the sensor. + * @param fovx Output field of view in degrees along the horizontal sensor axis. + * @param fovy Output field of view in degrees along the vertical sensor axis. + * @param focalLength Focal length of the lens in mm. + * @param principalPoint Principal point in pixels. + * @param aspectRatio f_y/f_x + * + * @see org.opencv.calib3d.Calib3d.calibrationMatrixValues + */ + public static void calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double[] fovx, double[] fovy, double[] focalLength, Point principalPoint, double[] aspectRatio) + { + double[] fovx_out = new double[1]; + double[] fovy_out = new double[1]; + double[] focalLength_out = new double[1]; + double[] principalPoint_out = new double[2]; + double[] aspectRatio_out = new double[1]; + calibrationMatrixValues_0(cameraMatrix.nativeObj, imageSize.width, imageSize.height, apertureWidth, apertureHeight, fovx_out, fovy_out, focalLength_out, principalPoint_out, aspectRatio_out); + if(fovx!=null) fovx[0] = (double)fovx_out[0]; + if(fovy!=null) fovy[0] = (double)fovy_out[0]; + if(focalLength!=null) focalLength[0] = (double)focalLength_out[0]; + if(principalPoint!=null){ principalPoint.x = principalPoint_out[0]; principalPoint.y = principalPoint_out[1]; } + if(aspectRatio!=null) aspectRatio[0] = (double)aspectRatio_out[0]; + return; + } + + + // + // C++: void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat& rvec3, Mat& tvec3, Mat& dr3dr1 = Mat(), Mat& dr3dt1 = Mat(), Mat& dr3dr2 = Mat(), Mat& dr3dt2 = Mat(), Mat& dt3dr1 = Mat(), Mat& dt3dt1 = Mat(), Mat& dt3dr2 = Mat(), Mat& dt3dt2 = Mat()) + // + +/** + *
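A short usage sketch of the wrapper above; the 640x480 image size and the 6.17x4.55 mm sensor dimensions are assumed values chosen only for illustration:

double[] fovx = new double[1], fovy = new double[1];
double[] focalLength = new double[1], aspectRatio = new double[1];
Point principalPoint = new Point();
// apertureWidth/apertureHeight are the assumed physical sensor size in mm
Calib3d.calibrationMatrixValues(cameraMatrix, new Size(640, 480),
        6.17, 4.55, fovx, fovy, focalLength, principalPoint, aspectRatio);
// fovx/fovy come back in degrees; focalLength is in mm because the aperture is in mm
System.out.println("FOV " + fovx[0] + " x " + fovy[0]
        + " deg, f = " + focalLength[0] + " mm");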

Combines two rotation-and-shift transformations.

+ * + *

The functions compute:

+ * + *

rvec3 = rodrigues^(-1)(rodrigues(rvec2) * rodrigues(rvec1))
tvec3 = rodrigues(rvec2) * tvec1 + tvec2

+ * + *

where rodrigues denotes a rotation vector to a rotation matrix + * transformation, and rodrigues^(-1) denotes the inverse + * transformation. See "Rodrigues" for details.

+ * + *

Also, the functions can compute the derivatives of the output vectors with + * regard to the input vectors (see "matMulDeriv"). + * The functions are used inside "stereoCalibrate" but can also be used in your + * own code where Levenberg-Marquardt or another gradient-based solver is used + * to optimize a function that contains a matrix multiplication.

+ * + * @param rvec1 First rotation vector. + * @param tvec1 First translation vector. + * @param rvec2 Second rotation vector. + * @param tvec2 Second translation vector. + * @param rvec3 Output rotation vector of the superposition. + * @param tvec3 Output translation vector of the superposition. + * @param dr3dr1 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dr3dt1 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dr3dr2 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dr3dt2 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dt3dr1 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dt3dt1 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dt3dr2 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * @param dt3dt2 Optional output derivatives of rvec3 or + * tvec3 with regard to rvec1, rvec2, + * tvec1 and tvec2, respectively. + * + * @see org.opencv.calib3d.Calib3d.composeRT + */ + public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1, Mat dt3dr2, Mat dt3dt2) + { + + composeRT_0(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj, dt3dt1.nativeObj, dt3dr2.nativeObj, dt3dt2.nativeObj); + + return; + } + +/** + *
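For instance, composing two poses with the shorter overload that follows; rvecA, tvecA, rvecB, and tvecB are assumed 3x1 rotation/translation vectors, e.g. obtained from solvePnP:

Mat rvec3 = new Mat(), tvec3 = new Mat();
Calib3d.composeRT(rvecA, tvecA, rvecB, tvecB, rvec3, tvec3);
// rvec3/tvec3 now describe applying transform A first, then transform B.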

Combines two rotation-and-shift transformations.

+ * + *

The functions compute:

+ * + *

rvec3 = rodrigues^(-1)(rodrigues(rvec2) * rodrigues(rvec1))
tvec3 = rodrigues(rvec2) * tvec1 + tvec2

+ * + *

where rodrigues denotes a rotation vector to a rotation matrix + * transformation, and rodrigues^(-1) denotes the inverse + * transformation. See "Rodrigues" for details.

+ * + *

Also, the functions can compute the derivatives of the output vectors with + * regard to the input vectors (see "matMulDeriv"). + * The functions are used inside "stereoCalibrate" but can also be used in your + * own code where Levenberg-Marquardt or another gradient-based solver is used + * to optimize a function that contains a matrix multiplication.

+ * + * @param rvec1 First rotation vector. + * @param tvec1 First translation vector. + * @param rvec2 Second rotation vector. + * @param tvec2 Second translation vector. + * @param rvec3 Output rotation vector of the superposition. + * @param tvec3 Output translation vector of the superposition. + * + * @see org.opencv.calib3d.Calib3d.composeRT + */ + public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3) + { + + composeRT_1(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj); + + return; + } + + + // + // C++: void convertPointsFromHomogeneous(Mat src, Mat& dst) + // + +/** + *

Converts points from homogeneous to Euclidean space.

+ * + *

The function converts points from homogeneous to Euclidean space using + * perspective projection. That is, each point (x1, x2,... x(n-1), xn) is + * converted to (x1/xn, x2/xn,..., x(n-1)/xn). When + * xn=0, the output point coordinates will be (0,0,0,...).

+ * + * @param src Input vector of N-dimensional points. + * @param dst Output vector of N-1-dimensional points. + * + * @see org.opencv.calib3d.Calib3d.convertPointsFromHomogeneous + */ + public static void convertPointsFromHomogeneous(Mat src, Mat dst) + { + + convertPointsFromHomogeneous_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void convertPointsToHomogeneous(Mat src, Mat& dst) + // + +/** + *

Converts points from Euclidean to homogeneous space.

+ * + *

The function converts points from Euclidean to homogeneous space by appending + * 1's to the tuple of point coordinates. That is, each point (x1, x2,..., + * xn) is converted to (x1, x2,..., xn, 1).

+ * + * @param src Input vector of N-dimensional points. + * @param dst Output vector of N+1-dimensional points. + * + * @see org.opencv.calib3d.Calib3d.convertPointsToHomogeneous + */ + public static void convertPointsToHomogeneous(Mat src, Mat dst) + { + + convertPointsToHomogeneous_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void correctMatches(Mat F, Mat points1, Mat points2, Mat& newPoints1, Mat& newPoints2) + // + +/** + *
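A round-trip sketch through the two conversion wrappers; the sample coordinates are arbitrary:

MatOfPoint2f pts = new MatOfPoint2f(new Point(2, 4), new Point(6, 8));
Mat homog = new Mat();
Calib3d.convertPointsToHomogeneous(pts, homog);          // (2,4) -> (2,4,1)
Mat euclidean = new Mat();
Calib3d.convertPointsFromHomogeneous(homog, euclidean);  // divides by the last coordinate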

Refines coordinates of corresponding points.

+ * + *

The function implements the Optimal Triangulation Method (see Multiple View + * Geometry for details). For each given point correspondence points1[i] <-> + * points2[i], and a fundamental matrix F, it computes the corrected + * correspondences newPoints1[i] <-> newPoints2[i] that minimize the geometric + * error d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2 + * (where d(a,b) is the geometric distance between points a + * and b) subject to the epipolar constraint newPoints2^T * F * + * newPoints1 = 0.

+ * + * @param F 3x3 fundamental matrix. + * @param points1 1xN array containing the first set of points. + * @param points2 1xN array containing the second set of points. + * @param newPoints1 The optimized points1. + * @param newPoints2 The optimized points2. + * + * @see org.opencv.calib3d.Calib3d.correctMatches + */ + public static void correctMatches(Mat F, Mat points1, Mat points2, Mat newPoints1, Mat newPoints2) + { + + correctMatches_0(F.nativeObj, points1.nativeObj, points2.nativeObj, newPoints1.nativeObj, newPoints2.nativeObj); + + return; + } + + + // + // C++: void decomposeProjectionMatrix(Mat projMatrix, Mat& cameraMatrix, Mat& rotMatrix, Mat& transVect, Mat& rotMatrixX = Mat(), Mat& rotMatrixY = Mat(), Mat& rotMatrixZ = Mat(), Mat& eulerAngles = Mat()) + // + +/** + *
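A usage sketch, assuming points1/points2 are 1xN CV_32FC2 mats of matched points (as the parameters above require) and F came from findFundamentalMat:

Mat newPoints1 = new Mat(), newPoints2 = new Mat();
Calib3d.correctMatches(F, points1, points2, newPoints1, newPoints2);
// The corrected pairs satisfy the epipolar constraint newPoints2^T * F * newPoints1 = 0.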

Decomposes a projection matrix into a rotation matrix and a camera matrix.

+ * + *

The function computes a decomposition of a projection matrix into a + * calibration and a rotation matrix and the position of a camera.

+ * + *

It optionally returns three rotation matrices, one for each axis, and three + * Euler angles that could be used in OpenGL. Note that there is always more than + * one sequence of rotations about the three principal axes that results in the + * same orientation of an object, e.g. see [Slabaugh]. The returned three rotation + * matrices and the corresponding three Euler angles are only one of the possible + * solutions.

+ * + *

The function is based on "RQDecomp3x3".

+ * + * @param projMatrix 3x4 input projection matrix P. + * @param cameraMatrix Output 3x3 camera matrix K. + * @param rotMatrix Output 3x3 external rotation matrix R. + * @param transVect Output 4x1 translation vector T. + * @param rotMatrixX a rotMatrixX + * @param rotMatrixY a rotMatrixY + * @param rotMatrixZ a rotMatrixZ + * @param eulerAngles Optional three-element vector containing three Euler + * angles of rotation in degrees. + * + * @see org.opencv.calib3d.Calib3d.decomposeProjectionMatrix + */ + public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY, Mat rotMatrixZ, Mat eulerAngles) + { + + decomposeProjectionMatrix_0(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj, rotMatrixY.nativeObj, rotMatrixZ.nativeObj, eulerAngles.nativeObj); + + return; + } + +/** + *
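For instance, extracting K, R, and the homogeneous camera position with the shorter overload defined below; P is an assumed 3x4 CV_64F projection matrix:

Mat K = new Mat(), R = new Mat(), t = new Mat();
Calib3d.decomposeProjectionMatrix(P, K, R, t);
// t is a 4x1 homogeneous vector; divide by t.get(3, 0)[0] to get the camera center.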

Decomposes a projection matrix into a rotation matrix and a camera matrix.

+ * + *

The function computes a decomposition of a projection matrix into a + * calibration and a rotation matrix and the position of a camera.

+ * + *

It optionally returns three rotation matrices, one for each axis, and three + * Euler angles that could be used in OpenGL. Note that there is always more than + * one sequence of rotations about the three principal axes that results in the + * same orientation of an object, e.g. see [Slabaugh]. The returned three rotation + * matrices and the corresponding three Euler angles are only one of the possible + * solutions.

+ * + *

The function is based on "RQDecomp3x3".

+ * + * @param projMatrix 3x4 input projection matrix P. + * @param cameraMatrix Output 3x3 camera matrix K. + * @param rotMatrix Output 3x3 external rotation matrix R. + * @param transVect Output 4x1 translation vector T. + * + * @see org.opencv.calib3d.Calib3d.decomposeProjectionMatrix + */ + public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect) + { + + decomposeProjectionMatrix_1(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj); + + return; + } + + + // + // C++: void drawChessboardCorners(Mat& image, Size patternSize, vector_Point2f corners, bool patternWasFound) + // + +/** + *

Renders the detected chessboard corners.

+ * + *

The function draws individual chessboard corners detected either as red + * circles if the board was not found, or as colored corners connected with + * lines if the board was found.

+ * + * @param image Destination image. It must be an 8-bit color image. + * @param patternSize Number of inner corners per a chessboard row and column + * (patternSize = cv.Size(points_per_row,points_per_column)). + * @param corners Array of detected corners, the output of findChessboardCorners. + * @param patternWasFound Parameter indicating whether the complete board was + * found or not. The return value of "findChessboardCorners" should be passed + * here. + * + * @see org.opencv.calib3d.Calib3d.drawChessboardCorners + */ + public static void drawChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, boolean patternWasFound) + { + Mat corners_mat = corners; + drawChessboardCorners_0(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj, patternWasFound); + + return; + } + + + // + // C++: int estimateAffine3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99) + // + +/** + *

Computes an optimal affine transformation between two 3D point sets.

+ * + *

The function estimates an optimal 3D affine transformation between two 3D + * point sets using the RANSAC algorithm.

+ * + * @param src First input 3D point set. + * @param dst Second input 3D point set. + * @param out Output 3D affine transformation matrix 3 x 4. + * @param inliers Output vector indicating which points are inliers. + * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to + * consider a point as an inlier. + * @param confidence Confidence level, between 0 and 1, for the estimated + * transformation. Anything between 0.95 and 0.99 is usually good enough. Values + * too close to 1 can slow down the estimation significantly. Values lower than + * 0.8-0.9 can result in an incorrectly estimated transformation. + * + * @see org.opencv.calib3d.Calib3d.estimateAffine3D + */ + public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold, double confidence) + { + + int retVal = estimateAffine3D_0(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold, confidence); + + return retVal; + } + +/** + *
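A usage sketch of the method just defined; src and dst are assumed Nx1 CV_32FC3 mats (e.g. MatOfPoint3f) of corresponding 3D points:

Mat affine = new Mat();   // receives the 3x4 affine transform
Mat inliers = new Mat();  // per-point inlier flags set by RANSAC
int found = Calib3d.estimateAffine3D(src, dst, affine, inliers, 3, 0.99);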

Computes an optimal affine transformation between two 3D point sets.

+ * + *

The function estimates an optimal 3D affine transformation between two 3D + * point sets using the RANSAC algorithm.

+ * + * @param src First input 3D point set. + * @param dst Second input 3D point set. + * @param out Output 3D affine transformation matrix 3 x 4. + * @param inliers Output vector indicating which points are inliers. + * + * @see org.opencv.calib3d.Calib3d.estimateAffine3D + */ + public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers) + { + + int retVal = estimateAffine3D_1(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj); + + return retVal; + } + + + // + // C++: void filterSpeckles(Mat& img, double newVal, int maxSpeckleSize, double maxDiff, Mat& buf = Mat()) + // + +/** + *

Filters off small noise blobs (speckles) in the disparity map

+ * + * @param img The input 16-bit signed disparity image + * @param newVal The disparity value used to paint-off the speckles + * @param maxSpeckleSize The maximum speckle size to consider it a speckle. + * Larger blobs are not affected by the algorithm + * @param maxDiff Maximum difference between neighbor disparity pixels to put + * them into the same blob. Note that since StereoBM, StereoSGBM and possibly + * other algorithms return a fixed-point disparity map, where disparity values + * are multiplied by 16, this scale factor should be taken into account when + * specifying this parameter value. + * @param buf The optional temporary buffer to avoid memory allocation within + * the function. + * + * @see org.opencv.calib3d.Calib3d.filterSpeckles + */ + public static void filterSpeckles(Mat img, double newVal, int maxSpeckleSize, double maxDiff, Mat buf) + { + + filterSpeckles_0(img.nativeObj, newVal, maxSpeckleSize, maxDiff, buf.nativeObj); + + return; + } + +/** + *

Filters off small noise blobs (speckles) in the disparity map

+ * + * @param img The input 16-bit signed disparity image + * @param newVal The disparity value used to paint-off the speckles + * @param maxSpeckleSize The maximum speckle size to consider it a speckle. + * Larger blobs are not affected by the algorithm + * @param maxDiff Maximum difference between neighbor disparity pixels to put + * them into the same blob. Note that since StereoBM, StereoSGBM and possibly + * other algorithms return a fixed-point disparity map, where disparity values + * are multiplied by 16, this scale factor should be taken into account when + * specifying this parameter value. + * + * @see org.opencv.calib3d.Calib3d.filterSpeckles + */ + public static void filterSpeckles(Mat img, double newVal, int maxSpeckleSize, double maxDiff) + { + + filterSpeckles_1(img.nativeObj, newVal, maxSpeckleSize, maxDiff); + + return; + } + + + // + // C++: bool findChessboardCorners(Mat image, Size patternSize, vector_Point2f& corners, int flags = CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE) + // + +/** + *
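A usage sketch, assuming disparity is a fixed-point CV_16S map produced by StereoBM (disparity values scaled by 16):

// Paint blobs of up to 200 pixels whose internal disparity jumps exceed
// one pixel (16 in fixed-point units) with the value 0.
Calib3d.filterSpeckles(disparity, 0, 200, 16);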

Finds the positions of internal corners of the chessboard.

+ * + *

The function attempts to determine whether the input image is a view of the + * chessboard pattern and locate the internal chessboard corners. The function + * returns a non-zero value if all of the corners are found and they are placed + * in a certain order (row by row, left to right in every row). Otherwise, if + * the function fails to find all the corners or reorder them, it returns 0. For + * example, a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, + * that is, points where the black squares touch each other. + * The detected coordinates are approximate, and to determine their positions + * more accurately, the function calls "cornerSubPix". + * You may also use the function "cornerSubPix" with different parameters if + * returned coordinates are not accurate enough. + * Sample usage of detecting and drawing chessboard corners:

// C++ code:

Size patternsize(8,6); //interior number of corners
Mat gray = ....; //source image
vector<Point2f> corners; //this will be filled by the detected corners

//CALIB_CB_FAST_CHECK saves a lot of time on images
//that do not contain any chessboard corners
bool patternfound = findChessboardCorners(gray, patternsize, corners,
        CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
        + CALIB_CB_FAST_CHECK);

if(patternfound)
    cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
        TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

drawChessboardCorners(img, patternsize, Mat(corners), patternfound);

Note: The function requires white space (like a square-thick border, the + * wider the better) around the board to make the detection more robust in + * various environments. Otherwise, if there is no border and the background is + * dark, the outer black squares cannot be segmented properly and so the square + * grouping and ordering algorithm fails. + *

+ * + * @param image Source chessboard view. It must be an 8-bit grayscale or color + * image. + * @param patternSize Number of inner corners per chessboard row and column + * (patternSize = cvSize(points_per_row,points_per_column) = + * cvSize(columns,rows)). + * @param corners Output array of detected corners. + * @param flags Various operation flags that can be zero or a combination of the + * following values: + *
    + *
  • CV_CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the + * image to black and white, rather than a fixed threshold level (computed from + * the average image brightness). + *
  • CV_CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with + * "equalizeHist" before applying fixed or adaptive thresholding. + *
  • CV_CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, + * perimeter, square-like shape) to filter out false quads extracted at the + * contour retrieval stage. + *
  • CALIB_CB_FAST_CHECK Run a fast check on the image that looks for + * chessboard corners, and shortcut the call if none is found. This can + * drastically speed up the call in the degenerate condition when no chessboard + * is observed. + *
+ * + * @see org.opencv.calib3d.Calib3d.findChessboardCorners + */ + public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, int flags) + { + Mat corners_mat = corners; + boolean retVal = findChessboardCorners_0(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj, flags); + + return retVal; + } + +/** + *
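The C++ sample above translates to this binding roughly as follows; img and gray are assumed input mats, and TermCriteria/Imgproc come from org.opencv.core and org.opencv.imgproc:

Size patternsize = new Size(8, 6); // interior number of corners
MatOfPoint2f corners = new MatOfPoint2f(); // filled by the detected corners
boolean patternfound = Calib3d.findChessboardCorners(gray, patternsize, corners,
        Calib3d.CALIB_CB_ADAPTIVE_THRESH + Calib3d.CALIB_CB_NORMALIZE_IMAGE
        + Calib3d.CALIB_CB_FAST_CHECK);
if (patternfound)
    Imgproc.cornerSubPix(gray, corners, new Size(11, 11), new Size(-1, -1),
            new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 30, 0.1));
Calib3d.drawChessboardCorners(img, patternsize, corners, patternfound);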

Finds the positions of internal corners of the chessboard.

+ * + *

The function attempts to determine whether the input image is a view of the + * chessboard pattern and locate the internal chessboard corners. The function + * returns a non-zero value if all of the corners are found and they are placed + * in a certain order (row by row, left to right in every row). Otherwise, if + * the function fails to find all the corners or reorder them, it returns 0. For + * example, a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, + * that is, points where the black squares touch each other. + * The detected coordinates are approximate, and to determine their positions + * more accurately, the function calls "cornerSubPix". + * You may also use the function "cornerSubPix" with different parameters if + * returned coordinates are not accurate enough. + * Sample usage of detecting and drawing chessboard corners:

// C++ code:

Size patternsize(8,6); //interior number of corners
Mat gray = ....; //source image
vector<Point2f> corners; //this will be filled by the detected corners

//CALIB_CB_FAST_CHECK saves a lot of time on images
//that do not contain any chessboard corners
bool patternfound = findChessboardCorners(gray, patternsize, corners,
        CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
        + CALIB_CB_FAST_CHECK);

if(patternfound)
    cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
        TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

drawChessboardCorners(img, patternsize, Mat(corners), patternfound);

Note: The function requires white space (like a square-thick border, the + * wider the better) around the board to make the detection more robust in + * various environments. Otherwise, if there is no border and the background is + * dark, the outer black squares cannot be segmented properly and so the square + * grouping and ordering algorithm fails. + *

+ * + * @param image Source chessboard view. It must be an 8-bit grayscale or color + * image. + * @param patternSize Number of inner corners per chessboard row and column + * (patternSize = cvSize(points_per_row,points_per_column) = + * cvSize(columns,rows)). + * @param corners Output array of detected corners. + * + * @see org.opencv.calib3d.Calib3d.findChessboardCorners + */ + public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners) + { + Mat corners_mat = corners; + boolean retVal = findChessboardCorners_1(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj); + + return retVal; + } + + + // + // C++: bool findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID, Ptr_FeatureDetector blobDetector = new SimpleBlobDetector()) + // + + // Unknown type 'Ptr_FeatureDetector' (I), skipping the function + + + // + // C++: bool findCirclesGridDefault(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID) + // + + public static boolean findCirclesGridDefault(Mat image, Size patternSize, Mat centers, int flags) + { + + boolean retVal = findCirclesGridDefault_0(image.nativeObj, patternSize.width, patternSize.height, centers.nativeObj, flags); + + return retVal; + } + + public static boolean findCirclesGridDefault(Mat image, Size patternSize, Mat centers) + { + + boolean retVal = findCirclesGridDefault_1(image.nativeObj, patternSize.width, patternSize.height, centers.nativeObj); + + return retVal; + } + + + // + // C++: Mat findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method = FM_RANSAC, double param1 = 3., double param2 = 0.99, Mat& mask = Mat()) + // + +/** + *

Calculates a fundamental matrix from the corresponding points in two images.

+ * + *

The epipolar geometry is described by the following equation:

+ * + *

[p_2; 1]^T F [p_1; 1] = 0

+ * + *

where F is a fundamental matrix, p_1 and p_2 are + * corresponding points in the first and the second images, respectively.

+ * + *

The function calculates the fundamental matrix using one of the four methods + * listed below and returns the found fundamental matrix. Normally just one + * matrix is found. But in case of the 7-point algorithm, the function may + * return up to 3 solutions (9 x 3 matrix that stores all 3 matrices + * sequentially).

+ * + *

The calculated fundamental matrix may be passed further to "computeCorrespondEpilines" + * that finds the epipolar lines corresponding to the specified points. It can + * also be passed to "stereoRectifyUncalibrated" to compute the rectification + * transformation. + *

// C++ code:
// Example. Estimation of fundamental matrix using the RANSAC algorithm

int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);

// initialize the points here...
for(int i = 0; i < point_count; i++)
{
    points1[i] = ...;
    points2[i] = ...;
}

Mat fundamental_matrix =
    findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);

+ * + * @param points1 Array of N points from the first image. The point + * coordinates should be floating-point (single or double precision). + * @param points2 Array of the second image points of the same size and format + * as points1. + * @param method Method for computing a fundamental matrix. + *
    + *
  • CV_FM_7POINT for a 7-point algorithm. N = 7 + *
  • CV_FM_8POINT for an 8-point algorithm. N >= 8 + *
  • CV_FM_RANSAC for the RANSAC algorithm. N >= 8 + *
  • CV_FM_LMEDS for the LMedS algorithm. N >= 8 + *
+ * @param param1 Parameter used for RANSAC. It is the maximum distance from a + * point to an epipolar line in pixels, beyond which the point is considered an + * outlier and is not used for computing the final fundamental matrix. It can be + * set to something like 1-3, depending on the accuracy of the point + * localization, image resolution, and the image noise. + * @param param2 Parameter used for the RANSAC or LMedS methods only. It + * specifies a desirable level of confidence (probability) that the estimated + * matrix is correct. + * @param mask a mask + * + * @see org.opencv.calib3d.Calib3d.findFundamentalMat + */ + public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double param1, double param2, Mat mask) + { + Mat points1_mat = points1; + Mat points2_mat = points2; + Mat retVal = new Mat(findFundamentalMat_0(points1_mat.nativeObj, points2_mat.nativeObj, method, param1, param2, mask.nativeObj)); + + return retVal; + } + +/** + *
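The RANSAC example above, restated for this binding; matches1/matches2 are assumed List<Point> collections of corresponding coordinates from the two images:

MatOfPoint2f points1 = new MatOfPoint2f();
MatOfPoint2f points2 = new MatOfPoint2f();
points1.fromList(matches1);
points2.fromList(matches2);
Mat fundamentalMatrix = Calib3d.findFundamentalMat(
        points1, points2, Calib3d.FM_RANSAC, 3, 0.99);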

Calculates a fundamental matrix from the corresponding points in two images.

+ * + *

The epipolar geometry is described by the following equation:

+ * + *

[p_2; 1]^T F [p_1; 1] = 0

+ * + *

where F is a fundamental matrix, p_1 and p_2 are + * corresponding points in the first and the second images, respectively.

+ * + *

The function calculates the fundamental matrix using one of the four methods + * listed below and returns the found fundamental matrix. Normally just one + * matrix is found. But in case of the 7-point algorithm, the function may + * return up to 3 solutions (9 x 3 matrix that stores all 3 matrices + * sequentially).

+ * + *

The calculated fundamental matrix may be passed further to "computeCorrespondEpilines" + * that finds the epipolar lines corresponding to the specified points. It can + * also be passed to "stereoRectifyUncalibrated" to compute the rectification + * transformation. + *

// C++ code:
// Example. Estimation of fundamental matrix using the RANSAC algorithm

int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);

// initialize the points here...
for(int i = 0; i < point_count; i++)
{
    points1[i] = ...;
    points2[i] = ...;
}

Mat fundamental_matrix =
    findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);

+ * + * @param points1 Array of N points from the first image. The point + * coordinates should be floating-point (single or double precision). + * @param points2 Array of the second image points of the same size and format + * as points1. + * @param method Method for computing a fundamental matrix. + *
    + *
  • CV_FM_7POINT for a 7-point algorithm. N = 7 + *
  • CV_FM_8POINT for an 8-point algorithm. N >= 8 + *
  • CV_FM_RANSAC for the RANSAC algorithm. N >= 8 + *
  • CV_FM_LMEDS for the LMedS algorithm. N >= 8 + *
+ * @param param1 Parameter used for RANSAC. It is the maximum distance from a + * point to an epipolar line in pixels, beyond which the point is considered an + * outlier and is not used for computing the final fundamental matrix. It can be + * set to something like 1-3, depending on the accuracy of the point + * localization, image resolution, and the image noise. + * @param param2 Parameter used for the RANSAC or LMedS methods only. It + * specifies a desirable level of confidence (probability) that the estimated + * matrix is correct. + * + * @see org.opencv.calib3d.Calib3d.findFundamentalMat + */ + public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double param1, double param2) + { + Mat points1_mat = points1; + Mat points2_mat = points2; + Mat retVal = new Mat(findFundamentalMat_1(points1_mat.nativeObj, points2_mat.nativeObj, method, param1, param2)); + + return retVal; + } + +/** + *

Calculates a fundamental matrix from the corresponding points in two images.

+ * + *

The epipolar geometry is described by the following equation:

+ * + *

[p_2; 1]^T F [p_1; 1] = 0

+ * + *

where F is a fundamental matrix, p_1 and p_2 are + * corresponding points in the first and the second images, respectively.

+ * + *

The function calculates the fundamental matrix using one of the four methods + * listed below and returns the found fundamental matrix. Normally just one + * matrix is found. But in case of the 7-point algorithm, the function may + * return up to 3 solutions (9 x 3 matrix that stores all 3 matrices + * sequentially).

+ * + *

The calculated fundamental matrix may be passed further to "computeCorrespondEpilines" + * that finds the epipolar lines corresponding to the specified points. It can + * also be passed to "stereoRectifyUncalibrated" to compute the rectification + * transformation. + *

// C++ code:
// Example. Estimation of fundamental matrix using the RANSAC algorithm

int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);

// initialize the points here...
for(int i = 0; i < point_count; i++)
{
    points1[i] = ...;
    points2[i] = ...;
}

Mat fundamental_matrix =
    findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);

+ * + * @param points1 Array of N points from the first image. The point + * coordinates should be floating-point (single or double precision). + * @param points2 Array of the second image points of the same size and format + * as points1. + * + * @see org.opencv.calib3d.Calib3d.findFundamentalMat + */ + public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2) + { + Mat points1_mat = points1; + Mat points2_mat = points2; + Mat retVal = new Mat(findFundamentalMat_2(points1_mat.nativeObj, points2_mat.nativeObj)); + + return retVal; + } + + + // + // C++: Mat findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, int method = 0, double ransacReprojThreshold = 3, Mat& mask = Mat()) + // + +/** + *

Finds a perspective transformation between two planes.

+ * + *

The functions find and return the perspective transformation H + * between the source and the destination planes:

+ * + *

s_i [x'_i y'_i 1] ~ H [x_i y_i 1]

+ * + *

so that the back-projection error

+ * + *

sum_i [ (x'_i - (h_11 x_i + h_12 y_i + h_13)/(h_31 x_i + h_32 y_i + h_33))^2
      + (y'_i - (h_21 x_i + h_22 y_i + h_23)/(h_31 x_i + h_32 y_i + h_33))^2 ]

+ * + *

is minimized. If the parameter method is set to the default + * value 0, the function uses all the point pairs to compute an initial + * homography estimate with a simple least-squares scheme.

+ * + *

However, if not all of the point pairs (srcPoints_i,dstPoints_i) + * fit the rigid perspective transformation (that is, there are some outliers), + * this initial estimate will be poor. + * In this case, you can use one of the two robust methods. Both methods, + * RANSAC and LMeDS, try many different random subsets + * of the corresponding point pairs (of four pairs each), estimate the + * homography matrix using this subset and a simple least-squares algorithm, and + * then compute the quality/goodness of the computed homography (which is the + * number of inliers for RANSAC or the median re-projection error for LMeDS). + * The best subset is then used to produce the initial estimate of the + * homography matrix and the mask of inliers/outliers.

+ * + *

Regardless of the method, robust or not, the computed homography matrix is + * refined further (using inliers only in case of a robust method) with the + * Levenberg-Marquardt method to reduce the re-projection error even more.

+ * + *

The method RANSAC can handle practically any ratio of outliers + * but it needs a threshold to distinguish inliers from outliers. + * The method LMeDS does not need any threshold but it works + * correctly only when there are more than 50% of inliers. Finally, if there are + * no outliers and the noise is rather small, use the default method + * (method=0).

+ * + *

The function is used to find initial intrinsic and extrinsic matrices. + * Homography matrix is determined up to a scale. Thus, it is normalized so that + * h_33=1.

+ * + * @param srcPoints Coordinates of the points in the original plane, a matrix of + * the type CV_32FC2 or vector. + * @param dstPoints Coordinates of the points in the target plane, a matrix of + * the type CV_32FC2 or a vector. + * @param method Method used to computed a homography matrix. The following + * methods are possible: + *
    + *
  • 0 - a regular method using all the points + *
  • CV_RANSAC - RANSAC-based robust method + *
  • CV_LMEDS - Least-Median robust method + *
+ * @param ransacReprojThreshold Maximum allowed reprojection error to treat a + * point pair as an inlier (used in the RANSAC method only). That is, if + * + *

|dstPoints_i - convertPointsHomogeneous(H * srcPoints_i)| > ransacReprojThreshold

+ * + *

then the point i is considered an outlier. If srcPoints + * and dstPoints are measured in pixels, it usually makes sense to + * set this parameter somewhere in the range of 1 to 10.

+ * @param mask Optional output mask set by a robust method (CV_RANSAC + * or CV_LMEDS). Note that the input mask values are ignored. + * + * @see org.opencv.calib3d.Calib3d.findHomography + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.video.Video#estimateRigidTransform + * @see org.opencv.imgproc.Imgproc#getAffineTransform + * @see org.opencv.imgproc.Imgproc#getPerspectiveTransform + */ + public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask) + { + Mat srcPoints_mat = srcPoints; + Mat dstPoints_mat = dstPoints; + Mat retVal = new Mat(findHomography_0(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold, mask.nativeObj)); + + return retVal; + } + +/** + *
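A usage sketch with the RANSAC method and an inlier mask; srcPoints/dstPoints are assumed MatOfPoint2f correspondences measured in pixels:

Mat mask = new Mat();
Mat H = Calib3d.findHomography(srcPoints, dstPoints, Calib3d.RANSAC, 3, mask);
// Rows of mask set to 1 mark the correspondences kept as inliers.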

Finds a perspective transformation between two planes.

+ * + *

The functions find and return the perspective transformation H + * between the source and the destination planes:

+ * + *

s_i [x'_i y'_i 1] ~ H [x_i y_i 1]

+ * + *

so that the back-projection error

+ * + *

sum_i [ (x'_i - (h_11 x_i + h_12 y_i + h_13)/(h_31 x_i + h_32 y_i + h_33))^2
      + (y'_i - (h_21 x_i + h_22 y_i + h_23)/(h_31 x_i + h_32 y_i + h_33))^2 ]

+ * + *

is minimized. If the parameter method is set to the default + * value 0, the function uses all the point pairs to compute an initial + * homography estimate with a simple least-squares scheme.

+ * + *

However, if not all of the point pairs (srcPoints_i,dstPoints_i) + * fit the rigid perspective transformation (that is, there are some outliers), + * this initial estimate will be poor. + * In this case, you can use one of the two robust methods. Both methods, + * RANSAC and LMeDS, try many different random subsets + * of the corresponding point pairs (of four pairs each), estimate the + * homography matrix using this subset and a simple least-squares algorithm, and + * then compute the quality/goodness of the computed homography (which is the + * number of inliers for RANSAC or the median re-projection error for LMeDS). + * The best subset is then used to produce the initial estimate of the + * homography matrix and the mask of inliers/outliers.

+ * + *

Regardless of the method, robust or not, the computed homography matrix is + * refined further (using inliers only in case of a robust method) with the + * Levenberg-Marquardt method to reduce the re-projection error even more.

+ * + *

The method RANSAC can handle practically any ratio of outliers + * but it needs a threshold to distinguish inliers from outliers. + * The method LMeDS does not need any threshold but it works + * correctly only when there are more than 50% of inliers. Finally, if there are + * no outliers and the noise is rather small, use the default method + * (method=0).

+ * + *

The function is used to find initial intrinsic and extrinsic matrices. + * Homography matrix is determined up to a scale. Thus, it is normalized so that + * h_33=1.

+ * + * @param srcPoints Coordinates of the points in the original plane, a matrix of + * the type CV_32FC2 or vector. + * @param dstPoints Coordinates of the points in the target plane, a matrix of + * the type CV_32FC2 or a vector. + * @param method Method used to computed a homography matrix. The following + * methods are possible: + *
    + *
  • 0 - a regular method using all the points + *
  • CV_RANSAC - RANSAC-based robust method + *
  • CV_LMEDS - Least-Median robust method + *
+ * @param ransacReprojThreshold Maximum allowed reprojection error to treat a + * point pair as an inlier (used in the RANSAC method only). That is, if + * + *

|dstPoints_i - convertPointsHomogeneous(H * srcPoints_i)| > ransacReprojThreshold

+ * + *

then the point i is considered an outlier. If srcPoints + * and dstPoints are measured in pixels, it usually makes sense to + * set this parameter somewhere in the range of 1 to 10.

+ * + * @see org.opencv.calib3d.Calib3d.findHomography + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.video.Video#estimateRigidTransform + * @see org.opencv.imgproc.Imgproc#getAffineTransform + * @see org.opencv.imgproc.Imgproc#getPerspectiveTransform + */ + public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold) + { + Mat srcPoints_mat = srcPoints; + Mat dstPoints_mat = dstPoints; + Mat retVal = new Mat(findHomography_1(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold)); + + return retVal; + } + +/** + *

Finds a perspective transformation between two planes.

+ * + *

The functions find and return the perspective transformation H + * between the source and the destination planes:

+ * + *

s_i [x'_i y'_i 1] ~ H [x_i y_i 1]

+ * + *

so that the back-projection error

+ * + *

sum_i [ (x'_i - (h_11 x_i + h_12 y_i + h_13)/(h_31 x_i + h_32 y_i + h_33))^2
      + (y'_i - (h_21 x_i + h_22 y_i + h_23)/(h_31 x_i + h_32 y_i + h_33))^2 ]

+ * + *

is minimized. If the parameter method is set to the default + * value 0, the function uses all the point pairs to compute an initial + * homography estimate with a simple least-squares scheme.

+ * + *

However, if not all of the point pairs (srcPoints_i,dstPoints_i) + * fit the rigid perspective transformation (that is, there are some outliers), + * this initial estimate will be poor. + * In this case, you can use one of the two robust methods. Both methods, + * RANSAC and LMeDS, try many different random subsets + * of the corresponding point pairs (of four pairs each), estimate the + * homography matrix using this subset and a simple least-squares algorithm, and + * then compute the quality/goodness of the computed homography (which is the + * number of inliers for RANSAC or the median re-projection error for LMeDS). + * The best subset is then used to produce the initial estimate of the + * homography matrix and the mask of inliers/outliers.

+ * + *

Regardless of the method, robust or not, the computed homography matrix is + * refined further (using inliers only in case of a robust method) with the + * Levenberg-Marquardt method to reduce the re-projection error even more.

+ * + *

The method RANSAC can handle practically any ratio of outliers + * but it needs a threshold to distinguish inliers from outliers. + * The method LMeDS does not need any threshold but it works + * correctly only when there are more than 50% of inliers. Finally, if there are + * no outliers and the noise is rather small, use the default method + * (method=0).

+ * + *

The function is used to find initial intrinsic and extrinsic matrices. + * Homography matrix is determined up to a scale. Thus, it is normalized so that + * h_33=1.

+ * + * @param srcPoints Coordinates of the points in the original plane, a matrix of + * the type CV_32FC2 or vector. + * @param dstPoints Coordinates of the points in the target plane, a matrix of + * the type CV_32FC2 or a vector. + * + * @see org.opencv.calib3d.Calib3d.findHomography + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.video.Video#estimateRigidTransform + * @see org.opencv.imgproc.Imgproc#getAffineTransform + * @see org.opencv.imgproc.Imgproc#getPerspectiveTransform + */ + public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints) + { + Mat srcPoints_mat = srcPoints; + Mat dstPoints_mat = dstPoints; + Mat retVal = new Mat(findHomography_2(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj)); + + return retVal; + } + + + // + // C++: Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize = Size(), Rect* validPixROI = 0, bool centerPrincipalPoint = false) + // + +/** + *

Returns the new camera matrix based on the free scaling parameter.

+ * + *

The function computes and returns the optimal new camera matrix based on the + * free scaling parameter. By varying this parameter, you may retrieve only + * sensible pixels (alpha=0), keep all the original image pixels if + * there is valuable information in the corners (alpha=1), or get + * something in between. When alpha>0, the undistortion result is + * likely to have some black pixels corresponding to "virtual" pixels outside of + * the captured distorted image. The original camera matrix, distortion + * coefficients, the computed new camera matrix, and newImageSize + * should be passed to "initUndistortRectifyMap" to produce the maps for + * "remap".

+ * + * @param cameraMatrix Input camera matrix. + * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param imageSize Original image size. + * @param alpha Free scaling parameter between 0 (when all the pixels in the + * undistorted image are valid) and 1 (when all the source image pixels are + * retained in the undistorted image). See "stereoRectify" for details. + * @param newImgSize a newImgSize + * @param validPixROI Optional output rectangle that outlines all-good-pixels + * region in the undistorted image. See roi1, roi2 description in + * "stereoRectify". + * @param centerPrincipalPoint Optional flag that indicates whether in the new + * camera matrix the principal point should be at the image center or not. By + * default, the principal point is chosen to best fit a subset of the source + * image (determined by alpha) to the corrected image. + * + * @see org.opencv.calib3d.Calib3d.getOptimalNewCameraMatrix + */ + public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize, Rect validPixROI, boolean centerPrincipalPoint) + { + double[] validPixROI_out = new double[4]; + Mat retVal = new Mat(getOptimalNewCameraMatrix_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha, newImgSize.width, newImgSize.height, validPixROI_out, centerPrincipalPoint)); + if(validPixROI!=null){ validPixROI.x = (int)validPixROI_out[0]; validPixROI.y = (int)validPixROI_out[1]; validPixROI.width = (int)validPixROI_out[2]; validPixROI.height = (int)validPixROI_out[3]; } + return retVal; + } + +/** + *
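A usage sketch chaining the result into undistortion via the shorter overload below; img, cameraMatrix, and distCoeffs are assumed to come from a prior calibration:

// alpha = 1 keeps every source pixel (possibly with black borders).
Mat newK = Calib3d.getOptimalNewCameraMatrix(
        cameraMatrix, distCoeffs, img.size(), 1.0);
Mat undistorted = new Mat();
Imgproc.undistort(img, undistorted, cameraMatrix, distCoeffs, newK);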

Returns the new camera matrix based on the free scaling parameter.

+ * + *

The function computes and returns the optimal new camera matrix based on the + * free scaling parameter. By varying this parameter, you may retrieve only + * sensible pixels (when alpha=0), keep all the original image pixels if + * there is valuable information in the corners (when alpha=1), or get + * something in between. When alpha>0, the undistortion result is + * likely to have some black pixels corresponding to "virtual" pixels outside of + * the captured distorted image. The original camera matrix, distortion + * coefficients, the computed new camera matrix, and newImageSize + * should be passed to "initUndistortRectifyMap" to produce the maps for + * "remap".

+ * + * @param cameraMatrix Input camera matrix. + * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param imageSize Original image size. + * @param alpha Free scaling parameter between 0 (when all the pixels in the + * undistorted image are valid) and 1 (when all the source image pixels are + * retained in the undistorted image). See "stereoRectify" for details. + * + * @see org.opencv.calib3d.Calib3d.getOptimalNewCameraMatrix + */ + public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha) + { + + Mat retVal = new Mat(getOptimalNewCameraMatrix_1(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha)); + + return retVal; + } + + + // + // C++: Rect getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int SADWindowSize) + // + + public static Rect getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int SADWindowSize) + { + + Rect retVal = new Rect(getValidDisparityROI_0(roi1.x, roi1.y, roi1.width, roi1.height, roi2.x, roi2.y, roi2.width, roi2.height, minDisparity, numberOfDisparities, SADWindowSize)); + + return retVal; + } + + + // + // C++: Mat initCameraMatrix2D(vector_vector_Point3f objectPoints, vector_vector_Point2f imagePoints, Size imageSize, double aspectRatio = 1.) + // + +/** + *

Finds an initial camera matrix from 3D-2D point correspondences.

+ * + *

The function estimates and returns an initial camera matrix for the camera + * calibration process. + * Currently, the function only supports planar calibration patterns, which are + * patterns where each object point has z-coordinate = 0.

+ * + * @param objectPoints Vector of vectors of the calibration pattern points in + * the calibration pattern coordinate space. In the old interface all the + * per-view vectors are concatenated. See "calibrateCamera" for details. + * @param imagePoints Vector of vectors of the projections of the calibration + * pattern points. In the old interface all the per-view vectors are + * concatenated. + * @param imageSize Image size in pixels used to initialize the principal point. + * @param aspectRatio If it is zero or negative, both f_x and + * f_y are estimated independently. Otherwise, f_x = f_y * + * aspectRatio. + * + * @see org.opencv.calib3d.Calib3d.initCameraMatrix2D + */ + public static Mat initCameraMatrix2D(List objectPoints, List imagePoints, Size imageSize, double aspectRatio) + { + List objectPoints_tmplm = new ArrayList((objectPoints != null) ? objectPoints.size() : 0); + Mat objectPoints_mat = Converters.vector_vector_Point3f_to_Mat(objectPoints, objectPoints_tmplm); + List imagePoints_tmplm = new ArrayList((imagePoints != null) ? imagePoints.size() : 0); + Mat imagePoints_mat = Converters.vector_vector_Point2f_to_Mat(imagePoints, imagePoints_tmplm); + Mat retVal = new Mat(initCameraMatrix2D_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, aspectRatio)); + + return retVal; + } + +/** + *
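A sketch of the input shapes this wrapper expects (the two "views" below are fabricated; real per-view points would come from something like findChessboardCorners, and two synthetic views give at best a rough estimate):

import java.util.ArrayList;
import java.util.List;
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class InitCameraMatrixDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // Planar pattern: a 3x3 grid with z = 0, as the doc requires.
        List<Point3> grid = new ArrayList<Point3>();
        List<Point> view1 = new ArrayList<Point>();
        List<Point> view2 = new ArrayList<Point>();
        for (int y = 0; y < 3; y++)
            for (int x = 0; x < 3; x++) {
                grid.add(new Point3(x * 30, y * 30, 0));
                view1.add(new Point(100 + x * 60, 80 + y * 60)); // fabricated projections
                view2.add(new Point(130 + x * 55, 95 + y * 58));
            }
        List<MatOfPoint3f> objectPoints = new ArrayList<MatOfPoint3f>();
        List<MatOfPoint2f> imagePoints = new ArrayList<MatOfPoint2f>();
        objectPoints.add(new MatOfPoint3f(grid.toArray(new Point3[0])));
        objectPoints.add(new MatOfPoint3f(grid.toArray(new Point3[0])));
        imagePoints.add(new MatOfPoint2f(view1.toArray(new Point[0])));
        imagePoints.add(new MatOfPoint2f(view2.toArray(new Point[0])));
        Mat K = Calib3d.initCameraMatrix2D(objectPoints, imagePoints, new Size(640, 480), 1.0);
        System.out.println("initial K =\n" + K.dump());
    }
}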

Finds an initial camera matrix from 3D-2D point correspondences.

+ * + *

The function estimates and returns an initial camera matrix for the camera + * calibration process. + * Currently, the function only supports planar calibration patterns, which are + * patterns where each object point has z-coordinate = 0.

+ * + * @param objectPoints Vector of vectors of the calibration pattern points in + * the calibration pattern coordinate space. In the old interface all the + * per-view vectors are concatenated. See "calibrateCamera" for details. + * @param imagePoints Vector of vectors of the projections of the calibration + * pattern points. In the old interface all the per-view vectors are + * concatenated. + * @param imageSize Image size in pixels used to initialize the principal point. + * + * @see org.opencv.calib3d.Calib3d.initCameraMatrix2D + */ + public static Mat initCameraMatrix2D(List objectPoints, List imagePoints, Size imageSize) + { + List objectPoints_tmplm = new ArrayList((objectPoints != null) ? objectPoints.size() : 0); + Mat objectPoints_mat = Converters.vector_vector_Point3f_to_Mat(objectPoints, objectPoints_tmplm); + List imagePoints_tmplm = new ArrayList((imagePoints != null) ? imagePoints.size() : 0); + Mat imagePoints_mat = Converters.vector_vector_Point2f_to_Mat(imagePoints, imagePoints_tmplm); + Mat retVal = new Mat(initCameraMatrix2D_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height)); + + return retVal; + } + + + // + // C++: void matMulDeriv(Mat A, Mat B, Mat& dABdA, Mat& dABdB) + // + +/** + *

Computes partial derivatives of the matrix product for each multiplied + * matrix.

+ * + *

The function computes partial derivatives of the elements of the matrix + * product A*B with regard to the elements of each of the two input + * matrices. The function is used to compute the Jacobian matrices in + * "stereoCalibrate" but can also be used in any other similar optimization + * function.

+ * + * @param A First multiplied matrix. + * @param B Second multiplied matrix. + * @param dABdA First output derivative matrix d(A*B)/dA of size + * A.rows*B.cols x (A.rows*A.cols). + * @param dABdB Second output derivative matrix d(A*B)/dB of size + * A.rows*B.cols x (B.rows*B.cols). + * + * @see org.opencv.calib3d.Calib3d.matMulDeriv + */ + public static void matMulDeriv(Mat A, Mat B, Mat dABdA, Mat dABdB) + { + + matMulDeriv_0(A.nativeObj, B.nativeObj, dABdA.nativeObj, dABdB.nativeObj); + + return; + } + + + // + // C++: void projectPoints(vector_Point3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, vector_double distCoeffs, vector_Point2f& imagePoints, Mat& jacobian = Mat(), double aspectRatio = 0) + // + +/** + *

Projects 3D points to an image plane.

+ * + *

The function computes projections of 3D points to the image plane given + * intrinsic and extrinsic camera parameters. Optionally, the function computes + * Jacobians - matrices of partial derivatives of image points coordinates (as + * functions of all the input parameters) with respect to the particular + * parameters, intrinsic and/or extrinsic. The Jacobians are used during the + * global optimization in "calibrateCamera", "solvePnP", and "stereoCalibrate". + * The function itself can also be used to compute a re-projection error given + * the current intrinsic and extrinsic parameters.

+ * + *

Note: By setting rvec=tvec=(0,0,0) or by setting + * cameraMatrix to a 3x3 identity matrix, or by passing zero + * distortion coefficients, you can get various useful partial cases of the + * function. This means that you can compute the distorted coordinates for a + * sparse set of points or apply a perspective transformation (and also compute + * the derivatives) in the ideal zero-distortion setup.

+ * + * @param objectPoints Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 + * 3-channel (or vector<Point3f>), where N is the number of points + * in the view. + * @param rvec Rotation vector. See "Rodrigues" for details. + * @param tvec Translation vector. + * @param cameraMatrix Camera matrix A = + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 + * 2-channel, or vector. + * @param jacobian Optional output 2Nx(10+) jacobian matrix of + * derivatives of image points with respect to components of the rotation + * vector, translation vector, focal lengths, coordinates of the principal point + * and the distortion coefficients. In the old interface different components of + * the jacobian are returned via different output parameters. + * @param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter + * is not 0, the function assumes that the aspect ratio (*fx/fy*) is fixed and + * correspondingly adjusts the jacobian matrix. + * + * @see org.opencv.calib3d.Calib3d.projectPoints + */ + public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints, Mat jacobian, double aspectRatio) + { + Mat objectPoints_mat = objectPoints; + Mat distCoeffs_mat = distCoeffs; + Mat imagePoints_mat = imagePoints; + projectPoints_0(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj, jacobian.nativeObj, aspectRatio); + + return; + } + +/** + *
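A minimal sketch projecting three axis points through an ideal, distortion-free camera (intrinsics and the 5-unit standoff are invented; assumes the native library is loaded):

import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class ProjectPointsDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        MatOfPoint3f obj = new MatOfPoint3f(
                new Point3(0, 0, 0), new Point3(1, 0, 0), new Point3(0, 1, 0));
        Mat rvec = Mat.zeros(3, 1, CvType.CV_64F);      // no rotation
        Mat tvec = Mat.zeros(3, 1, CvType.CV_64F);
        tvec.put(2, 0, 5.0);                            // points sit 5 units in front
        Mat K = Mat.eye(3, 3, CvType.CV_64F);           // illustrative intrinsics
        K.put(0, 0, 800); K.put(1, 1, 800); K.put(0, 2, 320); K.put(1, 2, 240);
        MatOfDouble dist = new MatOfDouble(0, 0, 0, 0); // zero distortion
        MatOfPoint2f img = new MatOfPoint2f();
        Calib3d.projectPoints(obj, rvec, tvec, K, dist, img);
        System.out.println(img.dump());                 // expected near (320,240), (480,240), (320,400)
    }
}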

Projects 3D points to an image plane.

+ * + *

The function computes projections of 3D points to the image plane given + * intrinsic and extrinsic camera parameters. Optionally, the function computes + * Jacobians - matrices of partial derivatives of image points coordinates (as + * functions of all the input parameters) with respect to the particular + * parameters, intrinsic and/or extrinsic. The Jacobians are used during the + * global optimization in "calibrateCamera", "solvePnP", and "stereoCalibrate". + * The function itself can also be used to compute a re-projection error given + * the current intrinsic and extrinsic parameters.

+ * + *

Note: By setting rvec=tvec=(0,0,0) or by setting + * cameraMatrix to a 3x3 identity matrix, or by passing zero + * distortion coefficients, you can get various useful partial cases of the + * function. This means that you can compute the distorted coordinates for a + * sparse set of points or apply a perspective transformation (and also compute + * the derivatives) in the ideal zero-distortion setup.

+ * + * @param objectPoints Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 + * 3-channel (or vector<Point3f>), where N is the number of points + * in the view. + * @param rvec Rotation vector. See "Rodrigues" for details. + * @param tvec Translation vector. + * @param cameraMatrix Camera matrix A = + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 + * 2-channel, or vector. + * + * @see org.opencv.calib3d.Calib3d.projectPoints + */ + public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints) + { + Mat objectPoints_mat = objectPoints; + Mat distCoeffs_mat = distCoeffs; + Mat imagePoints_mat = imagePoints; + projectPoints_1(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj); + + return; + } + + + // + // C++: float rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, vector_Mat imgpt1, vector_Mat imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q, double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags) + // + + public static float rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, List imgpt1, List imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat R1, Mat R2, Mat R3, Mat P1, Mat P2, Mat P3, Mat Q, double alpha, Size newImgSize, Rect roi1, Rect roi2, int flags) + { + Mat imgpt1_mat = Converters.vector_Mat_to_Mat(imgpt1); + Mat imgpt3_mat = Converters.vector_Mat_to_Mat(imgpt3); + double[] roi1_out = new double[4]; + double[] roi2_out = new double[4]; + float retVal = rectify3Collinear_0(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, cameraMatrix3.nativeObj, distCoeffs3.nativeObj, imgpt1_mat.nativeObj, imgpt3_mat.nativeObj, imageSize.width, imageSize.height, R12.nativeObj, T12.nativeObj, R13.nativeObj, T13.nativeObj, R1.nativeObj, R2.nativeObj, R3.nativeObj, P1.nativeObj, P2.nativeObj, P3.nativeObj, Q.nativeObj, alpha, newImgSize.width, newImgSize.height, roi1_out, roi2_out, flags); + if(roi1!=null){ roi1.x = (int)roi1_out[0]; roi1.y = (int)roi1_out[1]; roi1.width = (int)roi1_out[2]; roi1.height = (int)roi1_out[3]; } + if(roi2!=null){ roi2.x = (int)roi2_out[0]; roi2.y = (int)roi2_out[1]; roi2.width = (int)roi2_out[2]; roi2.height = (int)roi2_out[3]; } + return retVal; + } + + + // + // C++: void reprojectImageTo3D(Mat disparity, Mat& _3dImage, Mat Q, bool handleMissingValues = false, int ddepth = -1) + // + +/** + *

Reprojects a disparity image to 3D space.

+ * + *

The function transforms a single-channel disparity map to a 3-channel image + * representing a 3D surface. That is, for each pixel (x,y) and the + * corresponding disparity d=disparity(x,y), it computes:

+ * + *

[X Y Z W]^T = Q * [x y disparity(x,y) 1]^T + * _3dImage(x,y) = (X/W, Y/W, Z/W)

+ * + *

The matrix Q can be an arbitrary 4 x 4 matrix (for + * example, the one computed by "stereoRectify"). To reproject a sparse set of + * points {(x,y,d),...} to 3D space, use "perspectiveTransform".

+ * + * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit + * signed or 32-bit floating-point disparity image. + * @param _3dImage Output 3-channel floating-point image of the same size as + * disparity. Each element of _3dImage(x,y) contains + * 3D coordinates of the point (x,y) computed from the disparity + * map. + * @param Q 4 x 4 perspective transformation matrix that can be + * obtained with "stereoRectify". + * @param handleMissingValues Indicates, whether the function should handle + * missing values (i.e. points where the disparity was not computed). If + * handleMissingValues=true, then pixels with the minimal disparity + * that corresponds to the outliers (see :ocv:funcx:"StereoBM.operator()") are + * transformed to 3D points with a very large Z value (currently set to 10000). + * @param ddepth The optional output array depth. If it is -1, the + * output image will have CV_32F depth. ddepth can + * also be set to CV_16S, CV_32S or CV_32F. + * + * @see org.opencv.calib3d.Calib3d.reprojectImageTo3D + */ + public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q, boolean handleMissingValues, int ddepth) + { + + reprojectImageTo3D_0(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj, handleMissingValues, ddepth); + + return; + } + +/** + *
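A toy sketch of the call (the 2x2 disparity values and the identity Q are stand-ins; a real Q comes from stereoRectify and a real disparity map from a block matcher):

import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class Reproject3DDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // Tiny fabricated disparity map, 32-bit float.
        Mat disparity = new Mat(2, 2, CvType.CV_32F);
        disparity.put(0, 0, 16f, 18f, 20f, 22f);
        Mat Q = Mat.eye(4, 4, CvType.CV_64F); // stand-in for the stereoRectify output
        Mat xyz = new Mat();
        Calib3d.reprojectImageTo3D(disparity, xyz, Q);
        System.out.println(xyz.dump());       // 3-channel float, one XYZ triple per pixel
    }
}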

Reprojects a disparity image to 3D space.

+ * + *

The function transforms a single-channel disparity map to a 3-channel image + * representing a 3D surface. That is, for each pixel (x,y) and the + * corresponding disparity d=disparity(x,y), it computes:

+ * + *

[X Y Z W]^T = Q * [x y disparity(x,y) 1]^T + * _3dImage(x,y) = (X/W, Y/W, Z/W)

+ * + *

The matrix Q can be an arbitrary 4 x 4 matrix (for + * example, the one computed by "stereoRectify"). To reproject a sparse set of + * points {(x,y,d),...} to 3D space, use "perspectiveTransform".

+ * + * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit + * signed or 32-bit floating-point disparity image. + * @param _3dImage Output 3-channel floating-point image of the same size as + * disparity. Each element of _3dImage(x,y) contains + * 3D coordinates of the point (x,y) computed from the disparity + * map. + * @param Q 4 x 4 perspective transformation matrix that can be + * obtained with "stereoRectify". + * @param handleMissingValues Indicates, whether the function should handle + * missing values (i.e. points where the disparity was not computed). If + * handleMissingValues=true, then pixels with the minimal disparity + * that corresponds to the outliers (see :ocv:funcx:"StereoBM.operator()") are + * transformed to 3D points with a very large Z value (currently set to 10000). + * + * @see org.opencv.calib3d.Calib3d.reprojectImageTo3D + */ + public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q, boolean handleMissingValues) + { + + reprojectImageTo3D_1(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj, handleMissingValues); + + return; + } + +/** + *

Reprojects a disparity image to 3D space.

+ * + *

The function transforms a single-channel disparity map to a 3-channel image + * representing a 3D surface. That is, for each pixel (x,y) and the + * corresponding disparity d=disparity(x,y), it computes:

+ * + *

[X Y Z W]^T = Q * [x y disparity(x,y) 1]^T + * _3dImage(x,y) = (X/W, Y/W, Z/W)

+ * + *

The matrix Q can be an arbitrary 4 x 4 matrix (for + * example, the one computed by "stereoRectify"). To reproject a sparse set of + * points {(x,y,d),...} to 3D space, use "perspectiveTransform".

+ * + * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit + * signed or 32-bit floating-point disparity image. + * @param _3dImage Output 3-channel floating-point image of the same size as + * disparity. Each element of _3dImage(x,y) contains + * 3D coordinates of the point (x,y) computed from the disparity + * map. + * @param Q 4 x 4 perspective transformation matrix that can be + * obtained with "stereoRectify". + * + * @see org.opencv.calib3d.Calib3d.reprojectImageTo3D + */ + public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q) + { + + reprojectImageTo3D_2(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj); + + return; + } + + + // + // C++: bool solvePnP(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int flags = ITERATIVE) + // + +/** + *

Finds an object pose from 3D-2D point correspondences.

+ * + *

The function estimates the object pose given a set of object points, their + * corresponding image projections, as well as the camera matrix and the + * distortion coefficients.

+ * + * @param objectPoints Array of object points in the object coordinate space, + * 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. + * vector<Point3f> can be also passed here. + * @param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or + * 1xN/Nx1 2-channel, where N is the number of points. vector<Point2f> + * can be also passed here. + * @param cameraMatrix Input camera matrix A = + *

|fx 0 cx| + * |0 fy cy| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param rvec Output rotation vector (see "Rodrigues") that, together with + * tvec, brings points from the model coordinate system to the + * camera coordinate system. + * @param tvec Output translation vector. + * @param useExtrinsicGuess If true (1), the function uses the provided + * rvec and tvec values as initial approximations of + * the rotation and translation vectors, respectively, and further optimizes + * them. + * @param flags Method for solving a PnP problem: + *
    + *
  • CV_ITERATIVE Iterative method is based on Levenberg-Marquardt + * optimization. In this case the function finds such a pose that minimizes + * reprojection error, that is the sum of squared distances between the observed + * projections imagePoints and the projected (using + * "projectPoints") objectPoints. + *
  • CV_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, + * H.-F. Chang "Complete Solution Classification for the Perspective-Three-Point + * Problem". In this case the function requires exactly four object and image + * points. + *
  • CV_EPNP Method has been introduced by F.Moreno-Noguer, V.Lepetit and + * P.Fua in the paper "EPnP: Efficient Perspective-n-Point Camera Pose + * Estimation". + *
+ * + * @see org.opencv.calib3d.Calib3d.solvePnP + */ + public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int flags) + { + Mat objectPoints_mat = objectPoints; + Mat imagePoints_mat = imagePoints; + Mat distCoeffs_mat = distCoeffs; + boolean retVal = solvePnP_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, flags); + + return retVal; + } + +/** + *
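A minimal pose-from-four-coplanar-points sketch (coordinates and intrinsics are invented; assumes the native library is loaded; the default flag is the iterative Levenberg-Marquardt method described above):

import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class SolvePnPDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // Four coplanar model points (a 2x2 square at z=0) and fabricated projections.
        MatOfPoint3f obj = new MatOfPoint3f(
                new Point3(0, 0, 0), new Point3(2, 0, 0), new Point3(2, 2, 0), new Point3(0, 2, 0));
        MatOfPoint2f img = new MatOfPoint2f(
                new Point(300, 220), new Point(380, 225), new Point(377, 300), new Point(298, 295));
        Mat K = Mat.eye(3, 3, CvType.CV_64F);
        K.put(0, 0, 800); K.put(1, 1, 800); K.put(0, 2, 320); K.put(1, 2, 240);
        Mat rvec = new Mat(), tvec = new Mat();
        boolean ok = Calib3d.solvePnP(obj, img, K, new MatOfDouble(0, 0, 0, 0), rvec, tvec);
        System.out.println(ok + "\nrvec=" + rvec.dump() + "\ntvec=" + tvec.dump());
    }
}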

Finds an object pose from 3D-2D point correspondences.

+ * + *

The function estimates the object pose given a set of object points, their + * corresponding image projections, as well as the camera matrix and the + * distortion coefficients.

+ * + * @param objectPoints Array of object points in the object coordinate space, + * 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. + * vector<Point3f> can be also passed here. + * @param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or + * 1xN/Nx1 2-channel, where N is the number of points. vector<Point2f> + * can be also passed here. + * @param cameraMatrix Input camera matrix A = + *

|fx 0 cx| + * |0 fy cy| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param rvec Output rotation vector (see "Rodrigues") that, together with + * tvec, brings points from the model coordinate system to the + * camera coordinate system. + * @param tvec Output translation vector. + * + * @see org.opencv.calib3d.Calib3d.solvePnP + */ + public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec) + { + Mat objectPoints_mat = objectPoints; + Mat imagePoints_mat = imagePoints; + Mat distCoeffs_mat = distCoeffs; + boolean retVal = solvePnP_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj); + + return retVal; + } + + + // + // C++: void solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, float reprojectionError = 8.0, int minInliersCount = 100, Mat& inliers = Mat(), int flags = ITERATIVE) + // + +/** + *

Finds an object pose from 3D-2D point correspondences using the RANSAC + * scheme.

+ * + *

The function estimates an object pose given a set of object points, their + * corresponding image projections, as well as the camera matrix and the + * distortion coefficients. This function finds such a pose that minimizes + * reprojection error, that is, the sum of squared distances between the + * observed projections imagePoints and the projected (using + * "projectPoints") objectPoints. The use of RANSAC makes the + * function resistant to outliers. The function is parallelized with the TBB + * library.

+ * + * @param objectPoints Array of object points in the object coordinate space, + * 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. + * vector<Point3f> can be also passed here. + * @param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or + * 1xN/Nx1 2-channel, where N is the number of points. vector<Point2f> + * can be also passed here. + * @param cameraMatrix Input camera matrix A = + *

|fx 0 cx| + * |0 fy cy| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param rvec Output rotation vector (see "Rodrigues") that, together with + * tvec, brings points from the model coordinate system to the + * camera coordinate system. + * @param tvec Output translation vector. + * @param useExtrinsicGuess If true (1), the function uses the provided + * rvec and tvec values as initial approximations of + * the rotation and translation vectors, respectively, and further optimizes + * them. + * @param iterationsCount Number of iterations. + * @param reprojectionError Inlier threshold value used by the RANSAC procedure. + * The parameter value is the maximum allowed distance between the observed and + * computed point projections to consider it an inlier. + * @param minInliersCount Number of inliers. If the algorithm at some stage + * finds more inliers than minInliersCount, it finishes. + * @param inliers Output vector that contains indices of inliers in + * objectPoints and imagePoints. + * @param flags Method for solving a PnP problem (see "solvePnP"). + * + * @see org.opencv.calib3d.Calib3d.solvePnPRansac + */ + public static void solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, int minInliersCount, Mat inliers, int flags) + { + Mat objectPoints_mat = objectPoints; + Mat imagePoints_mat = imagePoints; + Mat distCoeffs_mat = distCoeffs; + solvePnPRansac_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError, minInliersCount, inliers.nativeObj, flags); + + return; + } + +/** + *
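A short sketch with one deliberate outlier, to show why the RANSAC variant is useful (all coordinates are invented; the short overload used here relies on the defaults listed in the C++ signature above):

import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class SolvePnPRansacDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        MatOfPoint3f obj = new MatOfPoint3f(
                new Point3(0, 0, 0), new Point3(2, 0, 0), new Point3(2, 2, 0),
                new Point3(0, 2, 0), new Point3(1, 1, 0), new Point3(2, 1, 0));
        MatOfPoint2f img = new MatOfPoint2f(
                new Point(300, 220), new Point(380, 225), new Point(377, 300),
                new Point(298, 295), new Point(339, 260), new Point(500, 50)); // last point: outlier
        Mat K = Mat.eye(3, 3, CvType.CV_64F);
        K.put(0, 0, 800); K.put(1, 1, 800); K.put(0, 2, 320); K.put(1, 2, 240);
        Mat rvec = new Mat(), tvec = new Mat();
        Calib3d.solvePnPRansac(obj, img, K, new MatOfDouble(0, 0, 0, 0), rvec, tvec);
        System.out.println("rvec=" + rvec.dump() + "\ntvec=" + tvec.dump());
    }
}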

Finds an object pose from 3D-2D point correspondences using the RANSAC + * scheme.

+ * + *

The function estimates an object pose given a set of object points, their + * corresponding image projections, as well as the camera matrix and the + * distortion coefficients. This function finds such a pose that minimizes + * reprojection error, that is, the sum of squared distances between the + * observed projections imagePoints and the projected (using + * "projectPoints") objectPoints. The use of RANSAC makes the + * function resistant to outliers. The function is parallelized with the TBB + * library.

+ * + * @param objectPoints Array of object points in the object coordinate space, + * 3xN/Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. + * vector<Point3f> can be also passed here. + * @param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or + * 1xN/Nx1 2-channel, where N is the number of points. vector<Point2f> + * can be also passed here. + * @param cameraMatrix Input camera matrix A = + *

|fx 0 cx| + * |0 fy cy| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param rvec Output rotation vector (see "Rodrigues") that, together with + * tvec, brings points from the model coordinate system to the + * camera coordinate system. + * @param tvec Output translation vector. + * + * @see org.opencv.calib3d.Calib3d.solvePnPRansac + */ + public static void solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec) + { + Mat objectPoints_mat = objectPoints; + Mat imagePoints_mat = imagePoints; + Mat distCoeffs_mat = distCoeffs; + solvePnPRansac_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj); + + return; + } + + + // + // C++: double stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6), int flags = CALIB_FIX_INTRINSIC) + // + +/** + *

Calibrates the stereo camera.

+ * + *

The function estimates transformation between two cameras making a stereo + * pair. If you have a stereo camera where the relative position and orientation + * of two cameras is fixed, and if you computed poses of an object relative to + * the first camera and to the second camera, (R1, T1) and (R2, T2), + * respectively (this can be done with "solvePnP"), then those poses definitely + * relate to each other. This means that, given (R_1,T_1), it + * should be possible to compute (R_2,T_2). You only need to + * know the position and orientation of the second camera relative to the first + * camera. This is what the described function does. It computes + * (R,T) so that:

+ * + *

R_2 = R*R_1, T_2 = R*T_1 + T,

+ * + *

Optionally, it computes the essential matrix E:

+ * + *

E = |0 -T_2 T_1| + * |T_2 0 -T_0| + * |-T_1 T_0 0| * R

+ * + *

where T_i are components of the translation vector T : + * T=[T_0, T_1, T_2]^T. And the function can also compute the + * fundamental matrix F:

+ * + *

F = cameraMatrix2^(-T) E cameraMatrix1^(-1)

+ * + *

Besides the stereo-related information, the function can also perform a full + * calibration of each of two cameras. However, due to the high dimensionality + * of the parameter space and noise in the input data, the function can diverge + * from the correct solution. If the intrinsic parameters can be estimated with + * high accuracy for each of the cameras individually (for example, using + * "calibrateCamera"), you are recommended to do so and then pass + * CV_CALIB_FIX_INTRINSIC flag to the function along with the + * computed intrinsic parameters. Otherwise, if all the parameters are estimated + * at once, it makes sense to restrict some parameters, for example, pass + * CV_CALIB_SAME_FOCAL_LENGTH and CV_CALIB_ZERO_TANGENT_DIST + * flags, which is usually a reasonable assumption.

+ * + *

Similarly to "calibrateCamera", the function minimizes the total + * re-projection error for all the points in all the available views from both + * cameras. The function returns the final value of the re-projection error.

+ * + * @param objectPoints Vector of vectors of the calibration pattern points. + * @param imagePoints1 Vector of vectors of the projections of the calibration + * pattern points, observed by the first camera. + * @param imagePoints2 Vector of vectors of the projections of the calibration + * pattern points, observed by the second camera. + * @param cameraMatrix1 Input/output first camera matrix: + *

|f_x^j 0 c_x^j| + * |0 f_y^j c_y^j| + * |0 0 1| + * , j = 0, 1. If any of CV_CALIB_USE_INTRINSIC_GUESS, + * CV_CALIB_FIX_ASPECT_RATIO, CV_CALIB_FIX_INTRINSIC, + * or CV_CALIB_FIX_FOCAL_LENGTH are specified, some or all of the + * matrix components must be initialized. See the flags description for details.

+ * @param distCoeffs1 Input/output vector of distortion coefficients (k_1, + * k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. The + * output vector length depends on the flags. + * @param cameraMatrix2 Input/output second camera matrix. The parameter is + * similar to cameraMatrix1. + * @param distCoeffs2 Input/output lens distortion coefficients for the second + * camera. The parameter is similar to distCoeffs1. + * @param imageSize Size of the image used only to initialize intrinsic camera + * matrix. + * @param R Output rotation matrix between the 1st and the 2nd camera coordinate + * systems. + * @param T Output translation vector between the coordinate systems of the + * cameras. + * @param E Output essential matrix. + * @param F Output fundamental matrix. + * @param criteria a criteria + * @param flags Different flags that may be zero or a combination of the + * following values: + *
    + *
  • CV_CALIB_FIX_INTRINSIC Fix cameraMatrix? and + * distCoeffs? so that only R, T, E, and + * F matrices are estimated. + *
  • CV_CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic + * parameters according to the specified flags. Initial values are provided by + * the user. + *
  • CV_CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the + * optimization. + *
  • CV_CALIB_FIX_FOCAL_LENGTH Fix f^j_x and f^j_y. + *
  • CV_CALIB_FIX_ASPECT_RATIO Optimize f^j_y. Fix the ratio + * f^j_x/f^j_y. + *
  • CV_CALIB_SAME_FOCAL_LENGTH Enforce f^0_x=f^1_x and + * f^0_y=f^1_y. + *
• CV_CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for + * each camera to zeros and fix them there. + *
  • CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6 Do not change the corresponding + * radial distortion coefficient during the optimization. If CV_CALIB_USE_INTRINSIC_GUESS + * is set, the coefficient from the supplied distCoeffs matrix is + * used. Otherwise, it is set to 0. + *
  • CV_CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide + * the backward compatibility, this extra flag should be explicitly specified to + * make the calibration function use the rational model and return 8 + * coefficients. If the flag is not set, the function computes and returns only + * 5 distortion coefficients. + *
+ * + * @see org.opencv.calib3d.Calib3d.stereoCalibrate + */ + public static double stereoCalibrate(List objectPoints, List imagePoints1, List imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, TermCriteria criteria, int flags) + { + Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); + Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1); + Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2); + double retVal = stereoCalibrate_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon, flags); + + return retVal; + } + +/** + *
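As a calling-convention sketch only: the single fabricated "view" below is far too little data for a meaningful calibration, but it exercises the wrapper end to end (all values invented; real per-view corners would come from findChessboardCorners, and the short overload defaults to CALIB_FIX_INTRINSIC):

import java.util.ArrayList;
import java.util.List;
import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class StereoCalibrateDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        List<Point3> grid = new ArrayList<Point3>();
        List<Point> left = new ArrayList<Point>(), right = new ArrayList<Point>();
        for (int y = 0; y < 3; y++)
            for (int x = 0; x < 3; x++) {
                grid.add(new Point3(x * 30, y * 30, 0));
                left.add(new Point(200 + x * 60, 120 + y * 60));
                right.add(new Point(160 + x * 60, 120 + y * 60)); // shifted: fabricated baseline
            }
        List<Mat> obj = new ArrayList<Mat>(), img1 = new ArrayList<Mat>(), img2 = new ArrayList<Mat>();
        obj.add(new MatOfPoint3f(grid.toArray(new Point3[0])));
        img1.add(new MatOfPoint2f(left.toArray(new Point[0])));
        img2.add(new MatOfPoint2f(right.toArray(new Point[0])));
        Mat K1 = Mat.eye(3, 3, CvType.CV_64F), K2 = Mat.eye(3, 3, CvType.CV_64F);
        K1.put(0, 0, 800); K1.put(1, 1, 800); K1.put(0, 2, 320); K1.put(1, 2, 240);
        K2.put(0, 0, 800); K2.put(1, 1, 800); K2.put(0, 2, 320); K2.put(1, 2, 240);
        Mat d1 = Mat.zeros(5, 1, CvType.CV_64F), d2 = Mat.zeros(5, 1, CvType.CV_64F);
        Mat R = new Mat(), T = new Mat(), E = new Mat(), F = new Mat();
        double rms = Calib3d.stereoCalibrate(obj, img1, img2, K1, d1, K2, d2,
                new Size(640, 480), R, T, E, F);
        System.out.println("rms=" + rms + "\nT=" + T.dump());
    }
}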

Calibrates the stereo camera.

+ * + *

The function estimates transformation between two cameras making a stereo + * pair. If you have a stereo camera where the relative position and orientation + * of two cameras is fixed, and if you computed poses of an object relative to + * the first camera and to the second camera, (R1, T1) and (R2, T2), + * respectively (this can be done with "solvePnP"), then those poses definitely + * relate to each other. This means that, given (R_1,T_1), it + * should be possible to compute (R_2,T_2). You only need to + * know the position and orientation of the second camera relative to the first + * camera. This is what the described function does. It computes + * (R,T) so that:

+ * + *

R_2 = R*R_1, T_2 = R*T_1 + T,

+ * + *

Optionally, it computes the essential matrix E:

+ * + *

E = |0 -T_2 T_1| + * |T_2 0 -T_0| + * |-T_1 T_0 0| * R

+ * + *

where T_i are components of the translation vector T : + * T=[T_0, T_1, T_2]^T. And the function can also compute the + * fundamental matrix F:

+ * + *

F = cameraMatrix2^(-T) E cameraMatrix1^(-1)

+ * + *

Besides the stereo-related information, the function can also perform a full + * calibration of each of two cameras. However, due to the high dimensionality + * of the parameter space and noise in the input data, the function can diverge + * from the correct solution. If the intrinsic parameters can be estimated with + * high accuracy for each of the cameras individually (for example, using + * "calibrateCamera"), you are recommended to do so and then pass + * CV_CALIB_FIX_INTRINSIC flag to the function along with the + * computed intrinsic parameters. Otherwise, if all the parameters are estimated + * at once, it makes sense to restrict some parameters, for example, pass + * CV_CALIB_SAME_FOCAL_LENGTH and CV_CALIB_ZERO_TANGENT_DIST + * flags, which is usually a reasonable assumption.

+ * + *

Similarly to "calibrateCamera", the function minimizes the total + * re-projection error for all the points in all the available views from both + * cameras. The function returns the final value of the re-projection error.

+ * + * @param objectPoints Vector of vectors of the calibration pattern points. + * @param imagePoints1 Vector of vectors of the projections of the calibration + * pattern points, observed by the first camera. + * @param imagePoints2 Vector of vectors of the projections of the calibration + * pattern points, observed by the second camera. + * @param cameraMatrix1 Input/output first camera matrix: + *

|f_x^j 0 c_x^j| + * |0 f_y^j c_y^j| + * |0 0 1| + * , j = 0, 1. If any of CV_CALIB_USE_INTRINSIC_GUESS, + * CV_CALIB_FIX_ASPECT_RATIO, CV_CALIB_FIX_INTRINSIC, + * or CV_CALIB_FIX_FOCAL_LENGTH are specified, some or all of the + * matrix components must be initialized. See the flags description for details.

+ * @param distCoeffs1 Input/output vector of distortion coefficients (k_1, + * k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. The + * output vector length depends on the flags. + * @param cameraMatrix2 Input/output second camera matrix. The parameter is + * similar to cameraMatrix1. + * @param distCoeffs2 Input/output lens distortion coefficients for the second + * camera. The parameter is similar to distCoeffs1. + * @param imageSize Size of the image used only to initialize intrinsic camera + * matrix. + * @param R Output rotation matrix between the 1st and the 2nd camera coordinate + * systems. + * @param T Output translation vector between the coordinate systems of the + * cameras. + * @param E Output essential matrix. + * @param F Output fundamental matrix. + * + * @see org.opencv.calib3d.Calib3d.stereoCalibrate + */ + public static double stereoCalibrate(List objectPoints, List imagePoints1, List imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F) + { + Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); + Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1); + Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2); + double retVal = stereoCalibrate_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj); + + return retVal; + } + + + // + // C++: void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags = CALIB_ZERO_DISPARITY, double alpha = -1, Size newImageSize = Size(), Rect* validPixROI1 = 0, Rect* validPixROI2 = 0) + // + +/** + *

Computes rectification transforms for each head of a calibrated stereo + * camera.

+ * + *

The function computes the rotation matrices for each camera that (virtually) + * make both camera image planes the same plane. Consequently, this makes all + * the epipolar lines parallel and thus simplifies the dense stereo + * correspondence problem. The function takes the matrices computed by + * "stereoCalibrate" as input. As output, it provides two rotation matrices and + * also two projection matrices in the new coordinates. The function + * distinguishes the following two cases:

+ *
    + *
  • Horizontal stereo: the first and the second camera views are shifted + * relative to each other mainly along the x axis (with possible small vertical + * shift). In the rectified images, the corresponding epipolar lines in the left + * and right cameras are horizontal and have the same y-coordinate. P1 and P2 + * look like: + *
+ * + *

P1 = |f 0 cx_1 0| + * |0 f cy 0| + * |0 0 1 0|

+ * + * + * + *

P2 = |f 0 cx_2 T_x*f| + * |0 f cy 0| + * |0 0 1 0|,

+ * + *

where T_x is a horizontal shift between the cameras and + * cx_1=cx_2 if CV_CALIB_ZERO_DISPARITY is set.

+ *
    + *
  • Vertical stereo: the first and the second camera views are shifted + * relative to each other mainly in vertical direction (and probably a bit in + * the horizontal direction too). The epipolar lines in the rectified images are + * vertical and have the same x-coordinate. P1 and P2 look like: + *
+ * + *

P1 = |f 0 cx 0| + * |0 f cy_1 0| + * |0 0 1 0|

+ * + * + * + *

P2 = |f 0 cx 0| + * |0 f cy_2 T_y*f| + * |0 0 1 0|,

+ * + *

where T_y is a vertical shift between the cameras and + * cy_1=cy_2 if CALIB_ZERO_DISPARITY is set.

+ * + *

As you can see, the first three columns of P1 and + * P2 will effectively be the new "rectified" camera matrices. + * The matrices, together with R1 and R2, can then be + * passed to "initUndistortRectifyMap" to initialize the rectification map for + * each camera.

+ * + *

See below the screenshot from the stereo_calib.cpp sample. Some + * red horizontal lines pass through the corresponding image regions. This means + * that the images are well rectified, which is what most stereo correspondence + * algorithms rely on. The green rectangles are roi1 and + * roi2. You see that their interiors are all valid pixels.

+ * + * @param cameraMatrix1 First camera matrix. + * @param distCoeffs1 First camera distortion parameters. + * @param cameraMatrix2 Second camera matrix. + * @param distCoeffs2 Second camera distortion parameters. + * @param imageSize Size of the image used for stereo calibration. + * @param R Rotation matrix between the coordinate systems of the first and the + * second cameras. + * @param T Translation vector between coordinate systems of the cameras. + * @param R1 Output 3x3 rectification transform (rotation matrix) for the first + * camera. + * @param R2 Output 3x3 rectification transform (rotation matrix) for the second + * camera. + * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate + * systems for the first camera. + * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate + * systems for the second camera. + * @param Q Output 4 x 4 disparity-to-depth mapping matrix (see + * "reprojectImageTo3D"). + * @param flags Operation flags that may be zero or CV_CALIB_ZERO_DISPARITY. + * If the flag is set, the function makes the principal points of each camera + * have the same pixel coordinates in the rectified views. And if the flag is + * not set, the function may still shift the images in the horizontal or + * vertical direction (depending on the orientation of epipolar lines) to + * maximize the useful image area. + * @param alpha Free scaling parameter. If it is -1 or absent, the function + * performs the default scaling. Otherwise, the parameter should be between 0 + * and 1. alpha=0 means that the rectified images are zoomed and + * shifted so that only valid pixels are visible (no black areas after + * rectification). alpha=1 means that the rectified image is + * decimated and shifted so that all the pixels from the original images from + * the cameras are retained in the rectified images (no source image pixels are + * lost). Obviously, any intermediate value yields an intermediate result + * between those two extreme cases. + * @param newImageSize New image resolution after rectification. The same size + * should be passed to "initUndistortRectifyMap" (see the stereo_calib.cpp + * sample in OpenCV samples directory). When (0,0) is passed (default), it is + * set to the original imageSize. Setting it to larger value can + * help you preserve details in the original image, especially when there is a + * big radial distortion. + * @param validPixROI1 Optional output rectangles inside the rectified images + * where all the pixels are valid. If alpha=0, the ROIs cover the + * whole images. Otherwise, they are likely to be smaller (see the picture + * below). + * @param validPixROI2 Optional output rectangles inside the rectified images + * where all the pixels are valid. If alpha=0, the ROIs cover the + * whole images. Otherwise, they are likely to be smaller (see the picture + * below). 
+ * + * @see org.opencv.calib3d.Calib3d.stereoRectify + */ + public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize, Rect validPixROI1, Rect validPixROI2) + { + double[] validPixROI1_out = new double[4]; + double[] validPixROI2_out = new double[4]; + stereoRectify_0(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha, newImageSize.width, newImageSize.height, validPixROI1_out, validPixROI2_out); + if(validPixROI1!=null){ validPixROI1.x = (int)validPixROI1_out[0]; validPixROI1.y = (int)validPixROI1_out[1]; validPixROI1.width = (int)validPixROI1_out[2]; validPixROI1.height = (int)validPixROI1_out[3]; } + if(validPixROI2!=null){ validPixROI2.x = (int)validPixROI2_out[0]; validPixROI2.y = (int)validPixROI2_out[1]; validPixROI2.width = (int)validPixROI2_out[2]; validPixROI2.height = (int)validPixROI2_out[3]; } + return; + } + +/** + *
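To see the shapes involved, a small synthetic sketch (the identity rotation and the 0.1-unit x-baseline are invented; real R and T come from stereoCalibrate):

import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class StereoRectifyDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat K1 = Mat.eye(3, 3, CvType.CV_64F), K2 = Mat.eye(3, 3, CvType.CV_64F);
        K1.put(0, 0, 800); K1.put(1, 1, 800); K1.put(0, 2, 320); K1.put(1, 2, 240);
        K2.put(0, 0, 800); K2.put(1, 1, 800); K2.put(0, 2, 320); K2.put(1, 2, 240);
        Mat d1 = Mat.zeros(5, 1, CvType.CV_64F), d2 = Mat.zeros(5, 1, CvType.CV_64F);
        Mat R = Mat.eye(3, 3, CvType.CV_64F);  // cameras already parallel
        Mat T = Mat.zeros(3, 1, CvType.CV_64F);
        T.put(0, 0, -0.1);                     // horizontal baseline (illustrative)
        Mat R1 = new Mat(), R2 = new Mat(), P1 = new Mat(), P2 = new Mat(), Q = new Mat();
        Calib3d.stereoRectify(K1, d1, K2, d2, new Size(640, 480), R, T, R1, R2, P1, P2, Q);
        System.out.println("P2 =\n" + P2.dump() + "\nQ =\n" + Q.dump());
    }
}

R1/R2 and P1/P2 would then feed initUndistortRectifyMap for each camera, and Q feeds reprojectImageTo3D, as described above.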

Computes rectification transforms for each head of a calibrated stereo + * camera.

+ * + *

The function computes the rotation matrices for each camera that (virtually) + * make both camera image planes the same plane. Consequently, this makes all + * the epipolar lines parallel and thus simplifies the dense stereo + * correspondence problem. The function takes the matrices computed by + * "stereoCalibrate" as input. As output, it provides two rotation matrices and + * also two projection matrices in the new coordinates. The function + * distinguishes the following two cases:

+ *
    + *
  • Horizontal stereo: the first and the second camera views are shifted + * relative to each other mainly along the x axis (with possible small vertical + * shift). In the rectified images, the corresponding epipolar lines in the left + * and right cameras are horizontal and have the same y-coordinate. P1 and P2 + * look like: + *
+ * + *

P1 = |f 0 cx_1 0| + * |0 f cy 0| + * |0 0 1 0|

+ * + * + * + *

P2 = |f 0 cx_2 T_x*f| + * |0 f cy 0| + * |0 0 1 0|,

+ * + *

where T_x is a horizontal shift between the cameras and + * cx_1=cx_2 if CV_CALIB_ZERO_DISPARITY is set.

+ *
    + *
  • Vertical stereo: the first and the second camera views are shifted + * relative to each other mainly in vertical direction (and probably a bit in + * the horizontal direction too). The epipolar lines in the rectified images are + * vertical and have the same x-coordinate. P1 and P2 look like: + *
+ * + *

P1 = |f 0 cx 0| + * |0 f cy_1 0| + * |0 0 1 0|

+ * + * + * + *

P2 = |f 0 cx 0| + * |0 f cy_2 T_y*f| + * |0 0 1 0|,

+ * + *

where T_y is a vertical shift between the cameras and + * cy_1=cy_2 if CALIB_ZERO_DISPARITY is set.

+ * + *

As you can see, the first three columns of P1 and + * P2 will effectively be the new "rectified" camera matrices. + * The matrices, together with R1 and R2, can then be + * passed to "initUndistortRectifyMap" to initialize the rectification map for + * each camera.

+ * + *

See below the screenshot from the stereo_calib.cpp sample. Some + * red horizontal lines pass through the corresponding image regions. This means + * that the images are well rectified, which is what most stereo correspondence + * algorithms rely on. The green rectangles are roi1 and + * roi2. You see that their interiors are all valid pixels.

+ * + * @param cameraMatrix1 First camera matrix. + * @param distCoeffs1 First camera distortion parameters. + * @param cameraMatrix2 Second camera matrix. + * @param distCoeffs2 Second camera distortion parameters. + * @param imageSize Size of the image used for stereo calibration. + * @param R Rotation matrix between the coordinate systems of the first and the + * second cameras. + * @param T Translation vector between coordinate systems of the cameras. + * @param R1 Output 3x3 rectification transform (rotation matrix) for the first + * camera. + * @param R2 Output 3x3 rectification transform (rotation matrix) for the second + * camera. + * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate + * systems for the first camera. + * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate + * systems for the second camera. + * @param Q Output 4 x 4 disparity-to-depth mapping matrix (see + * "reprojectImageTo3D"). + * + * @see org.opencv.calib3d.Calib3d.stereoRectify + */ + public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q) + { + + stereoRectify_1(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj); + + return; + } + + + // + // C++: bool stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat& H1, Mat& H2, double threshold = 5) + // + +/** + *

Computes a rectification transform for an uncalibrated stereo camera.

+ * + *

The function computes the rectification transformations without knowing + * intrinsic parameters of the cameras and their relative position in the space, + * which explains the suffix "uncalibrated". Another related difference from + * "stereoRectify" is that the function outputs not the rectification + * transformations in the object (3D) space, but the planar perspective + * transformations encoded by the homography matrices H1 and + * H2. The function implements the algorithm [Hartley99].

+ * + *

Note:

+ * + *

While the algorithm does not need to know the intrinsic parameters of the + * cameras, it heavily depends on the epipolar geometry. Therefore, if the + * camera lenses have a significant distortion, it would be better to correct it + * before computing the fundamental matrix and calling this function. For + * example, distortion coefficients can be estimated for each head of the stereo + * camera separately by using "calibrateCamera". Then, the images can be + * corrected using "undistort", or just the point coordinates can be corrected + * with "undistortPoints".

+ * + * @param points1 Array of feature points in the first image. + * @param points2 The corresponding points in the second image. The same formats + * as in "findFundamentalMat" are supported. + * @param F Input fundamental matrix. It can be computed from the same set of + * point pairs using "findFundamentalMat". + * @param imgSize Size of the image. + * @param H1 Output rectification homography matrix for the first image. + * @param H2 Output rectification homography matrix for the second image. + * @param threshold Optional threshold used to filter out the outliers. If the + * parameter is greater than zero, all the point pairs that do not comply with + * the epipolar geometry (that is, the points for which |points2[i]^T*F*points1[i]|>threshold) + * are rejected prior to computing the homographies. Otherwise, all the points + * are considered inliers. + * + * @see org.opencv.calib3d.Calib3d.stereoRectifyUncalibrated + */ + public static boolean stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat H1, Mat H2, double threshold) + { + + boolean retVal = stereoRectifyUncalibrated_0(points1.nativeObj, points2.nativeObj, F.nativeObj, imgSize.width, imgSize.height, H1.nativeObj, H2.nativeObj, threshold); + + return retVal; + }
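A sketch of the full pipeline the description implies: compute F from the same point pairs, then rectify (the eight fabricated correspondences, with varying disparity and a little noise, are invented for illustration):

import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;

public class RectifyUncalibratedDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Point[] l = new Point[8], r = new Point[8];
        for (int i = 0; i < 8; i++) {
            l[i] = new Point(80 + 60 * (i % 4), 100 + 120 * (i / 4));
            r[i] = new Point(l[i].x - 10 - 3 * i, l[i].y + (i % 2)); // shifted, slightly noisy
        }
        MatOfPoint2f p1 = new MatOfPoint2f(l), p2 = new MatOfPoint2f(r);
        Mat F = Calib3d.findFundamentalMat(p1, p2); // same point pairs, as the doc advises
        Mat H1 = new Mat(), H2 = new Mat();
        boolean ok = Calib3d.stereoRectifyUncalibrated(p1, p2, F, new Size(640, 480), H1, H2);
        System.out.println(ok + "\nH1 =\n" + H1.dump());
    }
}

+ +/** + *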

Computes a rectification transform for an uncalibrated stereo camera.

+ * + *

The function computes the rectification transformations without knowing + * intrinsic parameters of the cameras and their relative position in the space, + * which explains the suffix "uncalibrated". Another related difference from + * "stereoRectify" is that the function outputs not the rectification + * transformations in the object (3D) space, but the planar perspective + * transformations encoded by the homography matrices H1 and + * H2. The function implements the algorithm [Hartley99].

+ * + *

Note:

+ * + *

While the algorithm does not need to know the intrinsic parameters of the + * cameras, it heavily depends on the epipolar geometry. Therefore, if the + * camera lenses have a significant distortion, it would be better to correct it + * before computing the fundamental matrix and calling this function. For + * example, distortion coefficients can be estimated for each head of the stereo + * camera separately by using "calibrateCamera". Then, the images can be + * corrected using "undistort", or just the point coordinates can be corrected + * with "undistortPoints".

+ * + * @param points1 Array of feature points in the first image. + * @param points2 The corresponding points in the second image. The same formats + * as in "findFundamentalMat" are supported. + * @param F Input fundamental matrix. It can be computed from the same set of + * point pairs using "findFundamentalMat". + * @param imgSize Size of the image. + * @param H1 Output rectification homography matrix for the first image. + * @param H2 Output rectification homography matrix for the second image. + * + * @see org.opencv.calib3d.Calib3d.stereoRectifyUncalibrated + */ + public static boolean stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat H1, Mat H2) + { + + boolean retVal = stereoRectifyUncalibrated_1(points1.nativeObj, points2.nativeObj, F.nativeObj, imgSize.width, imgSize.height, H1.nativeObj, H2.nativeObj); + + return retVal; + } + + + // + // C++: void triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat& points4D) + // + +/** + *

Reconstructs points by triangulation.

+ * + *

The function reconstructs 3-dimensional points (in homogeneous coordinates) + * by using their observations with a stereo camera. Projection matrices can be + * obtained from "stereoRectify".

+ * + * @param projMatr1 3x4 projection matrix of the first camera. + * @param projMatr2 3x4 projection matrix of the second camera. + * @param projPoints1 2xN array of feature points in the first image. In case of + * c++ version it can be also a vector of feature points or two-channel matrix + * of size 1xN or Nx1. + * @param projPoints2 2xN array of corresponding points in the second image. In + * case of c++ version it can be also a vector of feature points or two-channel + * matrix of size 1xN or Nx1. + * @param points4D 4xN array of reconstructed points in homogeneous coordinates. + * + * @see org.opencv.calib3d.Calib3d.triangulatePoints + * @see org.opencv.calib3d.Calib3d#reprojectImageTo3D + */ + public static void triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat points4D) + { + + triangulatePoints_0(projMatr1.nativeObj, projMatr2.nativeObj, projPoints1.nativeObj, projPoints2.nativeObj, points4D.nativeObj); + + return; + } + + + // + // C++: void validateDisparity(Mat& disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp = 1) + // + + public static void validateDisparity(Mat disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp) + { + + validateDisparity_0(disparity.nativeObj, cost.nativeObj, minDisparity, numberOfDisparities, disp12MaxDisp); + + return; + } + + public static void validateDisparity(Mat disparity, Mat cost, int minDisparity, int numberOfDisparities) + { + + validateDisparity_1(disparity.nativeObj, cost.nativeObj, minDisparity, numberOfDisparities); + + return; + } + + + + + // C++: Vec3d RQDecomp3x3(Mat src, Mat& mtxR, Mat& mtxQ, Mat& Qx = Mat(), Mat& Qy = Mat(), Mat& Qz = Mat()) + private static native double[] RQDecomp3x3_0(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj, long Qx_nativeObj, long Qy_nativeObj, long Qz_nativeObj); + private static native double[] RQDecomp3x3_1(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj); + + // C++: void Rodrigues(Mat src, Mat& dst, Mat& jacobian = Mat()) + private static native void Rodrigues_0(long src_nativeObj, long dst_nativeObj, long jacobian_nativeObj); + private static native void Rodrigues_1(long src_nativeObj, long dst_nativeObj); + + // C++: double calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON)) + private static native double calibrateCamera_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon); + private static native double calibrateCamera_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags); + private static native double calibrateCamera_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj); + + // C++: void calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double 
apertureHeight, double& fovx, double& fovy, double& focalLength, Point2d& principalPoint, double& aspectRatio) + private static native void calibrationMatrixValues_0(long cameraMatrix_nativeObj, double imageSize_width, double imageSize_height, double apertureWidth, double apertureHeight, double[] fovx_out, double[] fovy_out, double[] focalLength_out, double[] principalPoint_out, double[] aspectRatio_out); + + // C++: void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat& rvec3, Mat& tvec3, Mat& dr3dr1 = Mat(), Mat& dr3dt1 = Mat(), Mat& dr3dr2 = Mat(), Mat& dr3dt2 = Mat(), Mat& dt3dr1 = Mat(), Mat& dt3dt1 = Mat(), Mat& dt3dr2 = Mat(), Mat& dt3dt2 = Mat()) + private static native void composeRT_0(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj, long dt3dt1_nativeObj, long dt3dr2_nativeObj, long dt3dt2_nativeObj); + private static native void composeRT_1(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj); + + // C++: void convertPointsFromHomogeneous(Mat src, Mat& dst) + private static native void convertPointsFromHomogeneous_0(long src_nativeObj, long dst_nativeObj); + + // C++: void convertPointsToHomogeneous(Mat src, Mat& dst) + private static native void convertPointsToHomogeneous_0(long src_nativeObj, long dst_nativeObj); + + // C++: void correctMatches(Mat F, Mat points1, Mat points2, Mat& newPoints1, Mat& newPoints2) + private static native void correctMatches_0(long F_nativeObj, long points1_nativeObj, long points2_nativeObj, long newPoints1_nativeObj, long newPoints2_nativeObj); + + // C++: void decomposeProjectionMatrix(Mat projMatrix, Mat& cameraMatrix, Mat& rotMatrix, Mat& transVect, Mat& rotMatrixX = Mat(), Mat& rotMatrixY = Mat(), Mat& rotMatrixZ = Mat(), Mat& eulerAngles = Mat()) + private static native void decomposeProjectionMatrix_0(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj, long rotMatrixY_nativeObj, long rotMatrixZ_nativeObj, long eulerAngles_nativeObj); + private static native void decomposeProjectionMatrix_1(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj); + + // C++: void drawChessboardCorners(Mat& image, Size patternSize, vector_Point2f corners, bool patternWasFound) + private static native void drawChessboardCorners_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj, boolean patternWasFound); + + // C++: int estimateAffine3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99) + private static native int estimateAffine3D_0(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold, double confidence); + private static native int estimateAffine3D_1(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj); + + // C++: void filterSpeckles(Mat& img, double newVal, int maxSpeckleSize, double maxDiff, Mat& buf = Mat()) + private static native void filterSpeckles_0(long img_nativeObj, double newVal, int maxSpeckleSize, double maxDiff, long buf_nativeObj); + private static native void filterSpeckles_1(long img_nativeObj, double newVal, int maxSpeckleSize, double maxDiff); + + 
// C++: bool findChessboardCorners(Mat image, Size patternSize, vector_Point2f& corners, int flags = CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE) + private static native boolean findChessboardCorners_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj, int flags); + private static native boolean findChessboardCorners_1(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj); + + // C++: bool findCirclesGridDefault(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID) + private static native boolean findCirclesGridDefault_0(long image_nativeObj, double patternSize_width, double patternSize_height, long centers_nativeObj, int flags); + private static native boolean findCirclesGridDefault_1(long image_nativeObj, double patternSize_width, double patternSize_height, long centers_nativeObj); + + // C++: Mat findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method = FM_RANSAC, double param1 = 3., double param2 = 0.99, Mat& mask = Mat()) + private static native long findFundamentalMat_0(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double param1, double param2, long mask_nativeObj); + private static native long findFundamentalMat_1(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double param1, double param2); + private static native long findFundamentalMat_2(long points1_mat_nativeObj, long points2_mat_nativeObj); + + // C++: Mat findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, int method = 0, double ransacReprojThreshold = 3, Mat& mask = Mat()) + private static native long findHomography_0(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold, long mask_nativeObj); + private static native long findHomography_1(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold); + private static native long findHomography_2(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj); + + // C++: Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize = Size(), Rect* validPixROI = 0, bool centerPrincipalPoint = false) + private static native long getOptimalNewCameraMatrix_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha, double newImgSize_width, double newImgSize_height, double[] validPixROI_out, boolean centerPrincipalPoint); + private static native long getOptimalNewCameraMatrix_1(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha); + + // C++: Rect getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int SADWindowSize) + private static native double[] getValidDisparityROI_0(int roi1_x, int roi1_y, int roi1_width, int roi1_height, int roi2_x, int roi2_y, int roi2_width, int roi2_height, int minDisparity, int numberOfDisparities, int SADWindowSize); + + // C++: Mat initCameraMatrix2D(vector_vector_Point3f objectPoints, vector_vector_Point2f imagePoints, Size imageSize, double aspectRatio = 1.) 
+ private static native long initCameraMatrix2D_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, double aspectRatio); + private static native long initCameraMatrix2D_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height); + + // C++: void matMulDeriv(Mat A, Mat B, Mat& dABdA, Mat& dABdB) + private static native void matMulDeriv_0(long A_nativeObj, long B_nativeObj, long dABdA_nativeObj, long dABdB_nativeObj); + + // C++: void projectPoints(vector_Point3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, vector_double distCoeffs, vector_Point2f& imagePoints, Mat& jacobian = Mat(), double aspectRatio = 0) + private static native void projectPoints_0(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj, long jacobian_nativeObj, double aspectRatio); + private static native void projectPoints_1(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj); + + // C++: float rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, vector_Mat imgpt1, vector_Mat imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q, double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags) + private static native float rectify3Collinear_0(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long cameraMatrix3_nativeObj, long distCoeffs3_nativeObj, long imgpt1_mat_nativeObj, long imgpt3_mat_nativeObj, double imageSize_width, double imageSize_height, long R12_nativeObj, long T12_nativeObj, long R13_nativeObj, long T13_nativeObj, long R1_nativeObj, long R2_nativeObj, long R3_nativeObj, long P1_nativeObj, long P2_nativeObj, long P3_nativeObj, long Q_nativeObj, double alpha, double newImgSize_width, double newImgSize_height, double[] roi1_out, double[] roi2_out, int flags); + + // C++: void reprojectImageTo3D(Mat disparity, Mat& _3dImage, Mat Q, bool handleMissingValues = false, int ddepth = -1) + private static native void reprojectImageTo3D_0(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj, boolean handleMissingValues, int ddepth); + private static native void reprojectImageTo3D_1(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj, boolean handleMissingValues); + private static native void reprojectImageTo3D_2(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj); + + // C++: bool solvePnP(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int flags = ITERATIVE) + private static native boolean solvePnP_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int flags); + private static native boolean solvePnP_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj); + + // C++: void solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, 
vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, float reprojectionError = 8.0, int minInliersCount = 100, Mat& inliers = Mat(), int flags = ITERATIVE) + private static native void solvePnPRansac_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, int minInliersCount, long inliers_nativeObj, int flags); + private static native void solvePnPRansac_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj); + + // C++: double stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6), int flags = CALIB_FIX_INTRINSIC) + private static native double stereoCalibrate_0(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon, int flags); + private static native double stereoCalibrate_1(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj); + + // C++: void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags = CALIB_ZERO_DISPARITY, double alpha = -1, Size newImageSize = Size(), Rect* validPixROI1 = 0, Rect* validPixROI2 = 0) + private static native void stereoRectify_0(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha, double newImageSize_width, double newImageSize_height, double[] validPixROI1_out, double[] validPixROI2_out); + private static native void stereoRectify_1(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj); + + // C++: bool stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat& H1, Mat& H2, double threshold = 5) + private static native boolean stereoRectifyUncalibrated_0(long points1_nativeObj, long points2_nativeObj, long F_nativeObj, double imgSize_width, double imgSize_height, long H1_nativeObj, long H2_nativeObj, double threshold); + private static native boolean stereoRectifyUncalibrated_1(long points1_nativeObj, long 
points2_nativeObj, long F_nativeObj, double imgSize_width, double imgSize_height, long H1_nativeObj, long H2_nativeObj); + + // C++: void triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat& points4D) + private static native void triangulatePoints_0(long projMatr1_nativeObj, long projMatr2_nativeObj, long projPoints1_nativeObj, long projPoints2_nativeObj, long points4D_nativeObj); + + // C++: void validateDisparity(Mat& disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp = 1) + private static native void validateDisparity_0(long disparity_nativeObj, long cost_nativeObj, int minDisparity, int numberOfDisparities, int disp12MaxDisp); + private static native void validateDisparity_1(long disparity_nativeObj, long cost_nativeObj, int minDisparity, int numberOfDisparities); + +} diff --git a/src/org/opencv/calib3d/StereoBM.java b/src/org/opencv/calib3d/StereoBM.java new file mode 100644 index 0000000..a7dc0c9 --- /dev/null +++ b/src/org/opencv/calib3d/StereoBM.java @@ -0,0 +1,261 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.calib3d; + +import org.opencv.core.Mat; + +// C++: class StereoBM +/** + *
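To tie the Calib3d bindings above together, here is a minimal sketch of calling triangulatePoints from Java and dehomogenizing its 4xN output. All concrete values (projection matrices, pixel coordinates) are hypothetical placeholders; in practice P1 and P2 would come from stereoRectify or calibrateCamera.

    import org.opencv.calib3d.Calib3d;
    import org.opencv.core.Core;
    import org.opencv.core.CvType;
    import org.opencv.core.Mat;

    public class TriangulateSketch {
        public static void main(String[] args) {
            System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

            // 3x4 projection matrices; fill with real calibration results.
            Mat P1 = Mat.eye(3, 4, CvType.CV_64F);
            Mat P2 = Mat.eye(3, 4, CvType.CV_64F);
            P2.put(0, 3, -100.0); // hypothetical baseline along x

            // One matched feature per image, stored in 2xN layout (here N = 1).
            Mat pts1 = new Mat(2, 1, CvType.CV_64F);
            Mat pts2 = new Mat(2, 1, CvType.CV_64F);
            pts1.put(0, 0, 320.0); pts1.put(1, 0, 240.0);
            pts2.put(0, 0, 300.0); pts2.put(1, 0, 240.0);

            Mat points4D = new Mat();
            Calib3d.triangulatePoints(P1, P2, pts1, pts2, points4D);

            // points4D is 4xN homogeneous; divide X, Y, Z by W to dehomogenize.
            double w = points4D.get(3, 0)[0];
            System.out.println("X = " + points4D.get(0, 0)[0] / w
                    + ", Y = " + points4D.get(1, 0)[0] / w
                    + ", Z = " + points4D.get(2, 0)[0] / w);
        }
    }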

Class for computing stereo correspondence using the block matching algorithm.

+ * + *

// Block matching stereo correspondence algorithm class StereoBM

+ * + *

// C++ code:

+ * + * + *

enum { NORMALIZED_RESPONSE = CV_STEREO_BM_NORMALIZED_RESPONSE,

+ * + *

BASIC_PRESET=CV_STEREO_BM_BASIC,

+ * + *

FISH_EYE_PRESET=CV_STEREO_BM_FISH_EYE,

+ * + *

NARROW_PRESET=CV_STEREO_BM_NARROW };

+ * + *

StereoBM();

+ * + *

// the preset is one of..._PRESET above.

+ * + *

// ndisparities is the size of disparity range,

+ * + *

// in which the optimal disparity at each pixel is searched for.

+ * + *

// SADWindowSize is the size of averaging window used to match pixel blocks

+ * + *

// (larger values mean better robustness to noise, but yield blurry disparity + * maps)

+ * + *

StereoBM(int preset, int ndisparities=0, int SADWindowSize=21);

+ * + *

// separate initialization function

+ * + *

void init(int preset, int ndisparities=0, int SADWindowSize=21);

+ * + *

// computes the disparity for the two rectified 8-bit single-channel images.

+ * + *

// the disparity will be 16-bit signed (fixed-point) or 32-bit floating-point + * image of the same size as left.

+ * + *

void operator()(InputArray left, InputArray right, OutputArray disparity, int + * disptype=CV_16S);

+ * + *

Ptr<CvStereoBMState> state;

+ * + *

};

+ * + *

The class is a C++ wrapper for the associated functions. In particular, + * "StereoBM.operator()" is the wrapper for

+ * + *

"cvFindStereoCorrespondenceBM".

+ * + * @see org.opencv.calib3d.StereoBM + */ +public class StereoBM { + + protected final long nativeObj; + protected StereoBM(long addr) { nativeObj = addr; } + + + public static final int + PREFILTER_NORMALIZED_RESPONSE = 0, + PREFILTER_XSOBEL = 1, + BASIC_PRESET = 0, + FISH_EYE_PRESET = 1, + NARROW_PRESET = 2; + + + // + // C++: StereoBM::StereoBM() + // + +/** + *

The constructors.

+ * + *

The constructors initialize StereoBM state. You can then call + * StereoBM.operator() to compute disparity for a specific stereo + * pair.

+ * + *

Note: In the C API you need to deallocate CvStereoBM state when + * it is not needed anymore using cvReleaseStereoBMState(&stereobm).

+ * + * @see org.opencv.calib3d.StereoBM.StereoBM + */ + public StereoBM() + { + + nativeObj = StereoBM_0(); + + return; + } + + + // + // C++: StereoBM::StereoBM(int preset, int ndisparities = 0, int SADWindowSize = 21) + // + +/** + *

The constructors.

+ * + *

The constructors initialize StereoBM state. You can then call + * StereoBM.operator() to compute disparity for a specific stereo + * pair.

+ * + *

Note: In the C API you need to deallocate CvStereoBM state when + * it is not needed anymore using cvReleaseStereoBMState(&stereobm).

+ * + * @param preset specifies the whole set of algorithm parameters, one of: + *
    + *
  • BASIC_PRESET - parameters suitable for general cameras + *
  • FISH_EYE_PRESET - parameters suitable for wide-angle cameras + *
  • NARROW_PRESET - parameters suitable for narrow-angle cameras + *
+ * + *

After constructing the class, you can override any parameters set by the + * preset.

+ * @param ndisparities the disparity search range. For each pixel the algorithm + * will find the best disparity from 0 (default minimum disparity) to + * ndisparities. The search range can then be shifted by changing + * the minimum disparity. + * @param SADWindowSize the linear size of the blocks compared by the algorithm. + * The size should be odd (as the block is centered at the current pixel). + * A larger block size implies a smoother, though less accurate, disparity map. + * A smaller block size gives a more detailed disparity map, but there is a higher + * chance for the algorithm to find a wrong correspondence. + * + * @see org.opencv.calib3d.StereoBM.StereoBM + */ + public StereoBM(int preset, int ndisparities, int SADWindowSize) + { + + nativeObj = StereoBM_1(preset, ndisparities, SADWindowSize); + + return; + } + +/**

The constructors.

+ * + *

The constructors initialize StereoBM state. You can then call + * StereoBM.operator() to compute disparity for a specific stereo + * pair.

+ * + *

Note: In the C API you need to deallocate CvStereoBM state when + * it is not needed anymore using cvReleaseStereoBMState(&stereobm).

+ * + * @param preset specifies the whole set of algorithm parameters, one of: + *
    + *
  • BASIC_PRESET - parameters suitable for general cameras + *
  • FISH_EYE_PRESET - parameters suitable for wide-angle cameras + *
  • NARROW_PRESET - parameters suitable for narrow-angle cameras + *
+ * + *

After constructing the class, you can override any parameters set by the + * preset.

+ * + * @see org.opencv.calib3d.StereoBM.StereoBM + */ + public StereoBM(int preset) + { + + nativeObj = StereoBM_2(preset); + + return; + } + + + // + // C++: void StereoBM::operator ()(Mat left, Mat right, Mat& disparity, int disptype = CV_16S) + // + +/** + *

Computes disparity using the BM algorithm for a rectified stereo pair.

+ * + *

The method executes the BM algorithm on a rectified stereo pair. See the + * stereo_match.cpp OpenCV sample on how to prepare images and call + * the method. Note that the method is not constant, thus you should not use the + * same StereoBM instance from within different threads + * simultaneously. The function is parallelized with the TBB library.

+ * + * @param left Left 8-bit single-channel image. + * @param right Right image of the same size and the same type as the left one. + * @param disparity Output disparity map. It has the same size as the input + * images. When disptype==CV_16S, the map is a 16-bit signed + * single-channel image, containing disparity values scaled by 16. To get the + * true disparity values from such fixed-point representation, you will need to + * divide each disp element by 16. If disptype==CV_32F, + * the disparity map will already contain the real disparity values on output. + * @param disptype Type of the output disparity map, CV_16S + * (default) or CV_32F. + * + * @see org.opencv.calib3d.StereoBM.operator() + */ + public void compute(Mat left, Mat right, Mat disparity, int disptype) + { + + compute_0(nativeObj, left.nativeObj, right.nativeObj, disparity.nativeObj, disptype); + + return; + } + +/** + *

Computes disparity using the BM algorithm for a rectified stereo pair.

+ * + *

The method executes the BM algorithm on a rectified stereo pair. See the + * stereo_match.cpp OpenCV sample on how to prepare images and call + * the method. Note that the method is not constant, thus you should not use the + * same StereoBM instance from within different threads + * simultaneously. The function is parallelized with the TBB library.

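Taken together, a minimal sketch of the workflow this class documents. The file names and parameter values below are assumptions, and the inputs must already be a rectified 8-bit grayscale pair:

    import org.opencv.calib3d.StereoBM;
    import org.opencv.core.Core;
    import org.opencv.core.CvType;
    import org.opencv.core.Mat;
    import org.opencv.highgui.Highgui;

    public class StereoBMSketch {
        public static void main(String[] args) {
            System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

            Mat left  = Highgui.imread("left.png",  Highgui.CV_LOAD_IMAGE_GRAYSCALE);
            Mat right = Highgui.imread("right.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);

            // 64 disparities, 21x21 SAD window (window size must be odd).
            StereoBM bm = new StereoBM(StereoBM.BASIC_PRESET, 64, 21);

            Mat disp16 = new Mat();
            bm.compute(left, right, disp16, CvType.CV_16S);

            // CV_16S disparities are scaled by 16; convert to true values.
            Mat disp = new Mat();
            disp16.convertTo(disp, CvType.CV_32F, 1.0 / 16);
        }
    }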
+ * + * @param left Left 8-bit single-channel image. + * @param right Right image of the same size and the same type as the left one. + * @param disparity Output disparity map. It has the same size as the input + * images. When disptype==CV_16S, the map is a 16-bit signed + * single-channel image, containing disparity values scaled by 16. To get the + * true disparity values from such fixed-point representation, you will need to + * divide each disp element by 16. If disptype==CV_32F, + * the disparity map will already contain the real disparity values on output. + * + * @see org.opencv.calib3d.StereoBM.operator() + */ + public void compute(Mat left, Mat right, Mat disparity) + { + + compute_1(nativeObj, left.nativeObj, right.nativeObj, disparity.nativeObj); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: StereoBM::StereoBM() + private static native long StereoBM_0(); + + // C++: StereoBM::StereoBM(int preset, int ndisparities = 0, int SADWindowSize = 21) + private static native long StereoBM_1(int preset, int ndisparities, int SADWindowSize); + private static native long StereoBM_2(int preset); + + // C++: void StereoBM::operator ()(Mat left, Mat right, Mat& disparity, int disptype = CV_16S) + private static native void compute_0(long nativeObj, long left_nativeObj, long right_nativeObj, long disparity_nativeObj, int disptype); + private static native void compute_1(long nativeObj, long left_nativeObj, long right_nativeObj, long disparity_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/calib3d/StereoSGBM.java b/src/org/opencv/calib3d/StereoSGBM.java new file mode 100644 index 0000000..84354fa --- /dev/null +++ b/src/org/opencv/calib3d/StereoSGBM.java @@ -0,0 +1,590 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.calib3d; + +import org.opencv.core.Mat; + +// C++: class StereoSGBM +/** + *

Class for computing stereo correspondence using the semi-global block + * matching algorithm.

+ * + *

class StereoSGBM

+ * + *

// C++ code:

+ * + * + *

StereoSGBM();

+ * + *

StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize,

+ * + *

int P1=0, int P2=0, int disp12MaxDiff=0,

+ * + *

int preFilterCap=0, int uniquenessRatio=0,

+ * + *

int speckleWindowSize=0, int speckleRange=0,

+ * + *

bool fullDP=false);

+ * + *

virtual ~StereoSGBM();

+ * + *

virtual void operator()(InputArray left, InputArray right, OutputArray disp);

+ * + *

int minDisparity;

+ * + *

int numberOfDisparities;

+ * + *

int SADWindowSize;

+ * + *

int preFilterCap;

+ * + *

int uniquenessRatio;

+ * + *

int P1, P2;

+ * + *

int speckleWindowSize;

+ * + *

int speckleRange;

+ * + *

int disp12MaxDiff;

+ * + *

bool fullDP;...

+ * + *

};

+ * + *

The class implements the modified H. Hirschmuller algorithm [HH08] that + * differs from the original one as follows:

+ *
    + *
• By default, the algorithm is single-pass, which means that it + * considers only 5 directions instead of 8. Set fullDP=true to run + * the full variant of the algorithm, but beware that it may consume a lot of + * memory. + *
  • The algorithm matches blocks, not individual pixels. Though, setting + * SADWindowSize=1 reduces the blocks to single pixels. + *
• The mutual information cost function is not implemented. Instead, a + * simpler Birchfield-Tomasi sub-pixel metric from [BT98] is used, though + * color images are supported as well. + *
• Some pre- and post-processing steps from K. Konolige's algorithm + * "StereoBM.operator()" are included, for example: pre-filtering + * (CV_STEREO_BM_XSOBEL type) and post-filtering (uniqueness check, + * quadratic interpolation and speckle filtering). + *
+ * + * @see org.opencv.calib3d.StereoSGBM + */ +public class StereoSGBM { + + protected final long nativeObj; + protected StereoSGBM(long addr) { nativeObj = addr; } + + + public static final int + DISP_SHIFT = 4, + DISP_SCALE = (1 << DISP_SHIFT); + + + // + // C++: StereoSGBM::StereoSGBM() + // + +/** + * Initializes StereoSGBM and sets parameters to custom values.

+ * + *

The first constructor initializes StereoSGBM with all the + * default parameters. So, you only have to set StereoSGBM.numberOfDisparities + * at minimum. The second constructor enables you to set each parameter to a + * custom value.

+ * + * @see org.opencv.calib3d.StereoSGBM.StereoSGBM + */ + public StereoSGBM() + { + + nativeObj = StereoSGBM_0(); + + return; + } + + + // + // C++: StereoSGBM::StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize, int P1 = 0, int P2 = 0, int disp12MaxDiff = 0, int preFilterCap = 0, int uniquenessRatio = 0, int speckleWindowSize = 0, int speckleRange = 0, bool fullDP = false) + // + +/** + *

Initializes StereoSGBM and sets parameters to custom values.

+ * + *

The first constructor initializes StereoSGBM with all the + * default parameters. So, you only have to set StereoSGBM.numberOfDisparities + * at minimum. The second constructor enables you to set each parameter to a + * custom value.

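As a sketch of the parameter advice documented below (all concrete numbers are assumptions, and left/right are a rectified grayscale pair loaded as in the StereoBM sketch earlier), the conventional P1/P2 choices follow directly from the block size and channel count:

    // Assuming an 8-bit grayscale rectified pair (1 channel).
    int channels = 1;
    int sadWindowSize = 9;  // odd, in the 3..11 range
    int p1 = 8  * channels * sadWindowSize * sadWindowSize;
    int p2 = 32 * channels * sadWindowSize * sadWindowSize;  // must satisfy P2 > P1

    StereoSGBM sgbm = new StereoSGBM(
            0,       // minDisparity
            96,      // numDisparities, divisible by 16
            sadWindowSize,
            p1, p2,
            1,       // disp12MaxDiff
            63,      // preFilterCap
            10,      // uniquenessRatio
            100,     // speckleWindowSize
            2,       // speckleRange
            false);  // fullDP: the full two-pass variant uses much more memory

    Mat disp = new Mat();
    sgbm.compute(left, right, disp);  // CV_16S, scaled by 16 as with StereoBM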
+ * + * @param minDisparity Minimum possible disparity value. Normally, it is zero + * but sometimes rectification algorithms can shift images, so this parameter + * needs to be adjusted accordingly. + * @param numDisparities Maximum disparity minus minimum disparity. The value is + * always greater than zero. In the current implementation, this parameter must + * be divisible by 16. + * @param SADWindowSize Matched block size. It must be an odd number + * >=1. Normally, it should be somewhere in the 3..11 + * range. + * @param P1 The first parameter controlling the disparity smoothness. See + * below. + * @param P2 The second parameter controlling the disparity smoothness. The + * larger the values are, the smoother the disparity is. P1 is the + * penalty on the disparity change by plus or minus 1 between neighbor pixels. + * P2 is the penalty on the disparity change by more than 1 between + * neighbor pixels. The algorithm requires P2 > P1. See + * stereo_match.cpp sample where some reasonably good + * P1 and P2 values are shown (like 8*number_of_image_channels*SADWindowSize*SADWindowSize + * and 32*number_of_image_channels*SADWindowSize*SADWindowSize, + * respectively). + * @param disp12MaxDiff Maximum allowed difference (in integer pixel units) in + * the left-right disparity check. Set it to a non-positive value to disable the + * check. + * @param preFilterCap Truncation value for the prefiltered image pixels. The + * algorithm first computes x-derivative at each pixel and clips its value by + * [-preFilterCap, preFilterCap] interval. The result values are + * passed to the Birchfield-Tomasi pixel cost function. + * @param uniquenessRatio Margin in percentage by which the best (minimum) + * computed cost function value should "win" the second best value to consider + * the found match correct. Normally, a value within the 5-15 range is good + * enough. + * @param speckleWindowSize Maximum size of smooth disparity regions to consider + * their noise speckles and invalidate. Set it to 0 to disable speckle + * filtering. Otherwise, set it somewhere in the 50-200 range. + * @param speckleRange Maximum disparity variation within each connected + * component. If you do speckle filtering, set the parameter to a positive + * value, it will be implicitly multiplied by 16. Normally, 1 or 2 is good + * enough. + * @param fullDP Set it to true to run the full-scale two-pass + * dynamic programming algorithm. It will consume O(W*H*numDisparities) bytes, + * which is large for 640x480 stereo and huge for HD-size pictures. By default, + * it is set to false. + * + * @see org.opencv.calib3d.StereoSGBM.StereoSGBM + */ + public StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize, int P1, int P2, int disp12MaxDiff, int preFilterCap, int uniquenessRatio, int speckleWindowSize, int speckleRange, boolean fullDP) + { + + nativeObj = StereoSGBM_1(minDisparity, numDisparities, SADWindowSize, P1, P2, disp12MaxDiff, preFilterCap, uniquenessRatio, speckleWindowSize, speckleRange, fullDP); + + return; + } + +/** + *

Initializes StereoSGBM and sets parameters to custom values.

+ * + *

The first constructor initializes StereoSGBM with all the + * default parameters. So, you only have to set StereoSGBM.numberOfDisparities + * at minimum. The second constructor enables you to set each parameter to a + * custom value.

+ * + * @param minDisparity Minimum possible disparity value. Normally, it is zero + * but sometimes rectification algorithms can shift images, so this parameter + * needs to be adjusted accordingly. + * @param numDisparities Maximum disparity minus minimum disparity. The value is + * always greater than zero. In the current implementation, this parameter must + * be divisible by 16. + * @param SADWindowSize Matched block size. It must be an odd number + * >=1. Normally, it should be somewhere in the 3..11 + * range. + * + * @see org.opencv.calib3d.StereoSGBM.StereoSGBM + */ + public StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize) + { + + nativeObj = StereoSGBM_2(minDisparity, numDisparities, SADWindowSize); + + return; + } + + + // + // C++: void StereoSGBM::operator ()(Mat left, Mat right, Mat& disp) + // + + public void compute(Mat left, Mat right, Mat disp) + { + + compute_0(nativeObj, left.nativeObj, right.nativeObj, disp.nativeObj); + + return; + } + + + // + // C++: int StereoSGBM::minDisparity + // + + public int get_minDisparity() + { + + int retVal = get_minDisparity_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::minDisparity + // + + public void set_minDisparity(int minDisparity) + { + + set_minDisparity_0(nativeObj, minDisparity); + + return; + } + + + // + // C++: int StereoSGBM::numberOfDisparities + // + + public int get_numberOfDisparities() + { + + int retVal = get_numberOfDisparities_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::numberOfDisparities + // + + public void set_numberOfDisparities(int numberOfDisparities) + { + + set_numberOfDisparities_0(nativeObj, numberOfDisparities); + + return; + } + + + // + // C++: int StereoSGBM::SADWindowSize + // + + public int get_SADWindowSize() + { + + int retVal = get_SADWindowSize_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::SADWindowSize + // + + public void set_SADWindowSize(int SADWindowSize) + { + + set_SADWindowSize_0(nativeObj, SADWindowSize); + + return; + } + + + // + // C++: int StereoSGBM::preFilterCap + // + + public int get_preFilterCap() + { + + int retVal = get_preFilterCap_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::preFilterCap + // + + public void set_preFilterCap(int preFilterCap) + { + + set_preFilterCap_0(nativeObj, preFilterCap); + + return; + } + + + // + // C++: int StereoSGBM::uniquenessRatio + // + + public int get_uniquenessRatio() + { + + int retVal = get_uniquenessRatio_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::uniquenessRatio + // + + public void set_uniquenessRatio(int uniquenessRatio) + { + + set_uniquenessRatio_0(nativeObj, uniquenessRatio); + + return; + } + + + // + // C++: int StereoSGBM::P1 + // + + public int get_P1() + { + + int retVal = get_P1_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::P1 + // + + public void set_P1(int P1) + { + + set_P1_0(nativeObj, P1); + + return; + } + + + // + // C++: int StereoSGBM::P2 + // + + public int get_P2() + { + + int retVal = get_P2_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::P2 + // + + public void set_P2(int P2) + { + + set_P2_0(nativeObj, P2); + + return; + } + + + // + // C++: int StereoSGBM::speckleWindowSize + // + + public int get_speckleWindowSize() + { + + int retVal = get_speckleWindowSize_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::speckleWindowSize + // + + public void set_speckleWindowSize(int 
speckleWindowSize) + { + + set_speckleWindowSize_0(nativeObj, speckleWindowSize); + + return; + } + + + // + // C++: int StereoSGBM::speckleRange + // + + public int get_speckleRange() + { + + int retVal = get_speckleRange_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::speckleRange + // + + public void set_speckleRange(int speckleRange) + { + + set_speckleRange_0(nativeObj, speckleRange); + + return; + } + + + // + // C++: int StereoSGBM::disp12MaxDiff + // + + public int get_disp12MaxDiff() + { + + int retVal = get_disp12MaxDiff_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::disp12MaxDiff + // + + public void set_disp12MaxDiff(int disp12MaxDiff) + { + + set_disp12MaxDiff_0(nativeObj, disp12MaxDiff); + + return; + } + + + // + // C++: bool StereoSGBM::fullDP + // + + public boolean get_fullDP() + { + + boolean retVal = get_fullDP_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoSGBM::fullDP + // + + public void set_fullDP(boolean fullDP) + { + + set_fullDP_0(nativeObj, fullDP); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: StereoSGBM::StereoSGBM() + private static native long StereoSGBM_0(); + + // C++: StereoSGBM::StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize, int P1 = 0, int P2 = 0, int disp12MaxDiff = 0, int preFilterCap = 0, int uniquenessRatio = 0, int speckleWindowSize = 0, int speckleRange = 0, bool fullDP = false) + private static native long StereoSGBM_1(int minDisparity, int numDisparities, int SADWindowSize, int P1, int P2, int disp12MaxDiff, int preFilterCap, int uniquenessRatio, int speckleWindowSize, int speckleRange, boolean fullDP); + private static native long StereoSGBM_2(int minDisparity, int numDisparities, int SADWindowSize); + + // C++: void StereoSGBM::operator ()(Mat left, Mat right, Mat& disp) + private static native void compute_0(long nativeObj, long left_nativeObj, long right_nativeObj, long disp_nativeObj); + + // C++: int StereoSGBM::minDisparity + private static native int get_minDisparity_0(long nativeObj); + + // C++: void StereoSGBM::minDisparity + private static native void set_minDisparity_0(long nativeObj, int minDisparity); + + // C++: int StereoSGBM::numberOfDisparities + private static native int get_numberOfDisparities_0(long nativeObj); + + // C++: void StereoSGBM::numberOfDisparities + private static native void set_numberOfDisparities_0(long nativeObj, int numberOfDisparities); + + // C++: int StereoSGBM::SADWindowSize + private static native int get_SADWindowSize_0(long nativeObj); + + // C++: void StereoSGBM::SADWindowSize + private static native void set_SADWindowSize_0(long nativeObj, int SADWindowSize); + + // C++: int StereoSGBM::preFilterCap + private static native int get_preFilterCap_0(long nativeObj); + + // C++: void StereoSGBM::preFilterCap + private static native void set_preFilterCap_0(long nativeObj, int preFilterCap); + + // C++: int StereoSGBM::uniquenessRatio + private static native int get_uniquenessRatio_0(long nativeObj); + + // C++: void StereoSGBM::uniquenessRatio + private static native void set_uniquenessRatio_0(long nativeObj, int uniquenessRatio); + + // C++: int StereoSGBM::P1 + private static native int get_P1_0(long nativeObj); + + // C++: void StereoSGBM::P1 + private static native void set_P1_0(long nativeObj, int P1); + + // C++: int StereoSGBM::P2 + private static native int get_P2_0(long nativeObj); + + // C++: void StereoSGBM::P2 + private static 
native void set_P2_0(long nativeObj, int P2); + + // C++: int StereoSGBM::speckleWindowSize + private static native int get_speckleWindowSize_0(long nativeObj); + + // C++: void StereoSGBM::speckleWindowSize + private static native void set_speckleWindowSize_0(long nativeObj, int speckleWindowSize); + + // C++: int StereoSGBM::speckleRange + private static native int get_speckleRange_0(long nativeObj); + + // C++: void StereoSGBM::speckleRange + private static native void set_speckleRange_0(long nativeObj, int speckleRange); + + // C++: int StereoSGBM::disp12MaxDiff + private static native int get_disp12MaxDiff_0(long nativeObj); + + // C++: void StereoSGBM::disp12MaxDiff + private static native void set_disp12MaxDiff_0(long nativeObj, int disp12MaxDiff); + + // C++: bool StereoSGBM::fullDP + private static native boolean get_fullDP_0(long nativeObj); + + // C++: void StereoSGBM::fullDP + private static native void set_fullDP_0(long nativeObj, boolean fullDP); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/calib3d/package.bluej b/src/org/opencv/calib3d/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/contrib/Contrib.java b/src/org/opencv/contrib/Contrib.java new file mode 100644 index 0000000..1119797 --- /dev/null +++ b/src/org/opencv/contrib/Contrib.java @@ -0,0 +1,144 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.contrib; + +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfFloat; +import org.opencv.core.MatOfPoint; +import org.opencv.utils.Converters; + +public class Contrib { + + public static final int + ROTATION = 1, + TRANSLATION = 2, + RIGID_BODY_MOTION = 4, + COLORMAP_AUTUMN = 0, + COLORMAP_BONE = 1, + COLORMAP_JET = 2, + COLORMAP_WINTER = 3, + COLORMAP_RAINBOW = 4, + COLORMAP_OCEAN = 5, + COLORMAP_SUMMER = 6, + COLORMAP_SPRING = 7, + COLORMAP_COOL = 8, + COLORMAP_HSV = 9, + COLORMAP_PINK = 10, + COLORMAP_HOT = 11, + RETINA_COLOR_RANDOM = 0, + RETINA_COLOR_DIAGONAL = 1, + RETINA_COLOR_BAYER = 2; + + + // + // C++: void applyColorMap(Mat src, Mat& dst, int colormap) + // + +/** + *

Applies a GNU Octave/MATLAB equivalent colormap on a given image.

+ * + *

Currently the following GNU Octave/MATLAB equivalent colormaps are + * implemented: enum

+ * + *

// C++ code:

+ * + * + *

COLORMAP_AUTUMN = 0,

+ * + *

COLORMAP_BONE = 1,

+ * + *

COLORMAP_JET = 2,

+ * + *

COLORMAP_WINTER = 3,

+ * + *

COLORMAP_RAINBOW = 4,

+ * + *

COLORMAP_OCEAN = 5,

+ * + *

COLORMAP_SUMMER = 6,

+ * + *

COLORMAP_SPRING = 7,

+ * + *

COLORMAP_COOL = 8,

+ * + *

COLORMAP_HSV = 9,

+ * + *

COLORMAP_PINK = 10,

+ * + *

COLORMAP_HOT = 11

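A minimal sketch of calling the binding (the file names are hypothetical; any 8-bit grayscale input works):

    Mat gray = Highgui.imread("input.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
    Mat colored = new Mat();
    Contrib.applyColorMap(gray, colored, Contrib.COLORMAP_JET);
    Highgui.imwrite("input_jet.png", colored);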
+ * + * + * @param src The source image, grayscale or colored does not matter. + * @param dst The result is the colormapped source image. Note: "Mat.create" is + * called on dst. + * @param colormap The colormap to apply, see the list of available colormaps + * below. + * + * @see org.opencv.contrib.Contrib.applyColorMap + */ + public static void applyColorMap(Mat src, Mat dst, int colormap) + { + + applyColorMap_0(src.nativeObj, dst.nativeObj, colormap); + + return; + } + + + // + // C++: int chamerMatching(Mat img, Mat templ, vector_vector_Point& results, vector_float& cost, double templScale = 1, int maxMatches = 20, double minMatchDistance = 1.0, int padX = 3, int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6, double orientationWeight = 0.5, double truncate = 20) + // + + public static int chamerMatching(Mat img, Mat templ, List results, MatOfFloat cost, double templScale, int maxMatches, double minMatchDistance, int padX, int padY, int scales, double minScale, double maxScale, double orientationWeight, double truncate) + { + Mat results_mat = new Mat(); + Mat cost_mat = cost; + int retVal = chamerMatching_0(img.nativeObj, templ.nativeObj, results_mat.nativeObj, cost_mat.nativeObj, templScale, maxMatches, minMatchDistance, padX, padY, scales, minScale, maxScale, orientationWeight, truncate); + Converters.Mat_to_vector_vector_Point(results_mat, results); + return retVal; + } + + public static int chamerMatching(Mat img, Mat templ, List results, MatOfFloat cost) + { + Mat results_mat = new Mat(); + Mat cost_mat = cost; + int retVal = chamerMatching_1(img.nativeObj, templ.nativeObj, results_mat.nativeObj, cost_mat.nativeObj); + Converters.Mat_to_vector_vector_Point(results_mat, results); + return retVal; + } + + + // + // C++: Ptr_FaceRecognizer createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX) + // + + // Return type 'Ptr_FaceRecognizer' is not supported, skipping the function + + + // + // C++: Ptr_FaceRecognizer createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX) + // + + // Return type 'Ptr_FaceRecognizer' is not supported, skipping the function + + + // + // C++: Ptr_FaceRecognizer createLBPHFaceRecognizer(int radius = 1, int neighbors = 8, int grid_x = 8, int grid_y = 8, double threshold = DBL_MAX) + // + + // Return type 'Ptr_FaceRecognizer' is not supported, skipping the function + + + + + // C++: void applyColorMap(Mat src, Mat& dst, int colormap) + private static native void applyColorMap_0(long src_nativeObj, long dst_nativeObj, int colormap); + + // C++: int chamerMatching(Mat img, Mat templ, vector_vector_Point& results, vector_float& cost, double templScale = 1, int maxMatches = 20, double minMatchDistance = 1.0, int padX = 3, int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6, double orientationWeight = 0.5, double truncate = 20) + private static native int chamerMatching_0(long img_nativeObj, long templ_nativeObj, long results_mat_nativeObj, long cost_mat_nativeObj, double templScale, int maxMatches, double minMatchDistance, int padX, int padY, int scales, double minScale, double maxScale, double orientationWeight, double truncate); + private static native int chamerMatching_1(long img_nativeObj, long templ_nativeObj, long results_mat_nativeObj, long cost_mat_nativeObj); + +} diff --git a/src/org/opencv/contrib/FaceRecognizer.java b/src/org/opencv/contrib/FaceRecognizer.java new file mode 100644 index 0000000..7cdf086 --- /dev/null +++ 
b/src/org/opencv/contrib/FaceRecognizer.java @@ -0,0 +1,406 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.contrib; + +import java.lang.String; +import java.util.List; +import org.opencv.core.Algorithm; +import org.opencv.core.Mat; +import org.opencv.utils.Converters; + +// C++: class FaceRecognizer +/** + *

All face recognition models in OpenCV are derived from the abstract base + * class "FaceRecognizer", which provides unified access to all face + * recognition algorithms in OpenCV.

+ * + *

class FaceRecognizer : public Algorithm

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

//! virtual destructor

+ * + *

virtual ~FaceRecognizer() {}

+ * + *

// Trains a FaceRecognizer.

+ * + *

virtual void train(InputArray src, InputArray labels) = 0;

+ * + *

// Updates a FaceRecognizer.

+ * + *

virtual void update(InputArrayOfArrays src, InputArray labels);

+ * + *

// Gets a prediction from a FaceRecognizer.

+ * + *

virtual int predict(InputArray src) const = 0;

+ * + *

// Predicts the label and confidence for a given sample.

+ * + *

virtual void predict(InputArray src, int &label, double &confidence) const = + * 0;

+ * + *

// Serializes this object to a given filename.

+ * + *

virtual void save(const string& filename) const;

+ * + *

// Deserializes this object from a given filename.

+ * + *

virtual void load(const string& filename);

+ * + *

// Serializes this object to a given cv.FileStorage.

+ * + *

virtual void save(FileStorage& fs) const = 0;

+ * + *

// Deserializes this object from a given cv.FileStorage.

+ * + *

virtual void load(const FileStorage& fs) = 0;

+ * + *

};

+ * + * @see org.opencv.contrib.FaceRecognizer : public Algorithm + */ +public class FaceRecognizer extends Algorithm { + + protected FaceRecognizer(long addr) { super(addr); } + + + // + // C++: void FaceRecognizer::load(string filename) + // + +/** + *

Loads a "FaceRecognizer" and its model state.

+ * + *

Loads a persisted model and state from a given XML or YAML file. Every + * "FaceRecognizer" has to overwrite FaceRecognizer.load(FileStorage& + * fs) to enable loading the model state. FaceRecognizer.load(FileStorage& + * fs) in turn gets called by FaceRecognizer.load(const string& + * filename), to ease loading a model.

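A minimal round-trip sketch, assuming recognizer is an already-constructed FaceRecognizer and the file name is hypothetical (the format, XML vs. YAML, follows the extension):

    recognizer.save("recognizer.yml");
    // ... later, on a freshly constructed instance of the same algorithm:
    recognizer.load("recognizer.yml");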
+ * + * @param filename a filename + * + * @see org.opencv.contrib.FaceRecognizer.load + */ + public void load(String filename) + { + + load_0(nativeObj, filename); + + return; + } + + + // + // C++: void FaceRecognizer::predict(Mat src, int& label, double& confidence) + // + +/** + *

Predicts a label and associated confidence (e.g. distance) for a given input + * image.

+ * + *

The suffix const means that prediction does not affect the + * internal model state, so the method can be safely called from within + * different threads.

+ * + *

The following example shows how to get a prediction from a trained model: + * using namespace cv;

+ * + *

// C++ code:

+ * + *

// Do your initialization here (create the cv.FaceRecognizer model)...

+ * + *

//...

+ * + *

// Read in a sample image:

+ * + *

Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);

+ * + *

// And get a prediction from the cv.FaceRecognizer:

+ * + *

int predicted = model->predict(img);

+ * + *

Or to get a prediction and the associated confidence (e.g. distance):

+ * + *

using namespace cv;

+ * + *

// C++ code:

+ * + *

// Do your initialization here (create the cv.FaceRecognizer model)...

+ * + *

//...

+ * + *

Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);

+ * + *

// Some variables for the predicted label and associated confidence (e.g. + * distance):

+ * + *

int predicted_label = -1;

+ * + *

double predicted_confidence = 0.0;

+ * + *

// Get the prediction and associated confidence from the model

+ * + *

model->predict(img, predicted_label, predicted_confidence);

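In the Java binding, the C++ reference out-parameters map to one-element arrays. A sketch, assuming model was obtained elsewhere (the createXxxFaceRecognizer factories are skipped by the generator, as noted in Contrib.java):

    static void predictExample(FaceRecognizer model) {
        Mat img = Highgui.imread("person1/3.jpg", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
        int[] label = new int[1];             // C++ int& out-parameter
        double[] confidence = new double[1];  // C++ double& out-parameter
        model.predict(img, label, confidence);
        System.out.println("predicted " + label[0] + " with distance " + confidence[0]);
    }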
+ * + * @param src Sample image to get a prediction from. + * @param label The predicted label for the given image. + * @param confidence Associated confidence (e.g. distance) for the predicted + * label. + * + * @see org.opencv.contrib.FaceRecognizer.predict + */ + public void predict(Mat src, int[] label, double[] confidence) + { + double[] label_out = new double[1]; + double[] confidence_out = new double[1]; + predict_0(nativeObj, src.nativeObj, label_out, confidence_out); + if(label!=null) label[0] = (int)label_out[0]; + if(confidence!=null) confidence[0] = (double)confidence_out[0]; + return; + } + + + // + // C++: void FaceRecognizer::save(string filename) + // + +/** + *

Saves a "FaceRecognizer" and its model state.

+ * + *

Saves this model to a given filename, either as XML or YAML.

+ * + *

Saves this model to a given "FileStorage".

+ * + *

Every "FaceRecognizer" overwrites FaceRecognizer.save(FileStorage& + * fs) to save the internal model state. FaceRecognizer.save(const + * string& filename) saves the state of a model to the given filename.

+ * + *

The suffix const means that prediction does not affect the + * internal model state, so the method can be safely called from within + * different threads.

+ * + * @param filename The filename to store this "FaceRecognizer" to (either + * XML/YAML). + * + * @see org.opencv.contrib.FaceRecognizer.save + */ + public void save(String filename) + { + + save_0(nativeObj, filename); + + return; + } + + + // + // C++: void FaceRecognizer::train(vector_Mat src, Mat labels) + // + +/** + *

Trains a FaceRecognizer with given data and associated labels.

+ * + *

The following source code snippet shows you how to learn a Fisherfaces model + * on a given set of images. The images are read with "imread" and pushed into a + * std.vector<Mat>. The labels of each image are stored within a + * std.vector<int> (you could also use a "Mat" of type + * "CV_32SC1"). Think of the label as the subject (the person) this image + * belongs to, so same subjects (persons) should have the same label. For the + * available "FaceRecognizer" you don't have to pay any attention to the order + * of the labels, just make sure same persons have the same label: // holds + * images and labels

+ * + *

// C++ code:

+ * + *

vector<Mat> images;

+ * + *

vector<int> labels;

+ * + *

// images for first person

+ * + *

images.push_back(imread("person0/0.jpg", CV_LOAD_IMAGE_GRAYSCALE)); + * labels.push_back(0);

+ * + *

images.push_back(imread("person0/1.jpg", CV_LOAD_IMAGE_GRAYSCALE)); + * labels.push_back(0);

+ * + *

images.push_back(imread("person0/2.jpg", CV_LOAD_IMAGE_GRAYSCALE)); + * labels.push_back(0);

+ * + *

// images for second person

+ * + *

images.push_back(imread("person1/0.jpg", CV_LOAD_IMAGE_GRAYSCALE)); + * labels.push_back(1);

+ * + *

images.push_back(imread("person1/1.jpg", CV_LOAD_IMAGE_GRAYSCALE)); + * labels.push_back(1);

+ * + *

images.push_back(imread("person1/2.jpg", CV_LOAD_IMAGE_GRAYSCALE)); + * labels.push_back(1);

+ * + *

Now that you have read some images, you can create a new "FaceRecognizer". This + * example creates a Fisherfaces model and keeps all of the + * possible Fisherfaces:

+ * + *

// Create a new Fisherfaces model and retain all available Fisherfaces, + *

+ * + *

// C++ code:

+ * + *

// this is the most common usage of this specific FaceRecognizer:

+ * + *

//

+ * + *

Ptr<FaceRecognizer> model = createFisherFaceRecognizer();

+ * + *

And finally train it on the given dataset (the face images and labels): + *

+ * + *

// This is the common interface to train all of the available + * cv.FaceRecognizer

+ * + *

// C++ code:

+ * + *

// implementations:

+ * + *

//

+ * + *

model->train(images, labels);

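The Java counterpart is a sketch like the following, assuming model was obtained elsewhere; the labels become a CV_32SC1 Mat with one row per image, and the file names are hypothetical:

    import java.util.ArrayList;
    import java.util.List;

    static void trainExample(FaceRecognizer model) {
        List<Mat> images = new ArrayList<Mat>();
        images.add(Highgui.imread("person0/0.jpg", Highgui.CV_LOAD_IMAGE_GRAYSCALE));
        images.add(Highgui.imread("person1/0.jpg", Highgui.CV_LOAD_IMAGE_GRAYSCALE));

        // Labels as a CV_32SC1 column vector, one row per image.
        Mat labels = new Mat(2, 1, CvType.CV_32SC1);
        labels.put(0, 0, 0);  // first image  -> subject 0
        labels.put(1, 0, 1);  // second image -> subject 1

        model.train(images, labels);
    }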
+ * + * @param src The training images, that means the faces you want to learn. The + * data has to be given as a vector<Mat>. + * @param labels The labels corresponding to the images have to be given either + * as a vector<int> or a + * + * @see org.opencv.contrib.FaceRecognizer.train + */ + public void train(List<Mat> src, Mat labels) + { + Mat src_mat = Converters.vector_Mat_to_Mat(src); + train_0(nativeObj, src_mat.nativeObj, labels.nativeObj); + + return; + } + + + // + // C++: void FaceRecognizer::update(vector_Mat src, Mat labels) + // + +/** + *

Updates a FaceRecognizer with given data and associated labels.

+ * + *

This method updates a (probably trained) "FaceRecognizer", but only if the + * algorithm supports it. The Local Binary Patterns Histograms (LBPH) recognizer + * (see "createLBPHFaceRecognizer") can be updated. For the Eigenfaces and + * Fisherfaces methods, this is algorithmically not possible and you have to + * re-estimate the model with "FaceRecognizer.train". In any case, a call to + * train empties the existing model and learns a new model, while update does + * not delete any model data. + * // Create a new LBPH model (it can be updated) and use the default + * parameters,

+ * + *

// C++ code:

+ * + *

// this is the most common usage of this specific FaceRecognizer:

+ * + *

//

+ * + *

Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();

+ * + *

// This is the common interface to train all of the available + * cv.FaceRecognizer

+ * + *

// implementations:

+ * + *

//

+ * + *

model->train(images, labels);

+ * + *

// Some containers to hold new image:

+ * + *

vector<Mat> newImages;

+ * + *

vector<int> newLabels;

+ * + *

// You should add some images to the containers:

+ * + *

//

+ * + *

//...

+ * + *

//

+ * + *

// Now updating the model is as easy as calling:

+ * + *

model->update(newImages,newLabels);

+ * + *

// This will preserve the old model data and extend the existing model

+ * + *

// with the new features extracted from newImages!

+ * + *
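A hedged Java counterpart of the snippet above (model must be an updatable recognizer such as LBPH; the file name is hypothetical):

    List<Mat> newImages = new ArrayList<Mat>();
    newImages.add(Highgui.imread("person0/3.jpg", Highgui.CV_LOAD_IMAGE_GRAYSCALE));
    Mat newLabels = new Mat(1, 1, CvType.CV_32SC1);
    newLabels.put(0, 0, 0);      // the new image still shows subject 0
    model.update(newImages, newLabels);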

Calling update on an Eigenfaces model (see "createEigenFaceRecognizer"), + * which doesn't support updating, will throw an error similar to:

+ * + *

OpenCV Error: The function/feature is not implemented (This FaceRecognizer + * (FaceRecognizer.Eigenfaces) does not support updating, you have to use + * FaceRecognizer.train to update it.) in update, file /home/philipp/git/opencv/modules/contrib/src/facerec.cpp, + * line 305

+ * + *

// C++ code:

+ * + *

terminate called after throwing an instance of 'cv.Exception'

+ * + *

Please note: The "FaceRecognizer" does not store your training images, + * because this would be very memory intense and it's not the responsibility of + * te "FaceRecognizer" to do so. The caller is responsible for maintaining the + * dataset, he want to work with. + *

+ * + * @param src The training images, that means the faces you want to learn. The + * data has to be given as a vector<Mat>. + * @param labels The labels corresponding to the images have to be given either + * as a vector<int> or a + * + * @see org.opencv.contrib.FaceRecognizer.update + */ + public void update(List<Mat> src, Mat labels) + { + Mat src_mat = Converters.vector_Mat_to_Mat(src); + update_0(nativeObj, src_mat.nativeObj, labels.nativeObj); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: void FaceRecognizer::load(string filename) + private static native void load_0(long nativeObj, String filename); + + // C++: void FaceRecognizer::predict(Mat src, int& label, double& confidence) + private static native void predict_0(long nativeObj, long src_nativeObj, double[] label_out, double[] confidence_out); + + // C++: void FaceRecognizer::save(string filename) + private static native void save_0(long nativeObj, String filename); + + // C++: void FaceRecognizer::train(vector_Mat src, Mat labels) + private static native void train_0(long nativeObj, long src_mat_nativeObj, long labels_nativeObj); + + // C++: void FaceRecognizer::update(vector_Mat src, Mat labels) + private static native void update_0(long nativeObj, long src_mat_nativeObj, long labels_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/contrib/StereoVar.java b/src/org/opencv/contrib/StereoVar.java new file mode 100644 index 0000000..642a8a6 --- /dev/null +++ b/src/org/opencv/contrib/StereoVar.java @@ -0,0 +1,601 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.contrib; + +import org.opencv.core.Mat; + +// C++: class StereoVar +/** + *

Class for computing stereo correspondence using the variational matching + * algorithm

+ * + *

class StereoVar

+ * + *

// C++ code:

+ * + * + *

StereoVar();

+ * + *

StereoVar(int levels, double pyrScale,

+ * + *

int nIt, int minDisp, int maxDisp,

+ * + *

int poly_n, double poly_sigma, float fi,

+ * + *

float lambda, int penalization, int cycle,

+ * + *

int flags);

+ * + *

virtual ~StereoVar();

+ * + *

virtual void operator()(InputArray left, InputArray right, OutputArray disp);

+ * + *

int levels;

+ * + *

double pyrScale;

+ * + *

int nIt;

+ * + *

int minDisp;

+ * + *

int maxDisp;

+ * + *

int poly_n;

+ * + *

double poly_sigma;

+ * + *

float fi;

+ * + *

float lambda;

+ * + *

int penalization;

+ * + *

int cycle;

+ * + *

int flags;...

+ * + *

};

+ * + *

The class implements the modified S. G. Kosov algorithm [Publication] that + * differs from the original one as follows:

+ *
    + *
  • The automatic initialization of method's parameters is added. + *
  • The method of Smart Iteration Distribution (SID) is implemented. + *
  • The support of Multi-Level Adaptation Technique (MLAT) is not + * included. + *
  • The method of dynamic adaptation of method's parameters is not + * included. + *
+ * + * @see org.opencv.contrib.StereoVar + */ +public class StereoVar { + + protected final long nativeObj; + protected StereoVar(long addr) { nativeObj = addr; } + + + public static final int + USE_INITIAL_DISPARITY = 1, + USE_EQUALIZE_HIST = 2, + USE_SMART_ID = 4, + USE_AUTO_PARAMS = 8, + USE_MEDIAN_FILTERING = 16, + CYCLE_O = 0, + CYCLE_V = 1, + PENALIZATION_TICHONOV = 0, + PENALIZATION_CHARBONNIER = 1, + PENALIZATION_PERONA_MALIK = 2; + + + // + // C++: StereoVar::StereoVar() + // + +/** + *

The constructor

+ * + *

The first constructor initializes StereoVar with all the default + * parameters. So you only have to set StereoVar.maxDisp and/or + * StereoVar.minDisp at minimum. The second constructor enables + * you to set each parameter to a custom value.

+ * + * @see org.opencv.contrib.StereoVar.StereoVar + */ + public StereoVar() + { + + nativeObj = StereoVar_0(); + + return; + } + + + // + // C++: StereoVar::StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags) + // + +/** + *

The constructor

+ * + *

The first constructor initializes StereoVar with all the default + * parameters. So you only have to set StereoVar.maxDisp and/or + * StereoVar.minDisp at minimum. The second constructor enables + * you to set each parameter to a custom value.

+ * + * @param levels The number of pyramid layers, including the initial image. + * levels=1 means that no extra layers are created and only the original images + * are used. This parameter is ignored if flag USE_AUTO_PARAMS is set. + * @param pyrScale Specifies the image scale (<1) to build the pyramids for each + * image. pyrScale=0.5 means the classical pyramid, where each subsequent layer + * is half the size of the previous one. (This parameter is ignored if flag + * USE_AUTO_PARAMS is set). + * @param nIt The number of iterations the algorithm does at each pyramid level. + * (If the flag USE_SMART_ID is set, the number of iterations will be + * redistributed in such a way that more iterations are done on the + * coarser levels.) + * @param minDisp Minimum possible disparity value. Could be negative in case + * the left and right input images change places. + * @param maxDisp Maximum possible disparity value. + * @param poly_n Size of the pixel neighbourhood used to find polynomial + * expansion in each pixel. Larger values mean that the image will be + * approximated with smoother surfaces, yielding a more robust algorithm and a + * more blurred motion field. Typically, poly_n = 3, 5 or 7. + * @param poly_sigma Standard deviation of the Gaussian that is used to smooth + * derivatives that are used as a basis for the polynomial expansion. For + * poly_n=5 you can set poly_sigma=1.1; for poly_n=7 a good value would be + * poly_sigma=1.5. + * @param fi The smoothness parameter, or the weight coefficient for the + * smoothness term. + * @param lambda The threshold parameter for edge-preserving smoothness. (This + * parameter is ignored if PENALIZATION_CHARBONNIER or PENALIZATION_PERONA_MALIK + * is used.) + * @param penalization Possible values: PENALIZATION_TICHONOV - linear + * smoothness; PENALIZATION_CHARBONNIER - non-linear edge-preserving smoothness; + * PENALIZATION_PERONA_MALIK - non-linear edge-enhancing smoothness. (This + * parameter is ignored if flag USE_AUTO_PARAMS is set). + * @param cycle Type of the multigrid cycle. Possible values: CYCLE_O and + * CYCLE_V for null- and v-cycles respectively. (This parameter is ignored if + * flag USE_AUTO_PARAMS is set). + * @param flags The operation flags; can be a combination of the following: + *
    + *
  • USE_INITIAL_DISPARITY: Use the input flow as the initial flow + * approximation. + *
  • USE_EQUALIZE_HIST: Use the histogram equalization in the + * pre-processing phase. + *
  • USE_SMART_ID: Use the smart iteration distribution (SID). + *
  • USE_AUTO_PARAMS: Allow the method to initialize the main parameters. + *
  • USE_MEDIAN_FILTERING: Use the median filter of the solution in the + * post-processing phase (see the sketch after this list). + *
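A hedged Java sketch of typical use, assuming left and right are rectified, same-size grayscale Mats (names illustrative):

    StereoVar stereo = new StereoVar();   // default parameters, per the docs above
    stereo.set_maxDisp(64);               // only min/maxDisp must be set by hand
    stereo.set_flags(StereoVar.USE_AUTO_PARAMS | StereoVar.USE_MEDIAN_FILTERING);
    Mat disp = new Mat();
    stereo.compute(left, right, disp);    // Java name for the C++ operator()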
+ * + * @see org.opencv.contrib.StereoVar.StereoVar + */ + public StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags) + { + + nativeObj = StereoVar_1(levels, pyrScale, nIt, minDisp, maxDisp, poly_n, poly_sigma, fi, lambda, penalization, cycle, flags); + + return; + } + + + // + // C++: void StereoVar::operator ()(Mat left, Mat right, Mat& disp) + // + + public void compute(Mat left, Mat right, Mat disp) + { + + compute_0(nativeObj, left.nativeObj, right.nativeObj, disp.nativeObj); + + return; + } + + + // + // C++: int StereoVar::levels + // + + public int get_levels() + { + + int retVal = get_levels_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::levels + // + + public void set_levels(int levels) + { + + set_levels_0(nativeObj, levels); + + return; + } + + + // + // C++: double StereoVar::pyrScale + // + + public double get_pyrScale() + { + + double retVal = get_pyrScale_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::pyrScale + // + + public void set_pyrScale(double pyrScale) + { + + set_pyrScale_0(nativeObj, pyrScale); + + return; + } + + + // + // C++: int StereoVar::nIt + // + + public int get_nIt() + { + + int retVal = get_nIt_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::nIt + // + + public void set_nIt(int nIt) + { + + set_nIt_0(nativeObj, nIt); + + return; + } + + + // + // C++: int StereoVar::minDisp + // + + public int get_minDisp() + { + + int retVal = get_minDisp_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::minDisp + // + + public void set_minDisp(int minDisp) + { + + set_minDisp_0(nativeObj, minDisp); + + return; + } + + + // + // C++: int StereoVar::maxDisp + // + + public int get_maxDisp() + { + + int retVal = get_maxDisp_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::maxDisp + // + + public void set_maxDisp(int maxDisp) + { + + set_maxDisp_0(nativeObj, maxDisp); + + return; + } + + + // + // C++: int StereoVar::poly_n + // + + public int get_poly_n() + { + + int retVal = get_poly_n_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::poly_n + // + + public void set_poly_n(int poly_n) + { + + set_poly_n_0(nativeObj, poly_n); + + return; + } + + + // + // C++: double StereoVar::poly_sigma + // + + public double get_poly_sigma() + { + + double retVal = get_poly_sigma_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::poly_sigma + // + + public void set_poly_sigma(double poly_sigma) + { + + set_poly_sigma_0(nativeObj, poly_sigma); + + return; + } + + + // + // C++: float StereoVar::fi + // + + public float get_fi() + { + + float retVal = get_fi_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::fi + // + + public void set_fi(float fi) + { + + set_fi_0(nativeObj, fi); + + return; + } + + + // + // C++: float StereoVar::lambda + // + + public float get_lambda() + { + + float retVal = get_lambda_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::lambda + // + + public void set_lambda(float lambda) + { + + set_lambda_0(nativeObj, lambda); + + return; + } + + + // + // C++: int StereoVar::penalization + // + + public int get_penalization() + { + + int retVal = get_penalization_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::penalization + // + + public void set_penalization(int penalization) + { + + set_penalization_0(nativeObj, 
penalization); + + return; + } + + + // + // C++: int StereoVar::cycle + // + + public int get_cycle() + { + + int retVal = get_cycle_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::cycle + // + + public void set_cycle(int cycle) + { + + set_cycle_0(nativeObj, cycle); + + return; + } + + + // + // C++: int StereoVar::flags + // + + public int get_flags() + { + + int retVal = get_flags_0(nativeObj); + + return retVal; + } + + + // + // C++: void StereoVar::flags + // + + public void set_flags(int flags) + { + + set_flags_0(nativeObj, flags); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: StereoVar::StereoVar() + private static native long StereoVar_0(); + + // C++: StereoVar::StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags) + private static native long StereoVar_1(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags); + + // C++: void StereoVar::operator ()(Mat left, Mat right, Mat& disp) + private static native void compute_0(long nativeObj, long left_nativeObj, long right_nativeObj, long disp_nativeObj); + + // C++: int StereoVar::levels + private static native int get_levels_0(long nativeObj); + + // C++: void StereoVar::levels + private static native void set_levels_0(long nativeObj, int levels); + + // C++: double StereoVar::pyrScale + private static native double get_pyrScale_0(long nativeObj); + + // C++: void StereoVar::pyrScale + private static native void set_pyrScale_0(long nativeObj, double pyrScale); + + // C++: int StereoVar::nIt + private static native int get_nIt_0(long nativeObj); + + // C++: void StereoVar::nIt + private static native void set_nIt_0(long nativeObj, int nIt); + + // C++: int StereoVar::minDisp + private static native int get_minDisp_0(long nativeObj); + + // C++: void StereoVar::minDisp + private static native void set_minDisp_0(long nativeObj, int minDisp); + + // C++: int StereoVar::maxDisp + private static native int get_maxDisp_0(long nativeObj); + + // C++: void StereoVar::maxDisp + private static native void set_maxDisp_0(long nativeObj, int maxDisp); + + // C++: int StereoVar::poly_n + private static native int get_poly_n_0(long nativeObj); + + // C++: void StereoVar::poly_n + private static native void set_poly_n_0(long nativeObj, int poly_n); + + // C++: double StereoVar::poly_sigma + private static native double get_poly_sigma_0(long nativeObj); + + // C++: void StereoVar::poly_sigma + private static native void set_poly_sigma_0(long nativeObj, double poly_sigma); + + // C++: float StereoVar::fi + private static native float get_fi_0(long nativeObj); + + // C++: void StereoVar::fi + private static native void set_fi_0(long nativeObj, float fi); + + // C++: float StereoVar::lambda + private static native float get_lambda_0(long nativeObj); + + // C++: void StereoVar::lambda + private static native void set_lambda_0(long nativeObj, float lambda); + + // C++: int StereoVar::penalization + private static native int get_penalization_0(long nativeObj); + + // C++: void StereoVar::penalization + private static native void set_penalization_0(long nativeObj, int penalization); + + // C++: int StereoVar::cycle + private static native int get_cycle_0(long nativeObj); + + // C++: void StereoVar::cycle + private static native void 
set_cycle_0(long nativeObj, int cycle); + + // C++: int StereoVar::flags + private static native int get_flags_0(long nativeObj); + + // C++: void StereoVar::flags + private static native void set_flags_0(long nativeObj, int flags); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/contrib/package.bluej b/src/org/opencv/contrib/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/core/Algorithm.java b/src/org/opencv/core/Algorithm.java new file mode 100644 index 0000000..bd855b9 --- /dev/null +++ b/src/org/opencv/core/Algorithm.java @@ -0,0 +1,361 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.core; + +import java.lang.String; +import java.util.ArrayList; +import java.util.List; +import org.opencv.utils.Converters; + +// C++: class Algorithm +/** + *

This is a base class for all more or less complex algorithms in OpenCV, + * especially for classes of algorithms, for which there can be multiple + * implementations. The examples are stereo correspondence (for which there are + * algorithms like block matching, semi-global block matching, graph-cut etc.), + * background subtraction (which can be done using mixture-of-gaussians models, + * codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, + * Horn-Schunck etc.).

+ * + *

The class provides the following features for all derived classes:

+ *
    + *
  • so-called "virtual constructor". That is, each Algorithm derivative is + * registered at program start and you can get the list of registered algorithms + * and create an instance of a particular algorithm by its name (see + * Algorithm.create). If you plan to add your own algorithms, it + * is good practice to add a unique prefix to your algorithms to distinguish + * them from other algorithms. + *
  • setting/retrieving algorithm parameters by name. If you used video + * capturing functionality from the OpenCV highgui module, you are probably familiar + * with cvSetCaptureProperty(), cvGetCaptureProperty(), + * VideoCapture.set() and VideoCapture.get(). + * Algorithm provides a similar method where, instead of integer ids, + * you specify the parameter names as text strings. See Algorithm.set + * and Algorithm.get for details. + *
  • reading and writing parameters from/to XML or YAML files. Every + * Algorithm derivative can store all its parameters and then read them back. + * There is no need to re-implement it each time. + *
+ * + *

Here is an example of using SIFT in your application via the Algorithm interface:

+ * + *

#include "opencv2/opencv.hpp"

+ * + *

// C++ code:

+ * + *

#include "opencv2/nonfree/nonfree.hpp"...

+ * + *

initModule_nonfree(); // to load SURF/SIFT etc.

+ * + *

Ptr<Feature2D> sift = Algorithm.create("Feature2D.SIFT");

+ * + *

FileStorage fs("sift_params.xml", FileStorage.READ);

+ * + *

if(fs.isOpened()) // if we have a file with parameters, read them

+ * + * + *

sift->read(fs["sift_params"]);

+ * + *

fs.release();

+ * + * + *

else // else modify the parameters and store them; the user can later edit the + * file to use different parameters

+ * + * + *

sift->set("contrastThreshold", 0.01f); // lower the contrast threshold, + * compared to the default value

+ * + * + *

WriteStructContext ws(fs, "sift_params", CV_NODE_MAP);

+ * + *

sift->write(fs);

+ * + * + * + *

Mat image = imread("myimage.png", 0), descriptors;

+ * + *

vector<KeyPoint> keypoints;

+ * + *

(*sift)(image, noArray(), keypoints, descriptors);
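The same name-based access works from these Java wrappers; a sketch, assuming alg is some object extending org.opencv.core.Algorithm obtained elsewhere (the create/getList variants are skipped by the generator, as the class body below notes):

    double t = alg.getDouble("contrastThreshold");           // read a named parameter
    alg.setDouble("contrastThreshold", 0.01);                // write it back
    System.out.println(alg.paramHelp("contrastThreshold"));  // generated help text
    int typeId = alg.paramType("contrastThreshold");         // numeric type id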

+ * + * @see org.opencv.core.Algorithm + */ +public class Algorithm { + + protected final long nativeObj; + protected Algorithm(long addr) { nativeObj = addr; } + + + // + // C++: static Ptr_Algorithm Algorithm::_create(string name) + // + + // Return type 'Ptr_Algorithm' is not supported, skipping the function + + + // + // C++: Ptr_Algorithm Algorithm::getAlgorithm(string name) + // + + // Return type 'Ptr_Algorithm' is not supported, skipping the function + + + // + // C++: bool Algorithm::getBool(string name) + // + + public boolean getBool(String name) + { + + boolean retVal = getBool_0(nativeObj, name); + + return retVal; + } + + + // + // C++: double Algorithm::getDouble(string name) + // + + public double getDouble(String name) + { + + double retVal = getDouble_0(nativeObj, name); + + return retVal; + } + + + // + // C++: int Algorithm::getInt(string name) + // + + public int getInt(String name) + { + + int retVal = getInt_0(nativeObj, name); + + return retVal; + } + + + // + // C++: static void Algorithm::getList(vector_string& algorithms) + // + + // Unknown type 'vector_string' (O), skipping the function + + + // + // C++: Mat Algorithm::getMat(string name) + // + + public Mat getMat(String name) + { + + Mat retVal = new Mat(getMat_0(nativeObj, name)); + + return retVal; + } + + + // + // C++: vector_Mat Algorithm::getMatVector(string name) + // + + public List getMatVector(String name) + { + List retVal = new ArrayList(); + Mat retValMat = new Mat(getMatVector_0(nativeObj, name)); + Converters.Mat_to_vector_Mat(retValMat, retVal); + return retVal; + } + + + // + // C++: void Algorithm::getParams(vector_string& names) + // + + // Unknown type 'vector_string' (O), skipping the function + + + // + // C++: string Algorithm::getString(string name) + // + + public String getString(String name) + { + + String retVal = getString_0(nativeObj, name); + + return retVal; + } + + + // + // C++: string Algorithm::paramHelp(string name) + // + + public String paramHelp(String name) + { + + String retVal = paramHelp_0(nativeObj, name); + + return retVal; + } + + + // + // C++: int Algorithm::paramType(string name) + // + + public int paramType(String name) + { + + int retVal = paramType_0(nativeObj, name); + + return retVal; + } + + + // + // C++: void Algorithm::setAlgorithm(string name, Ptr_Algorithm value) + // + + // Unknown type 'Ptr_Algorithm' (I), skipping the function + + + // + // C++: void Algorithm::setBool(string name, bool value) + // + + public void setBool(String name, boolean value) + { + + setBool_0(nativeObj, name, value); + + return; + } + + + // + // C++: void Algorithm::setDouble(string name, double value) + // + + public void setDouble(String name, double value) + { + + setDouble_0(nativeObj, name, value); + + return; + } + + + // + // C++: void Algorithm::setInt(string name, int value) + // + + public void setInt(String name, int value) + { + + setInt_0(nativeObj, name, value); + + return; + } + + + // + // C++: void Algorithm::setMat(string name, Mat value) + // + + public void setMat(String name, Mat value) + { + + setMat_0(nativeObj, name, value.nativeObj); + + return; + } + + + // + // C++: void Algorithm::setMatVector(string name, vector_Mat value) + // + + public void setMatVector(String name, List value) + { + Mat value_mat = Converters.vector_Mat_to_Mat(value); + setMatVector_0(nativeObj, name, value_mat.nativeObj); + + return; + } + + + // + // C++: void Algorithm::setString(string name, string value) + // + + public void setString(String name, String value) + { 
+ + setString_0(nativeObj, name, value); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: bool Algorithm::getBool(string name) + private static native boolean getBool_0(long nativeObj, String name); + + // C++: double Algorithm::getDouble(string name) + private static native double getDouble_0(long nativeObj, String name); + + // C++: int Algorithm::getInt(string name) + private static native int getInt_0(long nativeObj, String name); + + // C++: Mat Algorithm::getMat(string name) + private static native long getMat_0(long nativeObj, String name); + + // C++: vector_Mat Algorithm::getMatVector(string name) + private static native long getMatVector_0(long nativeObj, String name); + + // C++: string Algorithm::getString(string name) + private static native String getString_0(long nativeObj, String name); + + // C++: string Algorithm::paramHelp(string name) + private static native String paramHelp_0(long nativeObj, String name); + + // C++: int Algorithm::paramType(string name) + private static native int paramType_0(long nativeObj, String name); + + // C++: void Algorithm::setBool(string name, bool value) + private static native void setBool_0(long nativeObj, String name, boolean value); + + // C++: void Algorithm::setDouble(string name, double value) + private static native void setDouble_0(long nativeObj, String name, double value); + + // C++: void Algorithm::setInt(string name, int value) + private static native void setInt_0(long nativeObj, String name, int value); + + // C++: void Algorithm::setMat(string name, Mat value) + private static native void setMat_0(long nativeObj, String name, long value_nativeObj); + + // C++: void Algorithm::setMatVector(string name, vector_Mat value) + private static native void setMatVector_0(long nativeObj, String name, long value_mat_nativeObj); + + // C++: void Algorithm::setString(string name, string value) + private static native void setString_0(long nativeObj, String name, String value); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/core/Core.java b/src/org/opencv/core/Core.java new file mode 100644 index 0000000..193797b --- /dev/null +++ b/src/org/opencv/core/Core.java @@ -0,0 +1,8198 @@ + +// +// This file is auto-generated. Please don't modify it! 
+// +package org.opencv.core; + +import java.lang.String; +import java.util.ArrayList; +import java.util.List; +import org.opencv.utils.Converters; + +public class Core { + + public static final String VERSION = "2.4.5.0", NATIVE_LIBRARY_NAME = "opencv_java245"; + public static final int VERSION_EPOCH = 2, VERSION_MAJOR = 4, VERSION_MINOR = 5, VERSION_REVISION = 0; + + private static final int + CV_8U = 0, + CV_8S = 1, + CV_16U = 2, + CV_16S = 3, + CV_32S = 4, + CV_32F = 5, + CV_64F = 6, + CV_USRTYPE1 = 7; + + + public static final int + SVD_MODIFY_A = 1, + SVD_NO_UV = 2, + SVD_FULL_UV = 4, + FILLED = -1, + LINE_AA = 16, + LINE_8 = 8, + LINE_4 = 4, + REDUCE_SUM = 0, + REDUCE_AVG = 1, + REDUCE_MAX = 2, + REDUCE_MIN = 3, + DECOMP_LU = 0, + DECOMP_SVD = 1, + DECOMP_EIG = 2, + DECOMP_CHOLESKY = 3, + DECOMP_QR = 4, + DECOMP_NORMAL = 16, + NORM_INF = 1, + NORM_L1 = 2, + NORM_L2 = 4, + NORM_L2SQR = 5, + NORM_HAMMING = 6, + NORM_HAMMING2 = 7, + NORM_TYPE_MASK = 7, + NORM_RELATIVE = 8, + NORM_MINMAX = 32, + CMP_EQ = 0, + CMP_GT = 1, + CMP_GE = 2, + CMP_LT = 3, + CMP_LE = 4, + CMP_NE = 5, + GEMM_1_T = 1, + GEMM_2_T = 2, + GEMM_3_T = 4, + DFT_INVERSE = 1, + DFT_SCALE = 2, + DFT_ROWS = 4, + DFT_COMPLEX_OUTPUT = 16, + DFT_REAL_OUTPUT = 32, + DCT_INVERSE = DFT_INVERSE, + DCT_ROWS = DFT_ROWS, + DEPTH_MASK_8U = 1 << CV_8U, + DEPTH_MASK_8S = 1 << CV_8S, + DEPTH_MASK_16U = 1 << CV_16U, + DEPTH_MASK_16S = 1 << CV_16S, + DEPTH_MASK_32S = 1 << CV_32S, + DEPTH_MASK_32F = 1 << CV_32F, + DEPTH_MASK_64F = 1 << CV_64F, + DEPTH_MASK_ALL = (DEPTH_MASK_64F<<1)-1, + DEPTH_MASK_ALL_BUT_8S = DEPTH_MASK_ALL & ~DEPTH_MASK_8S, + DEPTH_MASK_FLT = DEPTH_MASK_32F + DEPTH_MASK_64F, + MAGIC_MASK = 0xFFFF0000, + TYPE_MASK = 0x00000FFF, + DEPTH_MASK = 7, + SORT_EVERY_ROW = 0, + SORT_EVERY_COLUMN = 1, + SORT_ASCENDING = 0, + SORT_DESCENDING = 16, + COVAR_SCRAMBLED = 0, + COVAR_NORMAL = 1, + COVAR_USE_AVG = 2, + COVAR_SCALE = 4, + COVAR_ROWS = 8, + COVAR_COLS = 16, + KMEANS_RANDOM_CENTERS = 0, + KMEANS_PP_CENTERS = 2, + KMEANS_USE_INITIAL_LABELS = 1, + FONT_HERSHEY_SIMPLEX = 0, + FONT_HERSHEY_PLAIN = 1, + FONT_HERSHEY_DUPLEX = 2, + FONT_HERSHEY_COMPLEX = 3, + FONT_HERSHEY_TRIPLEX = 4, + FONT_HERSHEY_COMPLEX_SMALL = 5, + FONT_HERSHEY_SCRIPT_SIMPLEX = 6, + FONT_HERSHEY_SCRIPT_COMPLEX = 7, + FONT_ITALIC = 16; + + + // + // C++: void LUT(Mat src, Mat lut, Mat& dst, int interpolation = 0) + // + +/** + *

Performs a look-up table transform of an array.

+ * + *

The function LUT fills the output array with values from the + * look-up table. Indices of the entries are taken from the input array. That + * is, the function processes each element of src as follows:

+ * + *

dst(I) <- lut(src(I) + d)

+ * + *

where

+ * + *

d = 0 if src has depth CV_8U; 128 if src has depth CV_8S
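As a concrete Java sketch of this mapping, build a 256-entry table that inverts an assumed CV_8U Mat named src:

    Mat lut = new Mat(1, 256, CvType.CV_8U);
    byte[] table = new byte[256];
    for (int i = 0; i < 256; i++)
        table[i] = (byte) (255 - i);  // dst(I) = 255 - src(I)
    lut.put(0, 0, table);
    Mat dst = new Mat();
    Core.LUT(src, lut, dst);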

+ * + * @param src input array of 8-bit elements. + * @param lut look-up table of 256 elements; in case of multi-channel input + * array, the table should either have a single channel (in this case the same + * table is used for all channels) or the same number of channels as in the + * input array. + * @param dst output array of the same size and number of channels as + * src, and the same depth as lut. + * @param interpolation an interpolation + * + * @see org.opencv.core.Core.LUT + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#convertScaleAbs + */ + public static void LUT(Mat src, Mat lut, Mat dst, int interpolation) + { + + LUT_0(src.nativeObj, lut.nativeObj, dst.nativeObj, interpolation); + + return; + } + +/** + *

Performs a look-up table transform of an array.

+ * + *

The function LUT fills the output array with values from the + * look-up table. Indices of the entries are taken from the input array. That + * is, the function processes each element of src as follows:

+ * + *

dst(I) <- lut(src(I) + d)

+ * + *

where

+ * + *

d = 0 if src has depth CV_8U; 128 if src has depth CV_8S

+ * + * @param src input array of 8-bit elements. + * @param lut look-up table of 256 elements; in case of multi-channel input + * array, the table should either have a single channel (in this case the same + * table is used for all channels) or the same number of channels as in the + * input array. + * @param dst output array of the same size and number of channels as + * src, and the same depth as lut. + * + * @see org.opencv.core.Core.LUT + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#convertScaleAbs + */ + public static void LUT(Mat src, Mat lut, Mat dst) + { + + LUT_1(src.nativeObj, lut.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: double Mahalanobis(Mat v1, Mat v2, Mat icovar) + // + +/** + *

Calculates the Mahalanobis distance between two vectors.

+ * + *

The function Mahalanobis calculates and returns the weighted + * distance between two vectors:

+ * + *

d(vec1, vec2) = sqrt(sum_(i,j)(icovar(i,j)*(vec1(i)-vec2(i))*(vec1(j)-vec2(j))))

+ * + *

The covariance matrix may be calculated using the "calcCovarMatrix" function + * and then inverted using the "invert" function (preferably using the + * DECOMP_SVD method, as the most accurate).
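Following that recipe in Java, a sketch assuming samples is an N x d CV_64F Mat of row observations and v1, v2 are 1 x d row vectors:

    Mat covar = new Mat(), mean = new Mat();
    Core.calcCovarMatrix(samples, covar, mean,
            Core.COVAR_NORMAL | Core.COVAR_ROWS | Core.COVAR_SCALE);
    Mat icovar = new Mat();
    Core.invert(covar, icovar, Core.DECOMP_SVD);  // SVD, per the advice above
    double dist = Core.Mahalanobis(v1, v2, icovar);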

+ * + * @param v1 a v1 + * @param v2 a v2 + * @param icovar inverse covariance matrix. + * + * @see org.opencv.core.Core.Mahalanobis + */ + public static double Mahalanobis(Mat v1, Mat v2, Mat icovar) + { + + double retVal = Mahalanobis_0(v1.nativeObj, v2.nativeObj, icovar.nativeObj); + + return retVal; + } + + + // + // C++: void PCABackProject(Mat data, Mat mean, Mat eigenvectors, Mat& result) + // + + public static void PCABackProject(Mat data, Mat mean, Mat eigenvectors, Mat result) + { + + PCABackProject_0(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj, result.nativeObj); + + return; + } + + + // + // C++: void PCACompute(Mat data, Mat& mean, Mat& eigenvectors, int maxComponents = 0) + // + + public static void PCACompute(Mat data, Mat mean, Mat eigenvectors, int maxComponents) + { + + PCACompute_0(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj, maxComponents); + + return; + } + + public static void PCACompute(Mat data, Mat mean, Mat eigenvectors) + { + + PCACompute_1(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj); + + return; + } + + + // + // C++: void PCAComputeVar(Mat data, Mat& mean, Mat& eigenvectors, double retainedVariance) + // + + public static void PCAComputeVar(Mat data, Mat mean, Mat eigenvectors, double retainedVariance) + { + + PCAComputeVar_0(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj, retainedVariance); + + return; + } + + + // + // C++: void PCAProject(Mat data, Mat mean, Mat eigenvectors, Mat& result) + // + + public static void PCAProject(Mat data, Mat mean, Mat eigenvectors, Mat result) + { + + PCAProject_0(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj, result.nativeObj); + + return; + } + + + // + // C++: void SVBackSubst(Mat w, Mat u, Mat vt, Mat rhs, Mat& dst) + // + + public static void SVBackSubst(Mat w, Mat u, Mat vt, Mat rhs, Mat dst) + { + + SVBackSubst_0(w.nativeObj, u.nativeObj, vt.nativeObj, rhs.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void SVDecomp(Mat src, Mat& w, Mat& u, Mat& vt, int flags = 0) + // + + public static void SVDecomp(Mat src, Mat w, Mat u, Mat vt, int flags) + { + + SVDecomp_0(src.nativeObj, w.nativeObj, u.nativeObj, vt.nativeObj, flags); + + return; + } + + public static void SVDecomp(Mat src, Mat w, Mat u, Mat vt) + { + + SVDecomp_1(src.nativeObj, w.nativeObj, u.nativeObj, vt.nativeObj); + + return; + } + + + // + // C++: void absdiff(Mat src1, Mat src2, Mat& dst) + // + +/** + *

Calculates the per-element absolute difference between two arrays or between + * an array and a scalar.

+ * + *

The function absdiff calculates:

+ *
    + *
  • Absolute difference between two arrays when they have the same size + * and type: + *
+ * + *

dst(I) = saturate(| src1(I) - src2(I)|)

+ * + *
    + *
  • Absolute difference between an array and a scalar when the second + * array is constructed from Scalar or has as many elements as the + * number of channels in src1: + *
+ * + *

dst(I) = saturate(| src1(I) - src2|)

+ * + *
    + *
  • Absolute difference between a scalar and an array when the first array + * is constructed from Scalar or has as many elements as the number + * of channels in src2: + *
+ * + *

dst(I) = saturate(| src1 - src2(I)|)

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently.

+ * + *

Note: Saturation is not applied when the arrays have the depth + * CV_32S. You may even get a negative value in the case of + * overflow.
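A small Java sketch of both call shapes, assuming prev and curr are same-size, same-type frames:

    Mat diff = new Mat();
    Core.absdiff(prev, curr, diff);             // |prev - curr|, per element
    Core.absdiff(curr, new Scalar(128), diff);  // |curr - 128|, scalar variant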

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as input arrays. + * + * @see org.opencv.core.Core.absdiff + */ + public static void absdiff(Mat src1, Mat src2, Mat dst) + { + + absdiff_0(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void absdiff(Mat src1, Scalar src2, Mat& dst) + // + +/** + *

Calculates the per-element absolute difference between two arrays or between + * an array and a scalar.

+ * + *

The function absdiff calculates:

+ *
    + *
  • Absolute difference between two arrays when they have the same size + * and type: + *
+ * + *

dst(I) = saturate(| src1(I) - src2(I)|)

+ * + *
    + *
  • Absolute difference between an array and a scalar when the second + * array is constructed from Scalar or has as many elements as the + * number of channels in src1: + *
+ * + *

dst(I) = saturate(| src1(I) - src2|)

+ * + *
    + *
  • Absolute difference between a scalar and an array when the first array + * is constructed from Scalar or has as many elements as the number + * of channels in src2: + *
+ * + *

dst(I) = saturate(| src1 - src2(I)|)

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently.

+ * + *

Note: Saturation is not applied when the arrays have the depth + * CV_32S. You may even get a negative value in the case of + * overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as input arrays. + * + * @see org.opencv.core.Core.absdiff + */ + public static void absdiff(Mat src1, Scalar src2, Mat dst) + { + + absdiff_1(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: void add(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + // + +/** + *

Calculates the per-element sum of two arrays or an array and a scalar.

+ * + *

The function add calculates:

+ *
    + *
  • Sum of two arrays when both input arrays have the same size and the + * same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

+ * + *
    + *
  • Sum of an array and a scalar when src2 is constructed + * from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

+ * + *
    + *
  • Sum of a scalar and an array when src1 is constructed + * from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 + src2;

+ * + *

dst += src1; // equivalent to add(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can add a 16-bit unsigned array to an 8-bit signed + * array and store the sum as a 32-bit floating-point array. The depth of the output + * array is determined by the dtype parameter. In the second and + * third cases above, as well as in the first case, when src1.depth() == + * src2.depth(), dtype can be set to the default + * -1. In this case, the output array will have the same depth as + * the input array, be it src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result with an incorrect sign in the case + * of overflow.
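For instance, the masked, depth-widening case from the text, sketched in Java (a and b assumed CV_8U, roiMask an 8-bit mask; names illustrative):

    Mat sum = new Mat();
    Core.add(a, b, sum, roiMask, CvType.CV_16S);  // widen to avoid saturation
    Core.add(a, b, sum);                          // default depth: same as inputs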

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and number of channels as the + * input array(s); the depth is defined by dtype or + * src1/src2. + * @param mask optional operation mask - 8-bit single channel array, that + * specifies elements of the output array to be changed. + * @param dtype optional depth of the output array (see the discussion below). + * + * @see org.opencv.core.Core.add + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + */ + public static void add(Mat src1, Mat src2, Mat dst, Mat mask, int dtype) + { + + add_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj, dtype); + + return; + } + +/** + *

Calculates the per-element sum of two arrays or an array and a scalar.

+ * + *

The function add calculates:

+ *
    + *
  • Sum of two arrays when both input arrays have the same size and the + * same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

+ * + *
    + *
  • Sum of an array and a scalar when src2 is constructed + * from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

+ * + *
    + *
  • Sum of a scalar and an array when src1 is constructed + * from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 + src2;

+ * + *

dst += src1; // equivalent to add(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can add a 16-bit unsigned array to an 8-bit signed + * array and store the sum as a 32-bit floating-point array. The depth of the output + * array is determined by the dtype parameter. In the second and + * third cases above, as well as in the first case, when src1.depth() == + * src2.depth(), dtype can be set to the default + * -1. In this case, the output array will have the same depth as + * the input array, be it src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result with an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and number of channels as the + * input array(s); the depth is defined by dtype or + * src1/src2. + * @param mask optional operation mask - 8-bit single channel array, that + * specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.add + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + */ + public static void add(Mat src1, Mat src2, Mat dst, Mat mask) + { + + add_1(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element sum of two arrays or an array and a scalar.

+ * + *

The function add calculates:

+ *
    + *
  • Sum of two arrays when both input arrays have the same size and the + * same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

+ * + *
    + *
  • Sum of an array and a scalar when src2 is constructed + * from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

+ * + *
    + *
  • Sum of a scalar and an array when src1 is constructed + * from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 + src2;

+ * + *

dst += src1; // equivalent to add(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can add a 16-bit unsigned array to an 8-bit signed + * array and store the sum as a 32-bit floating-point array. The depth of the output + * array is determined by the dtype parameter. In the second and + * third cases above, as well as in the first case, when src1.depth() == + * src2.depth(), dtype can be set to the default + * -1. In this case, the output array will have the same depth as + * the input array, be it src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result with an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and number of channels as the + * input array(s); the depth is defined by dtype or + * src1/src2. + * + * @see org.opencv.core.Core.add + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + */ + public static void add(Mat src1, Mat src2, Mat dst) + { + + add_2(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void add(Mat src1, Scalar src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + // + +/** + *

Calculates the per-element sum of two arrays or an array and a scalar.

+ * + *

The function add calculates:

+ *
    + *
  • Sum of two arrays when both input arrays have the same size and the + * same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

+ * + *
    + *
  • Sum of an array and a scalar when src2 is constructed + * from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

+ * + *
    + *
  • Sum of a scalar and an array when src1 is constructed + * from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 + src2;

+ * + *

dst += src1; // equivalent to add(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can add a 16-bit unsigned array to an 8-bit signed + * array and store the sum as a 32-bit floating-point array. The depth of the output + * array is determined by the dtype parameter. In the second and + * third cases above, as well as in the first case, when src1.depth() == + * src2.depth(), dtype can be set to the default + * -1. In this case, the output array will have the same depth as + * the input array, be it src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result with an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and number of channels as the + * input array(s); the depth is defined by dtype or + * src1/src2. + * @param mask optional operation mask - 8-bit single channel array, that + * specifies elements of the output array to be changed. + * @param dtype optional depth of the output array (see the discussion below). + * + * @see org.opencv.core.Core.add + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + */ + public static void add(Mat src1, Scalar src2, Mat dst, Mat mask, int dtype) + { + + add_3(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, mask.nativeObj, dtype); + + return; + } + +/** + *

Calculates the per-element sum of two arrays or an array and a scalar.

+ * + *

The function add calculates:

+ *
    + *
  • Sum of two arrays when both input arrays have the same size and the + * same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

+ * + *
    + *
  • Sum of an array and a scalar when src2 is constructed + * from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

+ * + *
    + *
  • Sum of a scalar and an array when src1 is constructed + * from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 + src2;

+ * + *

dst += src1; // equivalent to add(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can add a 16-bit unsigned array to an 8-bit signed + * array and store the sum as a 32-bit floating-point array. The depth of the output + * array is determined by the dtype parameter. In the second and + * third cases above, as well as in the first case, when src1.depth() == + * src2.depth(), dtype can be set to the default + * -1. In this case, the output array will have the same depth as + * the input array, be it src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result with an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and number of channels as the + * input array(s); the depth is defined by dtype or + * src1/src2. + * @param mask optional operation mask - 8-bit single channel array, that + * specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.add + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + */ + public static void add(Mat src1, Scalar src2, Mat dst, Mat mask) + { + + add_4(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element sum of two arrays or an array and a scalar.

+ * + *

The function add calculates:

+ *
    + *
  • Sum of two arrays when both input arrays have the same size and the + * same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

+ * + *
    + *
  • Sum of an array and a scalar when src2 is constructed + * from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

+ * + *
    + *
  • Sum of a scalar and an array when src1 is constructed + * from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 + src2;

+ * + *

dst += src1; // equivalent to add(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can add a 16-bit unsigned array to an 8-bit signed + * array and store the sum as a 32-bit floating-point array. The depth of the output + * array is determined by the dtype parameter. In the second and + * third cases above, as well as in the first case, when src1.depth() == + * src2.depth(), dtype can be set to the default + * -1. In this case, the output array will have the same depth as + * the input array, be it src1, src2 or both. + *

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result with an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and number of channels as the + * input array(s); the depth is defined by dtype or + * src1/src2. + * + * @see org.opencv.core.Core.add + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + */ + public static void add(Mat src1, Scalar src2, Mat dst) + { + + add_5(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat& dst, int dtype = -1) + // + +/** + *

Calculates the weighted sum of two arrays.

+ * + *

The function addWeighted calculates the weighted sum of two + * arrays as follows:

+ * + *

dst(I)= saturate(src1(I)* alpha + src2(I)* beta + gamma)

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The function can be replaced with a matrix expression:

+ * + *

// C++ code:

+ * + *

dst = src1*alpha + src2*beta + gamma;

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result with an incorrect sign in the case + * of overflow. + *
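A one-line Java sketch of the blend, assuming imgA and imgB are same-size, same-type images:

    Mat blend = new Mat();
    Core.addWeighted(imgA, 0.7, imgB, 0.3, 10.0, blend);  // 0.7*A + 0.3*B + 10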

+ * + * @param src1 first input array. + * @param alpha weight of the first array elements. + * @param src2 second input array of the same size and channel number as + * src1. + * @param beta weight of the second array elements. + * @param gamma scalar added to each sum. + * @param dst output array that has the same size and number of channels as the + * input arrays. + * @param dtype optional depth of the output array; when both input arrays have + * the same depth, dtype can be set to -1, which will + * be equivalent to src1.depth(). + * + * @see org.opencv.core.Core.addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.core.Mat#convertTo + */ + public static void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat dst, int dtype) + { + + addWeighted_0(src1.nativeObj, alpha, src2.nativeObj, beta, gamma, dst.nativeObj, dtype); + + return; + } + +/** + *

Calculates the weighted sum of two arrays.

+ * + *

The function addWeighted calculates the weighted sum of two + * arrays as follows:

+ * + *

dst(I)= saturate(src1(I)* alpha + src2(I)* beta + gamma)

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The function can be replaced with a matrix expression:

+ * + *

// C++ code:

+ * + *

dst = src1*alpha + src2*beta + gamma;

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result with an incorrect sign in the case + * of overflow. + *

+ * + * @param src1 first input array. + * @param alpha weight of the first array elements. + * @param src2 second input array of the same size and channel number as + * src1. + * @param beta weight of the second array elements. + * @param gamma scalar added to each sum. + * @param dst output array that has the same size and number of channels as the + * input arrays. + * + * @see org.opencv.core.Core.addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.core.Mat#convertTo + */ + public static void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat dst) + { + + addWeighted_1(src1.nativeObj, alpha, src2.nativeObj, beta, gamma, dst.nativeObj); + + return; + } + + + // + // C++: void batchDistance(Mat src1, Mat src2, Mat& dist, int dtype, Mat& nidx, int normType = NORM_L2, int K = 0, Mat mask = Mat(), int update = 0, bool crosscheck = false) + // + + public static void batchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx, int normType, int K, Mat mask, int update, boolean crosscheck) + { + + batchDistance_0(src1.nativeObj, src2.nativeObj, dist.nativeObj, dtype, nidx.nativeObj, normType, K, mask.nativeObj, update, crosscheck); + + return; + } + + public static void batchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx, int normType, int K) + { + + batchDistance_1(src1.nativeObj, src2.nativeObj, dist.nativeObj, dtype, nidx.nativeObj, normType, K); + + return; + } + + public static void batchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx) + { + + batchDistance_2(src1.nativeObj, src2.nativeObj, dist.nativeObj, dtype, nidx.nativeObj); + + return; + } + + + // + // C++: void bitwise_and(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + // + +/** + *

Calculates the per-element bit-wise conjunction of two arrays or an array and + * a scalar.

+ * + *

The function calculates the per-element bit-wise logical conjunction for:

+ *
    + *
  • Two arrays when src1 and src2 have the same + * size: + *
+ * + *

dst(I) = src1(I) & src2(I) if mask(I) != 0

+ * + *
    + *
  • An array and a scalar when src2 is constructed from + * Scalar or has the same number of elements as src1.channels(): + *
+ * + *

dst(I) = src1(I) & src2 if mask(I) != 0

+ * + *
    + *
  • A scalar and an array when src1 is constructed from + * Scalar or has the same number of elements as src2.channels(): + *
+ * + *

dst(I) = src1 & src2(I) if mask(I) != 0

+ * + *

In case of floating-point arrays, their machine-specific bit representations + * (usually IEEE754-compliant) are used for the operation. In case of + * multi-channel arrays, each channel is processed independently. In the second + * and third cases above, the scalar is first converted to the array type.
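A common Java idiom built on this function is masking: keep only the pixels selected by an 8-bit mask (img and mask assumed to exist):

    Mat masked = new Mat();
    Core.bitwise_and(img, img, masked, mask);  // dst = img where mask != 0, else 0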

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as the input arrays. + * @param mask optional operation mask, 8-bit single channel array, that + * specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.bitwise_and + */ + public static void bitwise_and(Mat src1, Mat src2, Mat dst, Mat mask) + { + + bitwise_and_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element bit-wise conjunction of two arrays or an array and + * a scalar.

+ * + *

The function calculates the per-element bit-wise logical conjunction for:

+ *
    + *
  • Two arrays when src1 and src2 have the same + * size: + *
+ * + *

dst(I) = src1(I) & src2(I) if mask(I) != 0

+ * + *
    + *
  • An array and a scalar when src2 is constructed from + * Scalar or has the same number of elements as src1.channels(): + *
+ * + *

dst(I) = src1(I) & src2 if mask(I) != 0

+ * + *
    + *
  • A scalar and an array when src1 is constructed from + * Scalar or has the same number of elements as src2.channels(): + *
+ * + *

dst(I) = src1 & src2(I) if mask(I) != 0

+ * + *

In case of floating-point arrays, their machine-specific bit representations + * (usually IEEE754-compliant) are used for the operation. In case of + * multi-channel arrays, each channel is processed independently. In the second + * and third cases above, the scalar is first converted to the array type.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as the input arrays. + * + * @see org.opencv.core.Core.bitwise_and + */ + public static void bitwise_and(Mat src1, Mat src2, Mat dst) + { + + bitwise_and_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void bitwise_not(Mat src, Mat& dst, Mat mask = Mat()) + // + +/** + *

Inverts every bit of an array.

+ * + *

The function calculates per-element bit-wise inversion of the input array:

+ * + *

dst(I) = !src(I)

+ * + *

In case of a floating-point input array, its machine-specific bit + * representation (usually IEEE754-compliant) is used for the operation. In case + * of multi-channel arrays, each channel is processed independently.
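In Java, a typical use is flipping a binary mask (mask assumed to be a CV_8U image of 0/255 values):

    Mat inverted = new Mat();
    Core.bitwise_not(mask, inverted);  // 0 <-> 255 for a binary 8-bit mask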

+ * + * @param src input array. + * @param dst output array that has the same size and type as the input array. + * @param mask optional operation mask, 8-bit single channel array, that + * specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.bitwise_not + */ + public static void bitwise_not(Mat src, Mat dst, Mat mask) + { + + bitwise_not_0(src.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Inverts every bit of an array.

+ * + *

The function calculates per-element bit-wise inversion of the input array:

+ * + *

dst(I) = !src(I)

+ * + *

In case of a floating-point input array, its machine-specific bit + * representation (usually IEEE754-compliant) is used for the operation. In case + * of multi-channel arrays, each channel is processed independently.

+ * + * @param src input array. + * @param dst output array that has the same size and type as the input array. + * + * @see org.opencv.core.Core.bitwise_not + */ + public static void bitwise_not(Mat src, Mat dst) + { + + bitwise_not_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void bitwise_or(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + // + +/** + *

Calculates the per-element bit-wise disjunction of two arrays or an array and + * a scalar.

+ * + *

The function calculates the per-element bit-wise logical disjunction for:

+ *
    + *
  • Two arrays when src1 and src2 have the same + * size: + *
+ * + *

dst(I) = src1(I) V src2(I) if mask(I) != 0

+ * + *
    + *
  • An array and a scalar when src2 is constructed from + * Scalar or has the same number of elements as src1.channels(): + *
+ * + *

dst(I) = src1(I) V src2 if mask(I) != 0

+ * + *
    + *
  • A scalar and an array when src1 is constructed from + * Scalar or has the same number of elements as src2.channels(): + *
+ * + *

dst(I) = src1 V src2(I) if mask(I) != 0

+ * + *

In case of floating-point arrays, their machine-specific bit representations + * (usually IEEE754-compliant) are used for the operation. In case of + * multi-channel arrays, each channel is processed independently. In the second + * and third cases above, the scalar is first converted to the array type.
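Sketch: the union of two binary masks in Java (maskA and maskB assumed same-size CV_8U):

    Mat combined = new Mat();
    Core.bitwise_or(maskA, maskB, combined);  // nonzero where either mask is set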

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as the input arrays. + * @param mask optional operation mask, 8-bit single channel array, that + * specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.bitwise_or + */ + public static void bitwise_or(Mat src1, Mat src2, Mat dst, Mat mask) + { + + bitwise_or_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element bit-wise disjunction of two arrays or an array and + * a scalar.

+ * + *

The function calculates the per-element bit-wise logical disjunction for:

+ *
    + *
  • Two arrays when src1 and src2 have the same + * size: + *
+ * + *

dst(I) = src1(I) V src2(I) if mask(I) != 0

+ * + *
    + *
  • An array and a scalar when src2 is constructed from + * Scalar or has the same number of elements as src1.channels(): + *
+ * + *

dst(I) = src1(I) V src2 if mask(I) != 0

+ * + *
    + *
  • A scalar and an array when src1 is constructed from + * Scalar or has the same number of elements as src2.channels(): + *
+ * + *

dst(I) = src1 V src2(I) if mask(I) != 0

+ * + *

In case of floating-point arrays, their machine-specific bit representations + * (usually IEEE754-compliant) are used for the operation. In case of + * multi-channel arrays, each channel is processed independently. In the second + * and third cases above, the scalar is first converted to the array type.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as the input arrays. + * + * @see org.opencv.core.Core.bitwise_or + */ + public static void bitwise_or(Mat src1, Mat src2, Mat dst) + { + + bitwise_or_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void bitwise_xor(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + // + +/** + *

Calculates the per-element bit-wise "exclusive or" operation on two arrays or + * an array and a scalar.

+ * + *

The function calculates the per-element bit-wise logical "exclusive-or" + * operation for:

+ *
    + *
  • Two arrays when src1 and src2 have the same + * size: + *
+ * + *

dst(I) = src1(I)(+) src2(I) if mask(I) != 0

+ * + *
    + *
  • An array and a scalar when src2 is constructed from + * Scalar or has the same number of elements as src1.channels(): + *
+ * + *

dst(I) = src1(I)(+) src2 if mask(I) != 0

+ * + *
    + *
  • A scalar and an array when src1 is constructed from + * Scalar or has the same number of elements as src2.channels(): + *
+ * + *

dst(I) = src1(+) src2(I) if mask(I) != 0

+ * + *

In case of floating-point arrays, their machine-specific bit representations + * (usually IEEE754-compliant) are used for the operation. In case of + * multi-channel arrays, each channel is processed independently. In the second + * and third cases above, the scalar is first converted to the array type.
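As an illustrative sketch, XOR against an all-ones array inverts every bit (equivalent to bitwise_not):

import org.opencv.core.*;

public class BitwiseXorSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Mat src  = new Mat(1, 3, CvType.CV_8UC1, new Scalar(0xAA));
        Mat ones = new Mat(1, 3, CvType.CV_8UC1, new Scalar(0xFF));
        Mat dst  = new Mat();
        Core.bitwise_xor(src, ones, dst); // 0xAA ^ 0xFF = 0x55
        System.out.println(dst.dump());   // [85, 85, 85]
    }
}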

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as the input arrays. + * @param mask optional operation mask, 8-bit single channel array, that + * specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.bitwise_xor + */ + public static void bitwise_xor(Mat src1, Mat src2, Mat dst, Mat mask) + { + + bitwise_xor_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element bit-wise "exclusive or" operation on two arrays or + * an array and a scalar.

+ * + *

The function calculates the per-element bit-wise logical "exclusive-or" + * operation for:

+ *
    + *
  • Two arrays when src1 and src2 have the same + * size: + *
+ * + *

dst(I) = src1(I)(+) src2(I) if mask(I) != 0

+ * + *
    + *
  • An array and a scalar when src2 is constructed from + * Scalar or has the same number of elements as src1.channels(): + *
+ * + *

dst(I) = src1(I)(+) src2 if mask(I) != 0

+ * + *
    + *
  • A scalar and an array when src1 is constructed from + * Scalar or has the same number of elements as src2.channels(): + *
+ * + *

dst(I) = src1(+) src2(I) if mask(I) != 0

+ * + *

In case of floating-point arrays, their machine-specific bit representations + * (usually IEEE754-compliant) are used for the operation. In case of + * multi-channel arrays, each channel is processed independently. In the second + * and third cases above, the scalar is first converted to the array type.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array that has the same size and type as the input arrays. + * + * @see org.opencv.core.Core.bitwise_xor + */ + public static void bitwise_xor(Mat src1, Mat src2, Mat dst) + { + + bitwise_xor_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void calcCovarMatrix(Mat samples, Mat& covar, Mat& mean, int flags, int ctype = CV_64F) + // + +/** + *

Calculates the covariance matrix of a set of vectors.

+ * + *

The functions calcCovarMatrix calculate the covariance matrix + * and, optionally, the mean vector of the set of input vectors.
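A small Java sketch with three 2-D samples stored as rows (values illustrative; the COVAR_* names are the Java constants mirroring the CV_COVAR_* flags described below):

import org.opencv.core.*;

public class CovarSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Mat samples = new Mat(3, 2, CvType.CV_32FC1); // one sample per row
        samples.put(0, 0, new float[]{1, 2,  3, 4,  5, 6});
        Mat covar = new Mat(), mean = new Mat();
        Core.calcCovarMatrix(samples, covar, mean,
                Core.COVAR_NORMAL | Core.COVAR_ROWS | Core.COVAR_SCALE);
        System.out.println(mean.dump());  // [3, 4], the per-column average
        System.out.println(covar.dump()); // 2x2 scaled covariance (CV_64F by default)
    }
}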

+ * + * @param samples samples stored either as separate matrices or as rows/columns + * of a single matrix. + * @param covar output covariance matrix of the type ctype and + * square size. + * @param mean input or output (depending on the flags) array as the average + * value of the input vectors. + * @param flags operation flags as a combination of the following values: + *
    + *
  • CV_COVAR_SCRAMBLED The output covariance matrix is calculated as: + *
+ * + *

scale * [ vects [0]- mean, vects [1]- mean,...]^T * [ vects [0]- mean, + * vects [1]- mean,...],

+ * + *

The covariance matrix will be nsamples x nsamples. Such an + * unusual covariance matrix is used for fast PCA of a set of very large vectors + * (see, for example, the EigenFaces technique for face recognition). + * Eigenvalues of this "scrambled" matrix match the eigenvalues of the true + * covariance matrix. The "true" eigenvectors can be easily calculated from the + * eigenvectors of the "scrambled" covariance matrix.

+ *
    + *
  • CV_COVAR_NORMAL The output covariance matrix is calculated as: + *
+ * + *

scale * [ vects [0]- mean, vects [1]- mean,...] * [ vects [0]- mean, + * vects [1]- mean,...]^T,

+ * + *

covar will be a square matrix of the same size as the total + * number of elements in each input vector. One and only one of + * CV_COVAR_SCRAMBLED and CV_COVAR_NORMAL must be + * specified.

+ *
    + *
  • CV_COVAR_USE_AVG If the flag is specified, the function does not + * calculate mean from the input vectors but, instead, uses the + * passed mean vector. This is useful if mean has been + * pre-calculated or known in advance, or if the covariance matrix is calculated + * by parts. In this case, mean is not a mean vector of the input + * sub-set of vectors but rather the mean vector of the whole set. + *
  • CV_COVAR_SCALE If the flag is specified, the covariance matrix is + * scaled. In the "normal" mode, scale is 1./nsamples. + * In the "scrambled" mode, scale is the reciprocal of the total + * number of elements in each input vector. By default (if the flag is not + * specified), the covariance matrix is not scaled (scale=1). + *
  • CV_COVAR_ROWS [Only useful in the second variant of the function] If + * the flag is specified, all the input vectors are stored as rows of the + * samples matrix. mean should be a single-row vector + * in this case. + *
  • CV_COVAR_COLS [Only useful in the second variant of the function] If + * the flag is specified, all the input vectors are stored as columns of the + * samples matrix. mean should be a single-column + * vector in this case. + *
+ * @param ctype type of the matrix; it equals 'CV_64F' by default. + * + * @see org.opencv.core.Core.calcCovarMatrix + * @see org.opencv.core.Core#Mahalanobis + * @see org.opencv.core.Core#mulTransposed + */ + public static void calcCovarMatrix(Mat samples, Mat covar, Mat mean, int flags, int ctype) + { + + calcCovarMatrix_0(samples.nativeObj, covar.nativeObj, mean.nativeObj, flags, ctype); + + return; + } + +/** + *

Calculates the covariance matrix of a set of vectors.

+ * + *

The functions calcCovarMatrix calculate the covariance matrix + * and, optionally, the mean vector of the set of input vectors.

+ * + * @param samples samples stored either as separate matrices or as rows/columns + * of a single matrix. + * @param covar output covariance matrix of the type ctype and + * square size. + * @param mean input or output (depending on the flags) array as the average + * value of the input vectors. + * @param flags operation flags as a combination of the following values: + *
    + *
  • CV_COVAR_SCRAMBLED The output covariance matrix is calculated as: + *
+ * + *

scale * [ vects [0]- mean, vects [1]- mean,...]^T * [ vects [0]- mean, + * vects [1]- mean,...],

+ * + *

The covariance matrix will be nsamples x nsamples. Such an + * unusual covariance matrix is used for fast PCA of a set of very large vectors + * (see, for example, the EigenFaces technique for face recognition). + * Eigenvalues of this "scrambled" matrix match the eigenvalues of the true + * covariance matrix. The "true" eigenvectors can be easily calculated from the + * eigenvectors of the "scrambled" covariance matrix.

+ *
    + *
  • CV_COVAR_NORMAL The output covariance matrix is calculated as: + *
+ * + *

scale * [ vects [0]- mean, vects [1]- mean,...] * [ vects [0]- mean, + * vects [1]- mean,...]^T,

+ * + *

covar will be a square matrix of the same size as the total + * number of elements in each input vector. One and only one of + * CV_COVAR_SCRAMBLED and CV_COVAR_NORMAL must be + * specified.

+ *
    + *
  • CV_COVAR_USE_AVG If the flag is specified, the function does not + * calculate mean from the input vectors but, instead, uses the + * passed mean vector. This is useful if mean has been + * pre-calculated or known in advance, or if the covariance matrix is calculated + * by parts. In this case, mean is not a mean vector of the input + * sub-set of vectors but rather the mean vector of the whole set. + *
  • CV_COVAR_SCALE If the flag is specified, the covariance matrix is + * scaled. In the "normal" mode, scale is 1./nsamples. + * In the "scrambled" mode, scale is the reciprocal of the total + * number of elements in each input vector. By default (if the flag is not + * specified), the covariance matrix is not scaled (scale=1). + *
  • CV_COVAR_ROWS [Only useful in the second variant of the function] If + * the flag is specified, all the input vectors are stored as rows of the + * samples matrix. mean should be a single-row vector + * in this case. + *
  • CV_COVAR_COLS [Only useful in the second variant of the function] If + * the flag is specified, all the input vectors are stored as columns of the + * samples matrix. mean should be a single-column + * vector in this case. + *
+ * + * @see org.opencv.core.Core.calcCovarMatrix + * @see org.opencv.core.Core#Mahalanobis + * @see org.opencv.core.Core#mulTransposed + */ + public static void calcCovarMatrix(Mat samples, Mat covar, Mat mean, int flags) + { + + calcCovarMatrix_1(samples.nativeObj, covar.nativeObj, mean.nativeObj, flags); + + return; + } + + + // + // C++: void cartToPolar(Mat x, Mat y, Mat& magnitude, Mat& angle, bool angleInDegrees = false) + // + +/** + *

Calculates the magnitude and angle of 2D vectors.

+ * + *

The function cartToPolar calculates either the magnitude, angle, + * or both for every 2D vector (x(I),y(I)):

+ * + *

magnitude(I)= sqrt(x(I)^2+y(I)^2), + * angle(I)= atan2(y(I), x(I))[ *180 / pi ]

+ * + *

The angles are calculated with an accuracy of about 0.3 degrees. For the point + * (0,0), the angle is set to 0.
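A brief Java sketch converting two Cartesian points to polar form (values illustrative):

import org.opencv.core.*;

public class CartToPolarSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Mat x = new Mat(1, 2, CvType.CV_32FC1);
        Mat y = new Mat(1, 2, CvType.CV_32FC1);
        x.put(0, 0, new float[]{3, 0});
        y.put(0, 0, new float[]{4, 1});
        Mat mag = new Mat(), ang = new Mat();
        Core.cartToPolar(x, y, mag, ang, true); // true: angles in degrees
        System.out.println(mag.dump()); // [5, 1]
        System.out.println(ang.dump()); // approximately [53.13, 90]
    }
}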

+ * + * @param x array of x-coordinates; this must be a single-precision or + * double-precision floating-point array. + * @param y array of y-coordinates, that must have the same size and same type + * as x. + * @param magnitude output array of magnitudes of the same size and type as + * x. + * @param angle output array of angles that has the same size and type as + * x; the angles are measured in radians (from 0 to 2*Pi) or in + * degrees (0 to 360 degrees). + * @param angleInDegrees a flag, indicating whether the angles are measured in + * radians (which is by default), or in degrees. + * + * @see org.opencv.core.Core.cartToPolar + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#Sobel + */ + public static void cartToPolar(Mat x, Mat y, Mat magnitude, Mat angle, boolean angleInDegrees) + { + + cartToPolar_0(x.nativeObj, y.nativeObj, magnitude.nativeObj, angle.nativeObj, angleInDegrees); + + return; + } + +/** + *

Calculates the magnitude and angle of 2D vectors.

+ * + *

The function cartToPolar calculates either the magnitude, angle, + * or both for every 2D vector (x(I),y(I)):

+ * + *

magnitude(I)= sqrt(x(I)^2+y(I)^2), + * angle(I)= atan2(y(I), x(I))[ *180 / pi ]

+ * + *

The angles are calculated with an accuracy of about 0.3 degrees. For the point + * (0,0), the angle is set to 0.

+ * + * @param x array of x-coordinates; this must be a single-precision or + * double-precision floating-point array. + * @param y array of y-coordinates, that must have the same size and same type + * as x. + * @param magnitude output array of magnitudes of the same size and type as + * x. + * @param angle output array of angles that has the same size and type as + * x; the angles are measured in radians (from 0 to 2*Pi) or in + * degrees (0 to 360 degrees). + * + * @see org.opencv.core.Core.cartToPolar + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#Sobel + */ + public static void cartToPolar(Mat x, Mat y, Mat magnitude, Mat angle) + { + + cartToPolar_1(x.nativeObj, y.nativeObj, magnitude.nativeObj, angle.nativeObj); + + return; + } + + + // + // C++: bool checkRange(Mat a, bool quiet = true, _hidden_ * pos = 0, double minVal = -DBL_MAX, double maxVal = DBL_MAX) + // + +/** + *

Checks every element of an input array for invalid values.

+ * + *

The functions checkRange check that every array element is + * neither NaN nor infinite. When minVal > -DBL_MAX and + * maxVal < DBL_MAX, the functions also check that each value is + * between minVal and maxVal. In case of multi-channel + * arrays, each channel is processed independently. + * If some values are out of range, the position of the first outlier is stored + * in pos (when pos != NULL). Then, the functions either + * return false (when quiet=true) or throw an exception.
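An illustrative Java sketch; with quiet=true the function reports the bad element by returning false rather than throwing:

import org.opencv.core.*;

public class CheckRangeSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Mat a = Mat.ones(2, 2, CvType.CV_32FC1);
        a.put(0, 0, new float[]{Float.NaN});                      // poison one element
        System.out.println(Core.checkRange(a));                   // false
        System.out.println(Core.checkRange(a, true, 0.0, 10.0));  // false, still quiet
    }
}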

+ * + * @param a input array. + * @param quiet a flag, indicating whether the functions quietly return false + * when the array elements are out of range or they throw an exception. + * @param minVal inclusive lower boundary of valid values range. + * @param maxVal exclusive upper boundary of valid values range. + * + * @see org.opencv.core.Core.checkRange + */ + public static boolean checkRange(Mat a, boolean quiet, double minVal, double maxVal) + { + + boolean retVal = checkRange_0(a.nativeObj, quiet, minVal, maxVal); + + return retVal; + } + +/** + *

Checks every element of an input array for invalid values.

+ * + *

The functions checkRange check that every array element is + * neither NaN nor infinite. When minVal > -DBL_MAX and + * maxVal < DBL_MAX, the functions also check that each value is + * between minVal and maxVal. In case of multi-channel + * arrays, each channel is processed independently. + * If some values are out of range, the position of the first outlier is stored + * in pos (when pos != NULL). Then, the functions either + * return false (when quiet=true) or throw an exception.

+ * + * @param a input array. + * + * @see org.opencv.core.Core.checkRange + */ + public static boolean checkRange(Mat a) + { + + boolean retVal = checkRange_1(a.nativeObj); + + return retVal; + } + + + // + // C++: void circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + // + +/** + *

Draws a circle.

+ * + *

The function circle draws a simple or filled circle with a given + * center and radius.
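A small Java sketch drawing an outlined and a filled circle on a blank canvas (sizes and colors are arbitrary):

import org.opencv.core.*;

public class CircleSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Mat canvas = Mat.zeros(200, 200, CvType.CV_8UC3);
        Core.circle(canvas, new Point(100, 100), 60, new Scalar(0, 255, 0), 2);  // 2 px outline
        Core.circle(canvas, new Point(100, 100), 20, new Scalar(0, 0, 255), -1); // filled
    }
}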

+ * + * @param img Image where the circle is drawn. + * @param center Center of the circle. + * @param radius Radius of the circle. + * @param color Circle color. + * @param thickness Thickness of the circle outline, if positive. Negative + * thickness means that a filled circle is to be drawn. + * @param lineType Type of the circle boundary. See the "line" description. + * @param shift Number of fractional bits in the coordinates of the center and + * in the radius value. + * + * @see org.opencv.core.Core.circle + */ + public static void circle(Mat img, Point center, int radius, Scalar color, int thickness, int lineType, int shift) + { + + circle_0(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); + + return; + } + +/** + *

Draws a circle.

+ * + *

The function circle draws a simple or filled circle with a given + * center and radius.

+ * + * @param img Image where the circle is drawn. + * @param center Center of the circle. + * @param radius Radius of the circle. + * @param color Circle color. + * @param thickness Thickness of the circle outline, if positive. Negative + * thickness means that a filled circle is to be drawn. + * + * @see org.opencv.core.Core.circle + */ + public static void circle(Mat img, Point center, int radius, Scalar color, int thickness) + { + + circle_1(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws a circle.

+ * + *

The function circle draws a simple or filled circle with a given + * center and radius.

+ * + * @param img Image where the circle is drawn. + * @param center Center of the circle. + * @param radius Radius of the circle. + * @param color Circle color. + * + * @see org.opencv.core.Core.circle + */ + public static void circle(Mat img, Point center, int radius, Scalar color) + { + + circle_2(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: bool clipLine(Rect imgRect, Point& pt1, Point& pt2) + // + +/** + *

Clips the line against the image rectangle.

+ * + *

The functions clipLine calculate a part of the line segment that + * is entirely within the specified rectangle. + * They return false if the line segment is completely outside the + * rectangle. Otherwise, they return true.
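An illustrative Java sketch; note that this wrapper writes the clipped coordinates back into pt1 and pt2:

import org.opencv.core.*;

public class ClipLineSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Rect img = new Rect(0, 0, 100, 100);
        Point p1 = new Point(-50, 50);     // starts outside the rectangle
        Point p2 = new Point(150, 50);     // ends outside the rectangle
        boolean visible = Core.clipLine(img, p1, p2);
        System.out.println(visible);       // true: part of the segment is inside
        System.out.println(p1 + " " + p2); // endpoints clipped to the rectangle
    }
}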

+ * + * @param imgRect Image rectangle. + * @param pt1 First line point. + * @param pt2 Second line point. + * + * @see org.opencv.core.Core.clipLine + */ + public static boolean clipLine(Rect imgRect, Point pt1, Point pt2) + { + double[] pt1_out = new double[2]; + double[] pt2_out = new double[2]; + boolean retVal = clipLine_0(imgRect.x, imgRect.y, imgRect.width, imgRect.height, pt1.x, pt1.y, pt1_out, pt2.x, pt2.y, pt2_out); + if(pt1!=null){ pt1.x = pt1_out[0]; pt1.y = pt1_out[1]; } + if(pt2!=null){ pt2.x = pt2_out[0]; pt2.y = pt2_out[1]; } + return retVal; + } + + + // + // C++: void compare(Mat src1, Mat src2, Mat& dst, int cmpop) + // + +/** + *

Performs the per-element comparison of two arrays or an array and scalar + * value.

+ * + *

The function compares:

+ *
    + *
  • Elements of two arrays when src1 and src2 + * have the same size: + *
+ * + *

dst(I) = src1(I) cmpop src2(I)

+ * + *
    + *
  • Elements of src1 with a scalar src2 when + * src2 is constructed from Scalar or has a single + * element: + *
+ * + *

dst(I) = src1(I) cmpop src2

+ * + *
    + *
  • src1 with elements of src2 when + * src1 is constructed from Scalar or has a single + * element: + *
+ * + *

dst(I) = src1 cmpop src2(I)

+ * + *

When the comparison result is true, the corresponding element of the output + * array is set to 255. The comparison operations can be replaced with the + * equivalent matrix expressions:

+ * + *

// C++ code:

+ * + *

Mat dst1 = src1 >= src2;

+ * + *

Mat dst2 = src1 < 8;...
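The matrix expressions above map onto the Java wrapper roughly like this (sketch with arbitrary values):

import org.opencv.core.*;

public class CompareSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Mat src = new Mat(1, 4, CvType.CV_8UC1);
        src.put(0, 0, new byte[]{1, 5, 8, 12});
        Mat dst = new Mat();
        Core.compare(src, new Scalar(8), dst, Core.CMP_GE); // Java analogue of src >= 8
        System.out.println(dst.dump()); // [0, 0, 255, 255]
    }
}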

+ * + * @param src1 first input array or a scalar (in the case of cvCmp, + * cv.Cmp, cvCmpS, cv.CmpS it is always + * an array); when it is an array, it must have a single channel. + * @param src2 second input array or a scalar (in the case of cvCmp + * and cv.Cmp it is always an array; in the case of + * cvCmpS, cv.CmpS it is always a scalar); when it is + * an array, it must have a single channel. + * @param dst output array that has the same size as the input arrays and type= + * CV_8UC1. + * @param cmpop a flag, that specifies correspondence between the arrays: + *
    + *
  • CMP_EQ src1 is equal to src2. + *
  • CMP_GT src1 is greater than src2. + *
  • CMP_GE src1 is greater than or equal to src2. + *
  • CMP_LT src1 is less than src2. + *
  • CMP_LE src1 is less than or equal to src2. + *
  • CMP_NE src1 is unequal to src2. + *
+ * + * @see org.opencv.core.Core.compare + * @see org.opencv.imgproc.Imgproc#threshold + * @see org.opencv.core.Core#max + * @see org.opencv.core.Core#checkRange + * @see org.opencv.core.Core#min + */ + public static void compare(Mat src1, Mat src2, Mat dst, int cmpop) + { + + compare_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, cmpop); + + return; + } + + + // + // C++: void compare(Mat src1, Scalar src2, Mat& dst, int cmpop) + // + +/** + *

Performs the per-element comparison of two arrays or an array and scalar + * value.

+ * + *

The function compares:

+ *
    + *
  • Elements of two arrays when src1 and src2 + * have the same size: + *
+ * + *

dst(I) = src1(I) cmpop src2(I)

+ * + *
    + *
  • Elements of src1 with a scalar src2 when + * src2 is constructed from Scalar or has a single + * element: + *
+ * + *

dst(I) = src1(I) cmpop src2

+ * + *
    + *
  • src1 with elements of src2 when + * src1 is constructed from Scalar or has a single + * element: + *
+ * + *

dst(I) = src1 cmpop src2(I)

+ * + *

When the comparison result is true, the corresponding element of the output + * array is set to 255. The comparison operations can be replaced with the + * equivalent matrix expressions:

+ * + *

// C++ code:

+ * + *

Mat dst1 = src1 >= src2;

+ * + *

Mat dst2 = src1 < 8;...

+ * + * @param src1 first input array or a scalar (in the case of cvCmp, + * cv.Cmp, cvCmpS, cv.CmpS it is always + * an array); when it is an array, it must have a single channel. + * @param src2 second input array or a scalar (in the case of cvCmp + * and cv.Cmp it is always an array; in the case of + * cvCmpS, cv.CmpS it is always a scalar); when it is + * an array, it must have a single channel. + * @param dst output array that has the same size as the input arrays and type= + * CV_8UC1. + * @param cmpop a flag, that specifies correspondence between the arrays: + *
    + *
  • CMP_EQ src1 is equal to src2. + *
  • CMP_GT src1 is greater than src2. + *
  • CMP_GE src1 is greater than or equal to src2. + *
  • CMP_LT src1 is less than src2. + *
  • CMP_LE src1 is less than or equal to src2. + *
  • CMP_NE src1 is unequal to src2. + *
+ * + * @see org.opencv.core.Core.compare + * @see org.opencv.imgproc.Imgproc#threshold + * @see org.opencv.core.Core#max + * @see org.opencv.core.Core#checkRange + * @see org.opencv.core.Core#min + */ + public static void compare(Mat src1, Scalar src2, Mat dst, int cmpop) + { + + compare_1(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, cmpop); + + return; + } + + + // + // C++: void completeSymm(Mat& mtx, bool lowerToUpper = false) + // + +/** + *

Copies the lower or the upper half of a square matrix to another half.

+ * + *

The function completeSymm copies the lower half of a square + * matrix to the other half. The matrix diagonal remains unchanged:

+ *
    + *
  • mtx_(ij)=mtx_(ji) for i > j if lowerToUpper=false + *
  • mtx_(ij)=mtx_(ji) for i < j if lowerToUpper=true + *
+ * + * @param mtx input-output floating-point square matrix. + * @param lowerToUpper operation flag; if true, the lower half is copied to the + * upper half. Otherwise, the upper half is copied to the lower half. + * + * @see org.opencv.core.Core.completeSymm + * @see org.opencv.core.Core#transpose + * @see org.opencv.core.Core#flip + */ + public static void completeSymm(Mat mtx, boolean lowerToUpper) + { + + completeSymm_0(mtx.nativeObj, lowerToUpper); + + return; + } + +/** + *

Copies the lower or the upper half of a square matrix to another half.

+ * + *

The function completeSymm copies the lower half of a square + * matrix to the other half. The matrix diagonal remains unchanged:

+ *
    + *
  • mtx_(ij)=mtx_(ji) for i > j if lowerToUpper=false + *
  • mtx_(ij)=mtx_(ji) for i < j if lowerToUpper=true + *
+ * + * @param mtx input-output floating-point square matrix. + * + * @see org.opencv.core.Core.completeSymm + * @see org.opencv.core.Core#transpose + * @see org.opencv.core.Core#flip + */ + public static void completeSymm(Mat mtx) + { + + completeSymm_1(mtx.nativeObj); + + return; + } + + + // + // C++: void convertScaleAbs(Mat src, Mat& dst, double alpha = 1, double beta = 0) + // + +/** + *

Scales, calculates absolute values, and converts the result to 8-bit.

+ * + *

On each element of the input array, the function convertScaleAbs + * performs three operations sequentially: scaling, taking an absolute value, + * conversion to an unsigned 8-bit type:

+ * + *

dst(I)= saturate_cast<uchar>(| src(I)* alpha + beta|). In case + * of multi-channel arrays, the function processes each channel independently. + * When the output is not 8-bit, the operation can be emulated by calling the + * Mat.convertTo method (or by using matrix expressions) and then + * by calculating an absolute value of the result. For example:

+ * + *

// C++ code:

+ * + *

Mat_ A(30,30);

+ * + *

randu(A, Scalar(-100), Scalar(100));

+ * + *

Mat_ B = A*5 + 3;

+ * + *

B = abs(B);

+ * + *

// Mat_ B = abs(A*5+3) will also do the job,

+ * + *

// but it will allocate a temporary matrix
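The equivalent call through the Java wrapper looks like this (illustrative sketch):

import org.opencv.core.*;

public class ConvertScaleAbsSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Mat src = new Mat(1, 3, CvType.CV_32FC1);
        src.put(0, 0, new float[]{-100, 0, 100});
        Mat dst = new Mat();
        Core.convertScaleAbs(src, dst, 2.0, 10.0); // |2*src + 10|, saturated to 8-bit
        System.out.println(dst.dump());            // [190, 10, 210]
    }
}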

+ * + * @param src input array. + * @param dst output array. + * @param alpha optional scale factor. + * @param beta optional delta added to the scaled values. + * + * @see org.opencv.core.Core.convertScaleAbs + * @see org.opencv.core.Mat#convertTo + */ + public static void convertScaleAbs(Mat src, Mat dst, double alpha, double beta) + { + + convertScaleAbs_0(src.nativeObj, dst.nativeObj, alpha, beta); + + return; + } + +/** + *

Scales, calculates absolute values, and converts the result to 8-bit.

+ * + *

On each element of the input array, the function convertScaleAbs + * performs three operations sequentially: scaling, taking an absolute value, + * conversion to an unsigned 8-bit type:

+ * + *

dst(I)= saturate_cast<uchar>(| src(I)* alpha + beta|). In case + * of multi-channel arrays, the function processes each channel independently. + * When the output is not 8-bit, the operation can be emulated by calling the + * Mat.convertTo method (or by using matrix expressions) and then + * by calculating an absolute value of the result. For example:

+ * + *

// C++ code:

+ * + *

Mat_ A(30,30);

+ * + *

randu(A, Scalar(-100), Scalar(100));

+ * + *

Mat_ B = A*5 + 3;

+ * + *

B = abs(B);

+ * + *

// Mat_ B = abs(A*5+3) will also do the job,

+ * + *

// but it will allocate a temporary matrix

+ * + * @param src input array. + * @param dst output array. + * + * @see org.opencv.core.Core.convertScaleAbs + * @see org.opencv.core.Mat#convertTo + */ + public static void convertScaleAbs(Mat src, Mat dst) + { + + convertScaleAbs_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: int countNonZero(Mat src) + // + +/** + *

Counts non-zero array elements.

+ * + *

The function returns the number of non-zero elements in src :

+ * + *

sum_(I: src(I) != 0) 1
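A two-line Java sketch (values arbitrary):

import org.opencv.core.*;

public class CountNonZeroSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Mat m = Mat.zeros(3, 3, CvType.CV_8UC1);
        m.put(0, 0, new byte[]{1, 0, 7});         // two non-zero elements
        System.out.println(Core.countNonZero(m)); // 2
    }
}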

+ * + * @param src single-channel array. + * + * @see org.opencv.core.Core.countNonZero + * @see org.opencv.core.Core#minMaxLoc + * @see org.opencv.core.Core#calcCovarMatrix + * @see org.opencv.core.Core#meanStdDev + * @see org.opencv.core.Core#norm + * @see org.opencv.core.Core#mean + */ + public static int countNonZero(Mat src) + { + + int retVal = countNonZero_0(src.nativeObj); + + return retVal; + } + + + // + // C++: float cubeRoot(float val) + // + +/** + *

Computes the cube root of an argument.

+ * + *

The function cubeRoot computes the cube root of val, that is, val^(1/3). Negative + * arguments are handled correctly. NaN and Inf are not handled. The accuracy + * approaches the maximum possible accuracy for single-precision data.
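For example (illustrative):

import org.opencv.core.Core;

public class CubeRootSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        System.out.println(Core.cubeRoot(-27.0f)); // -3.0: negative input is handled
    }
}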

+ * + * @param val A function argument. + * + * @see org.opencv.core.Core.cubeRoot + */ + public static float cubeRoot(float val) + { + + float retVal = cubeRoot_0(val); + + return retVal; + } + + + // + // C++: void dct(Mat src, Mat& dst, int flags = 0) + // + +/** + *

Performs a forward or inverse discrete Cosine transform of 1D or 2D array.

+ * + *

The function dct performs a forward or inverse discrete Cosine + * transform (DCT) of a 1D or 2D floating-point array:

+ *
    + *
  • Forward Cosine transform of a 1D vector of N elements: + *
+ * + *

Y = C^N * X

+ * + *

where

+ * + *

C^N_(jk)= sqrt(alpha_j/N) cos((pi(2k+1)j)/(2N))

+ * + *

and

+ * + *

alpha_0=1, alpha_j=2 for *j > 0*.

+ *
    + *
  • Inverse Cosine transform of a 1D vector of N elements: + *
+ * + *

X = (C^N)^(-1) * Y = (C^N)^T * Y

+ * + *

(since C^N is an orthogonal matrix, C^N * (C^N)^T = I)

+ *
    + *
  • Forward 2D Cosine transform of M x N matrix: + *
+ * + *

Y = C^N * X * (C^N)^T

+ * + *
    + *
  • Inverse 2D Cosine transform of M x N matrix: + *
+ * + *

X = (C^N)^T * Y * C^N

+ * + *

The function chooses the mode of operation by looking at the flags and size + * of the input array:

+ *
    + *
  • If (flags & DCT_INVERSE) == 0, the function does a + * forward 1D or 2D transform. Otherwise, it is an inverse 1D or 2D transform. + *
  • If (flags & DCT_ROWS) != 0, the function performs a 1D + * transform of each row. + *
  • If the array is a single column or a single row, the function performs + * a 1D transform. + *
  • If none of the above is true, the function performs a 2D transform. + *
+ * + *

Note:

+ * + *

Currently dct supports even-size arrays (2, 4, 6...). For data + * analysis and approximation, you can pad the array when necessary.

+ * + *

Also, the function performance depends very much, and not monotonically, on + * the array size (see "getOptimalDFTSize"). In the current implementation, the DCT + * of a vector of size N is calculated via the DFT of a vector of size + * N/2. Thus, the optimal DCT size N1 >= N can be + * calculated as:

+ * + *

// C++ code:

+ * + *

size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); }

+ * + *

N1 = getOptimalDCTSize(N);
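A round-trip Java sketch on an even-sized vector (values arbitrary):

import org.opencv.core.*;

public class DctSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Mat src = new Mat(1, 8, CvType.CV_32FC1); // even size, as required
        src.put(0, 0, new float[]{1, 2, 3, 4, 4, 3, 2, 1});
        Mat freq = new Mat(), back = new Mat();
        Core.dct(src, freq);                    // forward 1D DCT
        Core.dct(freq, back, Core.DCT_INVERSE); // inverse recovers src
        System.out.println(back.dump());
    }
}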

+ * + *

+ * + * @param src input floating-point array. + * @param dst output array of the same size and type as src. + * @param flags transformation flags as a combination of the following values: + *
    + *
  • DCT_INVERSE performs an inverse 1D or 2D transform instead of the + * default forward transform. + *
  • DCT_ROWS performs a forward or inverse transform of every individual + * row of the input matrix. This flag enables you to transform multiple vectors + * simultaneously and can be used to decrease the overhead (which is sometimes + * several times larger than the processing itself) to perform 3D and + * higher-dimensional transforms and so forth. + *
+ * + * @see org.opencv.core.Core.dct + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#idct + * @see org.opencv.core.Core#getOptimalDFTSize + */ + public static void dct(Mat src, Mat dst, int flags) + { + + dct_0(src.nativeObj, dst.nativeObj, flags); + + return; + } + +/** + *

Performs a forward or inverse discrete Cosine transform of 1D or 2D array.

+ * + *

The function dct performs a forward or inverse discrete Cosine + * transform (DCT) of a 1D or 2D floating-point array:

+ *
    + *
  • Forward Cosine transform of a 1D vector of N elements: + *
+ * + *

Y = C^N * X

+ * + *

where

+ * + *

C^N_(jk)= sqrt(alpha_j/N) cos((pi(2k+1)j)/(2N))

+ * + *

and

+ * + *

alpha_0=1, alpha_j=2 for *j > 0*.

+ *
    + *
  • Inverse Cosine transform of a 1D vector of N elements: + *
+ * + *

X = (C^N)^(-1) * Y = (C^N)^T * Y

+ * + *

(since C^N is an orthogonal matrix, C^N * (C^N)^T = I)

+ *
    + *
  • Forward 2D Cosine transform of M x N matrix: + *
+ * + *

Y = C^N * X * (C^N)^T

+ * + *
    + *
  • Inverse 2D Cosine transform of M x N matrix: + *
+ * + *

X = (C^N)^T * Y * C^N

+ * + *

The function chooses the mode of operation by looking at the flags and size + * of the input array:

+ *
    + *
  • If (flags & DCT_INVERSE) == 0, the function does a + * forward 1D or 2D transform. Otherwise, it is an inverse 1D or 2D transform. + *
  • If (flags & DCT_ROWS) != 0, the function performs a 1D + * transform of each row. + *
  • If the array is a single column or a single row, the function performs + * a 1D transform. + *
  • If none of the above is true, the function performs a 2D transform. + *
+ * + *

Note:

+ * + *

Currently dct supports even-size arrays (2, 4, 6...). For data + * analysis and approximation, you can pad the array when necessary.

+ * + *

Also, the function performance depends very much, and not monotonically, on + * the array size (see "getOptimalDFTSize"). In the current implementation, the DCT + * of a vector of size N is calculated via the DFT of a vector of size + * N/2. Thus, the optimal DCT size N1 >= N can be + * calculated as:

+ * + *

// C++ code:

+ * + *

size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); }

+ * + *

N1 = getOptimalDCTSize(N);

+ * + *

+ * + * @param src input floating-point array. + * @param dst output array of the same size and type as src. + * + * @see org.opencv.core.Core.dct + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#idct + * @see org.opencv.core.Core#getOptimalDFTSize + */ + public static void dct(Mat src, Mat dst) + { + + dct_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: double determinant(Mat mtx) + // + +/** + *

Returns the determinant of a square floating-point matrix.

+ * + *

The function determinant calculates and returns the determinant + * of the specified matrix. For small matrices (mtx.cols=mtx.rows<=3), + * the direct method is used. For larger matrices, the function uses LU + * factorization with partial pivoting.

+ * + *

For symmetric positive-definite matrices, it is also possible to use + * "eigen" decomposition to calculate the determinant.
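For example (illustrative):

import org.opencv.core.*;

public class DeterminantSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Mat m = Mat.eye(3, 3, CvType.CV_64FC1);
        m.put(0, 0, new double[]{2.0});          // scale one diagonal element
        System.out.println(Core.determinant(m)); // 2.0
    }
}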

+ * + * @param mtx input matrix that must have CV_32FC1 or + * CV_64FC1 type and square size. + * + * @see org.opencv.core.Core.determinant + * @see org.opencv.core.Core#invert + * @see org.opencv.core.Core#solve + * @see org.opencv.core.Core#eigen + * @see org.opencv.core.Core#trace + */ + public static double determinant(Mat mtx) + { + + double retVal = determinant_0(mtx.nativeObj); + + return retVal; + } + + + // + // C++: void dft(Mat src, Mat& dst, int flags = 0, int nonzeroRows = 0) + // + +/** + *

Performs a forward or inverse Discrete Fourier transform of a 1D or 2D + * floating-point array.

+ * + *

The function performs one of the following:

+ *
    + *
  • Forward the Fourier transform of a 1D vector of N + * elements: + *
+ * + *

Y = F^N * X,

+ * + *

where F^N_(jk)=exp(-2pi i j k/N) and i=sqrt(-1)

+ *
    + *
  • Inverse the Fourier transform of a 1D vector of N + * elements: + *
+ * + *

X'= (F^N)^(-1) * Y = (F^N)^* * Y, + * X = (1/N) * X'

+ * + *

where F^*=(Re(F^N)-Im(F^N))^T

+ *
    + *
  • Forward the 2D Fourier transform of a M x N matrix: + *
+ * + *

Y = F^M * X * F^N

+ * + *
    + *
  • Inverse the 2D Fourier transform of a M x N matrix: + *
+ * + *

X'= (F^M)^* * Y * (F^N)^* + * X = 1/(M * N) * X'

+ * + *

In case of real (single-channel) data, the output spectrum of the forward + * Fourier transform or input spectrum of the inverse Fourier transform can be + * represented in a packed format called *CCS* (complex-conjugate-symmetrical). + * It was borrowed from IPL (Intel* Image Processing Library). Here is how 2D + * *CCS* spectrum looks:

+ * + *

Re Y_(0,0) Re Y_(0,1) Im Y_(0,1) Re Y_(0,2) Im Y_(0,2)... Re Y_(0,N/2-1) + * Im Y_(0,N/2-1) Re Y_(0,N/2) + * Re Y_(1,0) Re Y_(1,1) Im Y_(1,1) Re Y_(1,2) Im Y_(1,2)... Re Y_(1,N/2-1) Im + * Y_(1,N/2-1) Re Y_(1,N/2) + * Im Y_(1,0) Re Y_(2,1) Im Y_(2,1) Re Y_(2,2) Im Y_(2,2)... Re Y_(2,N/2-1) Im + * Y_(2,N/2-1) Im Y_(1,N/2)... + * Re Y_(M/2-1,0) Re Y_(M-3,1) Im Y_(M-3,1)... Re Y_(M-3,N/2-1) Im + * Y_(M-3,N/2-1) Re Y_(M/2-1,N/2) + * Im Y_(M/2-1,0) Re Y_(M-2,1) Im Y_(M-2,1)... Re Y_(M-2,N/2-1) Im + * Y_(M-2,N/2-1) Im Y_(M/2-1,N/2) + * Re Y_(M/2,0) Re Y_(M-1,1) Im Y_(M-1,1)... Re Y_(M-1,N/2-1) Im + * Y_(M-1,N/2-1) Re Y_(M/2,N/2)

+ * + *

In case of 1D transform of a real vector, the output looks like the first row + * of the matrix above.

+ * + *

So, the function chooses an operation mode depending on the flags and size of + * the input array:

+ *
    + *
  • If DFT_ROWS is set or the input array has a single row or + * single column, the function performs a 1D forward or inverse transform of + * each row of a matrix when DFT_ROWS is set. Otherwise, it + * performs a 2D transform. + *
  • If the input array is real and DFT_INVERSE is not set, + * the function performs a forward 1D or 2D transform: + *
  • When DFT_COMPLEX_OUTPUT is set, the output is a complex + * matrix of the same size as input. + *
  • When DFT_COMPLEX_OUTPUT is not set, the output is a real + * matrix of the same size as input. In case of 2D transform, it uses the packed + * format as shown above. In case of a single 1D transform, it looks like the + * first row of the matrix above. In case of multiple 1D transforms (when using + * the DCT_ROWS flag), each row of the output matrix looks like the + * first row of the matrix above. + *
  • If the input array is complex and either DFT_INVERSE or + * DFT_REAL_OUTPUT are not set, the output is a complex array of + * the same size as input. The function performs a forward or inverse 1D or 2D + * transform of the whole input array or each row of the input array + * independently, depending on the flags DFT_INVERSE and + * DFT_ROWS. + *
  • When DFT_INVERSE is set and the input array is real, or + * it is complex but DFT_REAL_OUTPUT is set, the output is a real + * array of the same size as input. The function performs a 1D or 2D inverse + * transformation of the whole input array or each individual row, depending on + * the flags DFT_INVERSE and DFT_ROWS. + *
+ * + *

If DFT_SCALE is set, the scaling is done after the + * transformation.

+ * + *

Unlike "dct", the function supports arrays of arbitrary size. But only those + * arrays are processed efficiently, whose sizes can be factorized in a product + * of small prime numbers (2, 3, and 5 in the current implementation). Such an + * efficient DFT size can be calculated using the "getOptimalDFTSize" method. + * The sample below illustrates how to calculate a DFT-based convolution of two + * 2D real arrays:

+ * + *

// C++ code:

+ * + *

void convolveDFT(InputArray A, InputArray B, OutputArray C)

+ * + * + *

// reallocate the output array if needed

+ * + *

C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());

+ * + *

Size dftSize;

+ * + *

// calculate the size of DFT transform

+ * + *

dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);

+ * + *

dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);

+ * + *

// allocate temporary buffers and initialize them with 0's

+ * + *

Mat tempA(dftSize, A.type(), Scalar.all(0));

+ * + *

Mat tempB(dftSize, B.type(), Scalar.all(0));

+ * + *

// copy A and B to the top-left corners of tempA and tempB, respectively

+ * + *

Mat roiA(tempA, Rect(0,0,A.cols,A.rows));

+ * + *

A.copyTo(roiA);

+ * + *

Mat roiB(tempB, Rect(0,0,B.cols,B.rows));

+ * + *

B.copyTo(roiB);

+ * + *

// now transform the padded A & B in-place;

+ * + *

// use "nonzeroRows" hint for faster processing

+ * + *

dft(tempA, tempA, 0, A.rows);

+ * + *

dft(tempB, tempB, 0, B.rows);

+ * + *

// multiply the spectrums;

+ * + *

// the function handles packed spectrum representations well

+ * + *

mulSpectrums(tempA, tempB, tempA);

+ * + *

// transform the product back from the frequency domain.

+ * + *

// Even though all the result rows will be non-zero,

+ * + *

// you need only the first C.rows of them, and thus you

+ * + *

// pass nonzeroRows == C.rows

+ * + *

dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);

+ * + *

// now copy the result back to C.

+ * + *

tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);

+ * + *

// all the temporary buffers will be deallocated automatically

+ * + * + *

To optimize this sample, consider the following approaches:

+ *
    + *
  • Since nonzeroRows != 0 is passed to the forward transform + * calls and since A and B are copied to the top-left + * corners of tempA and tempB, respectively, it is not + * necessary to clear the whole tempA and tempB. It is + * only necessary to clear the tempA.cols - A.cols + * (tempB.cols - B.cols) rightmost columns of the matrices. + *
  • This DFT-based convolution does not have to be applied to the whole + * big arrays, especially if B is significantly smaller than + * A or vice versa. Instead, you can calculate convolution by + * parts. To do this, you need to split the output array C into + * multiple tiles. For each tile, estimate which parts of A and + * B are required to calculate convolution in this tile. If the + * tiles in C are too small, the speed will decrease a lot because + * of repeated work. In the ultimate case, when each tile in C is a + * single pixel, the algorithm becomes equivalent to the naive convolution + * algorithm. If the tiles are too big, the temporary arrays tempA + * and tempB become too big and there is also a slowdown because of + * bad cache locality. So, there is an optimal tile size somewhere in the + * middle. + *
  • If different tiles in C can be calculated in parallel + * and, thus, the convolution is done by parts, the loop can be threaded. + *
+ * + *

All of the above improvements have been implemented in "matchTemplate" and + * "filter2D". Therefore, by using them, you can get the performance even better + * than with the above theoretically optimal implementation. Though, those two + * functions actually calculate cross-correlation, not convolution, so you need + * to "flip" the second convolution operand B vertically and + * horizontally using "flip".
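A minimal Java round trip through the wrapper (an impulse has a flat spectrum; values illustrative):

import org.opencv.core.*;

public class DftSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Mat signal = new Mat(1, 8, CvType.CV_32FC1);
        signal.put(0, 0, new float[]{1, 0, 0, 0, 0, 0, 0, 0});  // unit impulse
        Mat spectrum = new Mat(), restored = new Mat();
        Core.dft(signal, spectrum, Core.DFT_COMPLEX_OUTPUT, 0); // full complex spectrum
        Core.dft(spectrum, restored,
                Core.DFT_INVERSE | Core.DFT_SCALE | Core.DFT_REAL_OUTPUT, 0);
        System.out.println(restored.dump()); // recovers the impulse
    }
}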

+ * + * @param src input array that could be real or complex. + * @param dst output array whose size and type depends on the flags. + * @param flags transformation flags, representing a combination of the + * following values: + *
    + *
  • DFT_INVERSE performs an inverse 1D or 2D transform instead of the + * default forward transform. + *
  • DFT_SCALE scales the result: divide it by the number of array + * elements. Normally, it is combined with DFT_INVERSE. + *
  • DFT_ROWS performs a forward or inverse transform of every individual + * row of the input matrix; this flag enables you to transform multiple vectors + * simultaneously and can be used to decrease the overhead (which is sometimes + * several times larger than the processing itself) to perform 3D and + * higher-dimensional transformations and so forth. + *
  • DFT_COMPLEX_OUTPUT performs a forward transformation of 1D or 2D real + * array; the result, though being a complex array, has complex-conjugate + * symmetry (*CCS*, see the function description below for details), and such an + * array can be packed into a real array of the same size as input, which is the + * fastest option and which is what the function does by default; however, you + * may wish to get a full complex array (for simpler spectrum analysis, and so + * on) - pass the flag to enable the function to produce a full-size complex + * output array. + *
  • DFT_REAL_OUTPUT performs an inverse transformation of a 1D or 2D + * complex array; the result is normally a complex array of the same size, + * however, if the input array has conjugate-complex symmetry (for example, it + * is a result of forward transformation with DFT_COMPLEX_OUTPUT + * flag), the output is a real array; while the function itself does not check + * whether the input is symmetrical or not, you can pass the flag and then the + * function will assume the symmetry and produce the real output array (note + * that when the input is packed into a real array and inverse transformation is + * executed, the function treats the input as a packed complex-conjugate + * symmetrical array, and the output will also be a real array). + *
+ * @param nonzeroRows when the parameter is not zero, the function assumes that + * only the first nonzeroRows rows of the input array + * (DFT_INVERSE is not set) or only the first nonzeroRows + * of the output array (DFT_INVERSE is set) contain non-zeros, + * thus, the function can handle the rest of the rows more efficiently and save + * some time; this technique is very useful for calculating array + * cross-correlation or convolution using DFT. + * + * @see org.opencv.core.Core.dft + * @see org.opencv.imgproc.Imgproc#matchTemplate + * @see org.opencv.core.Core#mulSpectrums + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#flip + * @see org.opencv.core.Core#magnitude + * @see org.opencv.core.Core#phase + * @see org.opencv.core.Core#dct + * @see org.opencv.imgproc.Imgproc#filter2D + * @see org.opencv.core.Core#getOptimalDFTSize + */ + public static void dft(Mat src, Mat dst, int flags, int nonzeroRows) + { + + dft_0(src.nativeObj, dst.nativeObj, flags, nonzeroRows); + + return; + } + +/** + *

Performs a forward or inverse Discrete Fourier transform of a 1D or 2D + * floating-point array.

+ * + *

The function performs one of the following:

+ *
    + *
  • Forward the Fourier transform of a 1D vector of N + * elements: + *
+ * + *

Y = F^N * X,

+ * + *

where F^N_(jk)=exp(-2pi i j k/N) and i=sqrt(-1)

+ *
    + *
  • Inverse the Fourier transform of a 1D vector of N + * elements: + *
+ * + *

X'= (F^N)^(-1) * Y = (F^N)^* * Y, + * X = (1/N) * X'

+ * + *

where F^*=(Re(F^N)-Im(F^N))^T

+ *
    + *
  • Forward the 2D Fourier transform of a M x N matrix: + *
+ * + *

Y = F^M * X * F^N

+ * + *
    + *
  • Inverse the 2D Fourier transform of a M x N matrix: + *
+ * + *

X'= (F^M)^* * Y * (F^N)^* + * X = 1/(M * N) * X'

+ * + *

In case of real (single-channel) data, the output spectrum of the forward + * Fourier transform or input spectrum of the inverse Fourier transform can be + * represented in a packed format called *CCS* (complex-conjugate-symmetrical). + * It was borrowed from IPL (Intel* Image Processing Library). Here is how 2D + * *CCS* spectrum looks:

+ * + *

Re Y_(0,0) Re Y_(0,1) Im Y_(0,1) Re Y_(0,2) Im Y_(0,2)... Re Y_(0,N/2-1) + * Im Y_(0,N/2-1) Re Y_(0,N/2) + * Re Y_(1,0) Re Y_(1,1) Im Y_(1,1) Re Y_(1,2) Im Y_(1,2)... Re Y_(1,N/2-1) Im + * Y_(1,N/2-1) Re Y_(1,N/2) + * Im Y_(1,0) Re Y_(2,1) Im Y_(2,1) Re Y_(2,2) Im Y_(2,2)... Re Y_(2,N/2-1) Im + * Y_(2,N/2-1) Im Y_(1,N/2)... + * Re Y_(M/2-1,0) Re Y_(M-3,1) Im Y_(M-3,1)... Re Y_(M-3,N/2-1) Im + * Y_(M-3,N/2-1) Re Y_(M/2-1,N/2) + * Im Y_(M/2-1,0) Re Y_(M-2,1) Im Y_(M-2,1)... Re Y_(M-2,N/2-1) Im + * Y_(M-2,N/2-1) Im Y_(M/2-1,N/2) + * Re Y_(M/2,0) Re Y_(M-1,1) Im Y_(M-1,1)... Re Y_(M-1,N/2-1) Im + * Y_(M-1,N/2-1) Re Y_(M/2,N/2)

+ * + *

In case of 1D transform of a real vector, the output looks like the first row + * of the matrix above.

+ * + *

So, the function chooses an operation mode depending on the flags and size of + * the input array:

+ *
    + *
  • If DFT_ROWS is set or the input array has a single row or + * single column, the function performs a 1D forward or inverse transform of + * each row of a matrix when DFT_ROWS is set. Otherwise, it + * performs a 2D transform. + *
  • If the input array is real and DFT_INVERSE is not set, + * the function performs a forward 1D or 2D transform: + *
  • When DFT_COMPLEX_OUTPUT is set, the output is a complex + * matrix of the same size as input. + *
  • When DFT_COMPLEX_OUTPUT is not set, the output is a real + * matrix of the same size as input. In case of 2D transform, it uses the packed + * format as shown above. In case of a single 1D transform, it looks like the + * first row of the matrix above. In case of multiple 1D transforms (when using + * the DCT_ROWS flag), each row of the output matrix looks like the + * first row of the matrix above. + *
  • If the input array is complex and either DFT_INVERSE or + * DFT_REAL_OUTPUT are not set, the output is a complex array of + * the same size as input. The function performs a forward or inverse 1D or 2D + * transform of the whole input array or each row of the input array + * independently, depending on the flags DFT_INVERSE and + * DFT_ROWS. + *
  • When DFT_INVERSE is set and the input array is real, or + * it is complex but DFT_REAL_OUTPUT is set, the output is a real + * array of the same size as input. The function performs a 1D or 2D inverse + * transformation of the whole input array or each individual row, depending on + * the flags DFT_INVERSE and DFT_ROWS. + *
+ * + *

If DFT_SCALE is set, the scaling is done after the + * transformation.

+ * + *

Unlike "dct", the function supports arrays of arbitrary size. But only those + * arrays are processed efficiently, whose sizes can be factorized in a product + * of small prime numbers (2, 3, and 5 in the current implementation). Such an + * efficient DFT size can be calculated using the "getOptimalDFTSize" method. + * The sample below illustrates how to calculate a DFT-based convolution of two + * 2D real arrays:

+ * + *

// C++ code:

+ * + *

void convolveDFT(InputArray A, InputArray B, OutputArray C)

+ * + * + *

// reallocate the output array if needed

+ * + *

C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());

+ * + *

Size dftSize;

+ * + *

// calculate the size of DFT transform

+ * + *

dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);

+ * + *

dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);

+ * + *

// allocate temporary buffers and initialize them with 0's

+ * + *

Mat tempA(dftSize, A.type(), Scalar.all(0));

+ * + *

Mat tempB(dftSize, B.type(), Scalar.all(0));

+ * + *

// copy A and B to the top-left corners of tempA and tempB, respectively

+ * + *

Mat roiA(tempA, Rect(0,0,A.cols,A.rows));

+ * + *

A.copyTo(roiA);

+ * + *

Mat roiB(tempB, Rect(0,0,B.cols,B.rows));

+ * + *

B.copyTo(roiB);

+ * + *

// now transform the padded A & B in-place;

+ * + *

// use "nonzeroRows" hint for faster processing

+ * + *

dft(tempA, tempA, 0, A.rows);

+ * + *

dft(tempB, tempB, 0, B.rows);

+ * + *

// multiply the spectrums;

+ * + *

// the function handles packed spectrum representations well

+ * + *

mulSpectrums(tempA, tempB, tempA);

+ * + *

// transform the product back from the frequency domain.

+ * + *

// Even though all the result rows will be non-zero,

+ * + *

// you need only the first C.rows of them, and thus you

+ * + *

// pass nonzeroRows == C.rows

+ * + *

dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);

+ * + *

// now copy the result back to C.

+ * + *

tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);

+ * + *

// all the temporary buffers will be deallocated automatically

+ * + * + *

To optimize this sample, consider the following approaches:

+ *
    + *
  • Since nonzeroRows != 0 is passed to the forward transform + * calls and since A and B are copied to the top-left + * corners of tempA and tempB, respectively, it is not + * necessary to clear the whole tempA and tempB. It is + * only necessary to clear the tempA.cols - A.cols + * (tempB.cols - B.cols) rightmost columns of the matrices. + *
  • This DFT-based convolution does not have to be applied to the whole + * big arrays, especially if B is significantly smaller than + * A or vice versa. Instead, you can calculate convolution by + * parts. To do this, you need to split the output array C into + * multiple tiles. For each tile, estimate which parts of A and + * B are required to calculate convolution in this tile. If the + * tiles in C are too small, the speed will decrease a lot because + * of repeated work. In the ultimate case, when each tile in C is a + * single pixel, the algorithm becomes equivalent to the naive convolution + * algorithm. If the tiles are too big, the temporary arrays tempA + * and tempB become too big and there is also a slowdown because of + * bad cache locality. So, there is an optimal tile size somewhere in the + * middle. + *
  • If different tiles in C can be calculated in parallel + * and, thus, the convolution is done by parts, the loop can be threaded. + *
+ * + *

All of the above improvements have been implemented in "matchTemplate" and + * "filter2D". Therefore, by using them, you can get the performance even better + * than with the above theoretically optimal implementation. Though, those two + * functions actually calculate cross-correlation, not convolution, so you need + * to "flip" the second convolution operand B vertically and + * horizontally using "flip".

+ * + * @param src input array that could be real or complex. + * @param dst output array whose size and type depends on the flags. + * + * @see org.opencv.core.Core.dft + * @see org.opencv.imgproc.Imgproc#matchTemplate + * @see org.opencv.core.Core#mulSpectrums + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#flip + * @see org.opencv.core.Core#magnitude + * @see org.opencv.core.Core#phase + * @see org.opencv.core.Core#dct + * @see org.opencv.imgproc.Imgproc#filter2D + * @see org.opencv.core.Core#getOptimalDFTSize + */ + public static void dft(Mat src, Mat dst) + { + + dft_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void divide(Mat src1, Mat src2, Mat& dst, double scale = 1, int dtype = -1) + // + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result with an incorrect sign in the case + * of overflow.
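A small Java sketch showing the zero-divisor behavior (values arbitrary):

import org.opencv.core.*;

public class DivideSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } // assumes natives are available

    public static void main(String[] args) {
        Mat a = new Mat(1, 3, CvType.CV_32FC1);
        Mat b = new Mat(1, 3, CvType.CV_32FC1);
        a.put(0, 0, new float[]{10, 20, 30});
        b.put(0, 0, new float[]{2, 4, 0});
        Mat dst = new Mat();
        Core.divide(a, b, dst, 1.0);
        System.out.println(dst.dump()); // [5, 5, 0]: a zero divisor yields zero
    }
}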

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * @param scale scalar factor. + * @param dtype optional depth of the output array; if -1, + * dst will have depth src2.depth(), but in case of an + * array-by-array division, you can only pass -1 when + * src1.depth()==src2.depth(). + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(Mat src1, Mat src2, Mat dst, double scale, int dtype) + { + + divide_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, scale, dtype); + + return; + } + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result with an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * @param scale scalar factor. + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(Mat src1, Mat src2, Mat dst, double scale) + { + + divide_1(src1.nativeObj, src2.nativeObj, dst.nativeObj, scale); + + return; + } + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result with an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(Mat src1, Mat src2, Mat dst) + { + + divide_2(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void divide(double scale, Mat src2, Mat& dst, int dtype = -1) + // + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in case + * of overflow.

+ * + * @param scale scalar factor. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * @param dtype optional depth of the output array; if -1, + * dst will have depth src2.depth(), but in case of an + * array-by-array division, you can only pass -1 when + * src1.depth()==src2.depth(). + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(double scale, Mat src2, Mat dst, int dtype) + { + + divide_3(scale, src2.nativeObj, dst.nativeObj, dtype); + + return; + } + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in case + * of overflow.

+ * + * @param scale scalar factor. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(double scale, Mat src2, Mat dst) + { + + divide_4(scale, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void divide(Mat src1, Scalar src2, Mat& dst, double scale = 1, int dtype = -1) + // + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * @param scale scalar factor. + * @param dtype optional depth of the output array; if -1, + * dst will have depth src2.depth(), but in case of an + * array-by-array division, you can only pass -1 when + * src1.depth()==src2.depth(). + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(Mat src1, Scalar src2, Mat dst, double scale, int dtype) + { + + divide_5(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, scale, dtype); + + return; + } + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * @param scale scalar factor. + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(Mat src1, Scalar src2, Mat dst, double scale) + { + + divide_6(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, scale); + + return; + } + +/** + *

Performs per-element division of two arrays or a scalar by an array.

+ * + *

The functions divide divide one array by another:

+ * + *

dst(I) = saturate(src1(I)*scale/src2(I))

+ * + *

or a scalar by an array when there is no src1 :

+ * + *

dst(I) = saturate(scale/src2(I))

+ * + *

When src2(I) is zero, dst(I) will also be zero. + * Different channels of multi-channel arrays are processed independently.

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src2. + * + * @see org.opencv.core.Core.divide + * @see org.opencv.core.Core#multiply + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void divide(Mat src1, Scalar src2, Mat dst) + { + + divide_7(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: bool eigen(Mat src, bool computeEigenvectors, Mat& eigenvalues, Mat& eigenvectors) + // + +/** + *

Calculates eigenvalues and eigenvectors of a symmetric matrix.

+ * + *

The functions eigen calculate just eigenvalues, or eigenvalues + * and eigenvectors of the symmetric matrix src :

+ * + *

// C++ code:

+ * + *

src*eigenvectors.row(i).t() = eigenvalues.at(i)*eigenvectors.row(i).t()

+ * + *

Note: in the new and the old interfaces different ordering of eigenvalues and + * eigenvectors parameters is used. + *
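
+ * + *

A small Java sketch (illustrative only; assumes org.opencv.core imports):

+ * + *

Mat m = new Mat(2, 2, CvType.CV_32FC1); + * m.put(0, 0, 2, 1, 1, 2);  // symmetric matrix [[2,1],[1,2]] + * Mat evals = new Mat(), evecs = new Mat(); + * Core.eigen(m, true, evals, evecs);  // eigenvalues 3 and 1, in descending order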

+ * + * @param src input matrix that must have CV_32FC1 or + * CV_64FC1 type, square size and be symmetrical (src^"T" + * == src). + * @param computeEigenvectors if true, eigenvectors are computed in addition to + * the eigenvalues. + * @param eigenvalues output vector of eigenvalues of the same type as + * src; the eigenvalues are stored in the descending order. + * @param eigenvectors output matrix of eigenvectors; it has the same size and + * type as src; the eigenvectors are stored as subsequent matrix + * rows, in the same order as the corresponding eigenvalues. + * + * @see org.opencv.core.Core.eigen + * @see org.opencv.core.Core#completeSymm + */ + public static boolean eigen(Mat src, boolean computeEigenvectors, Mat eigenvalues, Mat eigenvectors) + { + + boolean retVal = eigen_0(src.nativeObj, computeEigenvectors, eigenvalues.nativeObj, eigenvectors.nativeObj); + + return retVal; + } + + + // + // C++: void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + // + +/** + *

Draws a simple or thick elliptic arc or fills an ellipse sector.

+ * + *

The functions ellipse with fewer parameters draw an ellipse + * outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. + * A piecewise-linear curve is used to approximate the elliptic arc boundary. If + * you need more control of the ellipse rendering, you can retrieve the curve + * using "ellipse2Poly" and then render it with "polylines" or fill it with + * "fillPoly". If you use the first variant of the function and want to draw the + * whole ellipse, not an arc, pass startAngle=0 and + * endAngle=360. The figure below explains the meaning of the + * parameters. + * Figure 1. Parameters of Elliptic Arc
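
+ * + *

For instance, a whole rotated ellipse outline can be drawn as follows (Java + * sketch, illustrative only; assumes org.opencv.core imports):

+ * + *

Mat canvas = Mat.zeros(400, 400, CvType.CV_8UC3); + * Core.ellipse(canvas, new Point(200, 200), new Size(120, 60), 30, 0, 360, + * new Scalar(0, 255, 0), 2);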

+ * + * @param img Image. + * @param center Center of the ellipse. + * @param axes Length of the ellipse axes. + * @param angle Ellipse rotation angle in degrees. + * @param startAngle Starting angle of the elliptic arc in degrees. + * @param endAngle Ending angle of the elliptic arc in degrees. + * @param color Ellipse color. + * @param thickness Thickness of the ellipse arc outline, if positive. + * Otherwise, this indicates that a filled ellipse sector is to be drawn. + * @param lineType Type of the ellipse boundary. See the "line" description. + * @param shift Number of fractional bits in the coordinates of the center and + * values of axes. + * + * @see org.opencv.core.Core.ellipse + */ + public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness, int lineType, int shift) + { + + ellipse_0(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); + + return; + } + +/** + *

Draws a simple or thick elliptic arc or fills an ellipse sector.

+ * + *

The functions ellipse with fewer parameters draw an ellipse + * outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. + * A piecewise-linear curve is used to approximate the elliptic arc boundary. If + * you need more control of the ellipse rendering, you can retrieve the curve + * using "ellipse2Poly" and then render it with "polylines" or fill it with + * "fillPoly". If you use the first variant of the function and want to draw the + * whole ellipse, not an arc, pass startAngle=0 and + * endAngle=360. The figure below explains the meaning of the + * parameters. + * Figure 1. Parameters of Elliptic Arc

+ * + * @param img Image. + * @param center Center of the ellipse. + * @param axes Length of the ellipse axes. + * @param angle Ellipse rotation angle in degrees. + * @param startAngle Starting angle of the elliptic arc in degrees. + * @param endAngle Ending angle of the elliptic arc in degrees. + * @param color Ellipse color. + * @param thickness Thickness of the ellipse arc outline, if positive. + * Otherwise, this indicates that a filled ellipse sector is to be drawn. + * + * @see org.opencv.core.Core.ellipse + */ + public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness) + { + + ellipse_1(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws a simple or thick elliptic arc or fills an ellipse sector.

+ * + *

The functions ellipse with fewer parameters draw an ellipse + * outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. + * A piecewise-linear curve is used to approximate the elliptic arc boundary. If + * you need more control of the ellipse rendering, you can retrieve the curve + * using "ellipse2Poly" and then render it with "polylines" or fill it with + * "fillPoly". If you use the first variant of the function and want to draw the + * whole ellipse, not an arc, pass startAngle=0 and + * endAngle=360. The figure below explains the meaning of the + * parameters. + * Figure 1. Parameters of Elliptic Arc

+ * + * @param img Image. + * @param center Center of the ellipse. + * @param axes Length of the ellipse axes. + * @param angle Ellipse rotation angle in degrees. + * @param startAngle Starting angle of the elliptic arc in degrees. + * @param endAngle Ending angle of the elliptic arc in degrees. + * @param color Ellipse color. + * + * @see org.opencv.core.Core.ellipse + */ + public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color) + { + + ellipse_2(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, int lineType = 8) + // + +/** + *

Draws a simple or thick elliptic arc or fills an ellipse sector.

+ * + *

The functions ellipse with fewer parameters draw an ellipse + * outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. + * A piecewise-linear curve is used to approximate the elliptic arc boundary. If + * you need more control of the ellipse rendering, you can retrieve the curve + * using "ellipse2Poly" and then render it with "polylines" or fill it with + * "fillPoly". If you use the first variant of the function and want to draw the + * whole ellipse, not an arc, pass startAngle=0 and + * endAngle=360. The figure below explains the meaning of the + * parameters. + * Figure 1. Parameters of Elliptic Arc

+ * + * @param img Image. + * @param box Alternative ellipse representation via "RotatedRect" or + * CvBox2D. This means that the function draws an ellipse inscribed + * in the rotated rectangle. + * @param color Ellipse color. + * @param thickness Thickness of the ellipse arc outline, if positive. + * Otherwise, this indicates that a filled ellipse sector is to be drawn. + * @param lineType Type of the ellipse boundary. See the "line" description. + * + * @see org.opencv.core.Core.ellipse + */ + public static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness, int lineType) + { + + ellipse_3(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType); + + return; + } + +/** + *

Draws a simple or thick elliptic arc or fills an ellipse sector.

+ * + *

The functions ellipse with fewer parameters draw an ellipse + * outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. + * A piecewise-linear curve is used to approximate the elliptic arc boundary. If + * you need more control of the ellipse rendering, you can retrieve the curve + * using "ellipse2Poly" and then render it with "polylines" or fill it with + * "fillPoly". If you use the first variant of the function and want to draw the + * whole ellipse, not an arc, pass startAngle=0 and + * endAngle=360. The figure below explains the meaning of the + * parameters. + * Figure 1. Parameters of Elliptic Arc

+ * + * @param img Image. + * @param box Alternative ellipse representation via "RotatedRect" or + * CvBox2D. This means that the function draws an ellipse inscribed + * in the rotated rectangle. + * @param color Ellipse color. + * @param thickness Thickness of the ellipse arc outline, if positive. + * Otherwise, this indicates that a filled ellipse sector is to be drawn. + * + * @see org.opencv.core.Core.ellipse + */ + public static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness) + { + + ellipse_4(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws a simple or thick elliptic arc or fills an ellipse sector.

+ * + *

The functions ellipse with fewer parameters draw an ellipse + * outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. + * A piecewise-linear curve is used to approximate the elliptic arc boundary. If + * you need more control of the ellipse rendering, you can retrieve the curve + * using "ellipse2Poly" and then render it with "polylines" or fill it with + * "fillPoly". If you use the first variant of the function and want to draw the + * whole ellipse, not an arc, pass startAngle=0 and + * endAngle=360. The figure below explains the meaning of the + * parameters. + * Figure 1. Parameters of Elliptic Arc

+ * + * @param img Image. + * @param box Alternative ellipse representation via "RotatedRect" or + * CvBox2D. This means that the function draws an ellipse inscribed + * in the rotated rectangle. + * @param color Ellipse color. + * + * @see org.opencv.core.Core.ellipse + */ + public static void ellipse(Mat img, RotatedRect box, Scalar color) + { + + ellipse_5(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts) + // + +/** + *

Approximates an elliptic arc with a polyline.

+ * + *

The function ellipse2Poly computes the vertices of a polyline + * that approximates the specified elliptic arc. It is used by "ellipse".
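
+ * + *

A short Java sketch (illustrative only):

+ * + *

MatOfPoint pts = new MatOfPoint(); + * Core.ellipse2Poly(new Point(100, 100), new Size(50, 30), 0, 0, 360, 10, pts); + * // pts now holds one polyline vertex every 10 degrees of the arc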

+ * + * @param center Center of the arc. + * @param axes Half-sizes of the arc. See the "ellipse" for details. + * @param angle Rotation angle of the ellipse in degrees. See the "ellipse" for + * details. + * @param arcStart Starting angle of the elliptic arc in degrees. + * @param arcEnd Ending angle of the elliptic arc in degrees. + * @param delta Angle between the subsequent polyline vertices. It defines the + * approximation accuracy. + * @param pts Output vector of polyline vertices. + * + * @see org.opencv.core.Core.ellipse2Poly + */ + public static void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, MatOfPoint pts) + { + Mat pts_mat = pts; + ellipse2Poly_0(center.x, center.y, axes.width, axes.height, angle, arcStart, arcEnd, delta, pts_mat.nativeObj); + + return; + } + + + // + // C++: void exp(Mat src, Mat& dst) + // + +/** + *

Calculates the exponent of every array element.

+ * + *

The function exp calculates the exponent of every element of the + * input array:

+ * + *

dst [I] = e^(src(I))

+ * + *

The maximum relative error is about 7e-6 for single-precision + * input and less than 1e-10 for double-precision input. Currently, + * the function converts denormalized values to zeros on output. Special values + * (NaN, Inf) are not handled.
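
+ * + *

A short Java sketch (illustrative only):

+ * + *

Mat src = new Mat(1, 3, CvType.CV_32F); + * src.put(0, 0, 0, 1, 2); + * Mat dst = new Mat(); + * Core.exp(src, dst);  // dst is approximately [1, 2.718, 7.389]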

+ * + * @param src input array. + * @param dst output array of the same size and type as src. + * + * @see org.opencv.core.Core.exp + * @see org.opencv.core.Core#log + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#pow + * @see org.opencv.core.Core#sqrt + * @see org.opencv.core.Core#magnitude + * @see org.opencv.core.Core#polarToCart + * @see org.opencv.core.Core#phase + */ + public static void exp(Mat src, Mat dst) + { + + exp_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void extractChannel(Mat src, Mat& dst, int coi) + // + + public static void extractChannel(Mat src, Mat dst, int coi) + { + + extractChannel_0(src.nativeObj, dst.nativeObj, coi); + + return; + } + + + // + // C++: float fastAtan2(float y, float x) + // + +/** + *

Calculates the angle of a 2D vector in degrees.

+ * + *

The function fastAtan2 calculates the full-range angle of an + * input 2D vector. The angle is measured in degrees and varies from 0 to 360 + * degrees. The accuracy is about 0.3 degrees.
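
+ * + *

For example (Java, illustrative only):

+ * + *

float deg = Core.fastAtan2(1.0f, 1.0f);  // approximately 45 degrees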

+ * + * @param y y-coordinate of the vector. + * @param x x-coordinate of the vector. + * + * @see org.opencv.core.Core.fastAtan2 + */ + public static float fastAtan2(float y, float x) + { + + float retVal = fastAtan2_0(y, x); + + return retVal; + } + + + // + // C++: void fillConvexPoly(Mat& img, vector_Point points, Scalar color, int lineType = 8, int shift = 0) + // + +/** + *

Fills a convex polygon.

+ * + *

The function fillConvexPoly draws a filled convex polygon. + * This function is much faster than the function fillPoly. It can + * fill not only convex polygons but any monotonic polygon without + * self-intersections, that is, a polygon whose contour intersects every + * horizontal line (scan line) twice at the most (though, its top-most and/or + * the bottom edge could be horizontal).
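
+ * + *

A short Java sketch (illustrative only; img is assumed to be an existing + * 8-bit image Mat):

+ * + *

MatOfPoint tri = new MatOfPoint(new Point(10, 10), new Point(90, 20), + * new Point(50, 80)); + * Core.fillConvexPoly(img, tri, new Scalar(255, 0, 0));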

+ * + * @param img Image. + * @param points polygon vertices. + * @param color Polygon color. + * @param lineType Type of the polygon boundaries. See the "line" description. + * @param shift Number of fractional bits in the vertex coordinates. + * + * @see org.opencv.core.Core.fillConvexPoly + */ + public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color, int lineType, int shift) + { + Mat points_mat = points; + fillConvexPoly_0(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift); + + return; + } + +/** + *

Fills a convex polygon.

+ * + *

The function fillConvexPoly draws a filled convex polygon. + * This function is much faster than the function fillPoly. It can + * fill not only convex polygons but any monotonic polygon without + * self-intersections, that is, a polygon whose contour intersects every + * horizontal line (scan line) twice at the most (though, its top-most and/or + * the bottom edge could be horizontal).

+ * + * @param img Image. + * @param points polygon vertices. + * @param color Polygon color. + * + * @see org.opencv.core.Core.fillConvexPoly + */ + public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color) + { + Mat points_mat = points; + fillConvexPoly_1(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void fillPoly(Mat& img, vector_vector_Point pts, Scalar color, int lineType = 8, int shift = 0, Point offset = Point()) + // + +/** + *

Fills the area bounded by one or more polygons.

+ * + *

The function fillPoly fills an area bounded by several polygonal + * contours. The function can fill complex areas, for example, areas with holes, + * contours with self-intersections (some of their parts), and so forth.
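
+ * + *

A short Java sketch (illustrative only; assumes java.util and org.opencv.core + * imports, and an existing image Mat img):

+ * + *

List<MatOfPoint> polys = new ArrayList<MatOfPoint>(); + * polys.add(new MatOfPoint(new Point(10, 10), new Point(90, 10), new Point(50, 90))); + * Core.fillPoly(img, polys, new Scalar(0, 0, 255));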

+ * + * @param img Image. + * @param pts Array of polygons where each polygon is represented as an array of + * points. + * @param color Polygon color. + * @param lineType Type of the polygon boundaries. See the "line" description. + * @param shift Number of fractional bits in the vertex coordinates. + * @param offset Optional offset of all points of the contours. + * + * @see org.opencv.core.Core.fillPoly + */ + public static void fillPoly(Mat img, List pts, Scalar color, int lineType, int shift, Point offset) + { + List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); + Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); + fillPoly_0(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift, offset.x, offset.y); + + return; + } + +/** + *

Fills the area bounded by one or more polygons.

+ * + *

The function fillPoly fills an area bounded by several polygonal + * contours. The function can fill complex areas, for example, areas with holes, + * contours with self-intersections (some of their parts), and so forth.

+ * + * @param img Image. + * @param pts Array of polygons where each polygon is represented as an array of + * points. + * @param color Polygon color. + * + * @see org.opencv.core.Core.fillPoly + */ + public static void fillPoly(Mat img, List pts, Scalar color) + { + List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); + Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); + fillPoly_1(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void findNonZero(Mat src, Mat& idx) + // + + public static void findNonZero(Mat src, Mat idx) + { + + findNonZero_0(src.nativeObj, idx.nativeObj); + + return; + } + + + // + // C++: void flip(Mat src, Mat& dst, int flipCode) + // + +/** + *

Flips a 2D array around vertical, horizontal, or both axes.

+ * + *

The function flip flips the array in one of three different ways + * (row and column indices are 0-based):

+ * + *

dst_(i,j) = src_(src.rows-i-1, j) if flipCode = 0 + * dst_(i,j) = src_(i, src.cols-j-1) if flipCode > 0 + * dst_(i,j) = src_(src.rows-i-1, src.cols-j-1) if flipCode < 0

+ * + *

The example scenarios of using the function are the following:

+ *
    + *
  • Vertical flipping of the image (flipCode == 0) to switch + * between top-left and bottom-left image origin. This is a typical operation in + * video processing on Microsoft Windows* OS. + *
  • Horizontal flipping of the image with the subsequent horizontal shift + * and absolute difference calculation to check for a vertical-axis symmetry + * (flipCode > 0). + *
  • Simultaneous horizontal and vertical flipping of the image with the + * subsequent shift and absolute difference calculation to check for a central + * symmetry (flipCode < 0). + *
  • Reversing the order of point arrays (flipCode > 0 or + * flipCode == 0). + *
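
+ * + *

For example, mirroring an existing Mat src around the vertical axis (Java, + * illustrative only):

+ * + *

Mat mirrored = new Mat(); + * Core.flip(src, mirrored, 1);
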
+ * + * @param src input array. + * @param dst output array of the same size and type as src. + * @param flipCode a flag to specify how to flip the array; 0 means flipping + * around the x-axis and positive value (for example, 1) means flipping around + * y-axis. Negative value (for example, -1) means flipping around both axes (see + * the discussion below for the formulas). + * + * @see org.opencv.core.Core.flip + * @see org.opencv.core.Core#repeat + * @see org.opencv.core.Core#transpose + * @see org.opencv.core.Core#completeSymm + */ + public static void flip(Mat src, Mat dst, int flipCode) + { + + flip_0(src.nativeObj, dst.nativeObj, flipCode); + + return; + } + + + // + // C++: void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat& dst, int flags = 0) + // + +/** + *

Performs generalized matrix multiplication.

+ * + *

The function performs generalized matrix multiplication similar to the + * gemm functions in BLAS level 3. For example, gemm(src1, + * src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T) corresponds to

+ * + *

dst = alpha * src1^T * src2 + beta * src3^T + * + * The function can be replaced with a matrix expression. For example, the above + * call can be replaced with:

+ * + *

// C++ code:

+ * + *

dst = alpha*src1.t()*src2 + beta*src3.t();
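
+ * + *

A corresponding Java sketch (illustrative only; assumes org.opencv.core + * imports):

+ * + *

Mat a = Mat.eye(2, 2, CvType.CV_32F); + * Mat b = Mat.ones(2, 2, CvType.CV_32F); + * Mat c = Mat.zeros(2, 2, CvType.CV_32F); + * Mat d = new Mat(); + * Core.gemm(a, b, 2.0, c, 1.0, d, 0);  // d = 2*a*b + 1*c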

+ * + *

+ * + * @param src1 first multiplied input matrix that should have CV_32FC1, + * CV_64FC1, CV_32FC2, or CV_64FC2 type. + * @param src2 second multiplied input matrix of the same type as + * src1. + * @param alpha weight of the matrix product. + * @param src3 third optional delta matrix added to the matrix product; it + * should have the same type as src1 and src2. + * @param gamma weight of src3 (the beta + * in the formula above). + * @param dst output matrix; it has the proper size and the same type as input + * matrices. + * @param flags operation flags: + *
    + *
  • GEMM_1_T transposes src1. + *
  • GEMM_2_T transposes src2. + *
  • GEMM_3_T transposes src3. + *
+ * + * @see org.opencv.core.Core.gemm + * @see org.opencv.core.Core#mulTransposed + * @see org.opencv.core.Core#transform + */ + public static void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat dst, int flags) + { + + gemm_0(src1.nativeObj, src2.nativeObj, alpha, src3.nativeObj, gamma, dst.nativeObj, flags); + + return; + } + +/** + *

Performs generalized matrix multiplication.

+ * + *

The function performs generalized matrix multiplication similar to the + * gemm functions in BLAS level 3. For example, gemm(src1, + * src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T) corresponds to

+ * + *

dst = alpha * src1^T * src2 + beta * src3^T + * + * The function can be replaced with a matrix expression. For example, the above + * call can be replaced with:

+ * + *

// C++ code:

+ * + *

dst = alpha*src1.t()*src2 + beta*src3.t();

+ * + *

+ * + * @param src1 first multiplied input matrix that should have CV_32FC1, + * CV_64FC1, CV_32FC2, or CV_64FC2 type. + * @param src2 second multiplied input matrix of the same type as + * src1. + * @param alpha weight of the matrix product. + * @param src3 third optional delta matrix added to the matrix product; it + * should have the same type as src1 and src2. + * @param gamma weight of src3 (the beta + * in the formula above). + * @param dst output matrix; it has the proper size and the same type as input + * matrices. + * + * @see org.opencv.core.Core.gemm + * @see org.opencv.core.Core#mulTransposed + * @see org.opencv.core.Core#transform + */ + public static void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat dst) + { + + gemm_1(src1.nativeObj, src2.nativeObj, alpha, src3.nativeObj, gamma, dst.nativeObj); + + return; + } + + + // + // C++: string getBuildInformation() + // + +/** + *

Returns full configuration time cmake output.

+ * + *

Returned value is raw cmake output including version control system revision, + * compiler version, compiler flags, enabled modules and third party libraries, + * etc. Output format depends on target architecture.

+ * + * @see org.opencv.core.Core.getBuildInformation + */ + public static String getBuildInformation() + { + + String retVal = getBuildInformation_0(); + + return retVal; + } + + + // + // C++: int64 getCPUTickCount() + // + +/** + *

Returns the number of CPU ticks.

+ * + *

The function returns the current number of CPU ticks on some architectures + * (such as x86, x64, PowerPC). On other platforms the function is equivalent to + * getTickCount. It can also be used for very accurate time + * measurements, as well as for RNG initialization. Note that on + * multi-CPU systems a thread from which getCPUTickCount is + * called can be suspended and resumed on another CPU with its own counter. So, + * theoretically (and practically) subsequent calls to the function do not + * necessarily return monotonically increasing values. Also, since a modern CPU + * varies the CPU frequency depending on the load, the number of CPU clocks + * spent in some code cannot be directly converted to time units. Therefore, + * getTickCount is generally a preferable solution for measuring + * execution time.

+ * + * @see org.opencv.core.Core.getCPUTickCount + */ + public static long getCPUTickCount() + { + + long retVal = getCPUTickCount_0(); + + return retVal; + } + + + // + // C++: int getNumberOfCPUs() + // + +/** + *

Returns the number of logical CPUs available for the process.

+ * + * @see org.opencv.core.Core.getNumberOfCPUs + */ + public static int getNumberOfCPUs() + { + + int retVal = getNumberOfCPUs_0(); + + return retVal; + } + + + // + // C++: int getOptimalDFTSize(int vecsize) + // + +/** + *

Returns the optimal DFT size for a given vector size.

+ * + *

DFT performance is not a monotonic function of a vector size. Therefore, when + * you calculate convolution of two arrays or perform the spectral analysis of + * an array, it usually makes sense to pad the input data with zeros to get a + * bit larger array that can be transformed much faster than the original one. + * Arrays whose size is a power-of-two (2, 4, 8, 16, 32,...) are the fastest to + * process. Though, the arrays whose size is a product of 2's, 3's, and 5's (for + * example, 300 = 5*5*3*2*2) are also processed quite efficiently.

+ * + *

The function getOptimalDFTSize returns the minimum number + * N that is greater than or equal to vecsize so that + * the DFT of a vector of size N can be processed efficiently. In + * the current implementation N = 2^"p" * 3^"q" * 5^"r" for some + * integer p, q, r.

+ * + *

The function returns a negative number if vecsize is too large + * (very close to INT_MAX).

+ * + *

While the function cannot be used directly to estimate the optimal vector + * size for DCT transform (since the current DCT implementation supports only + * even-size vectors), it can be easily processed as getOptimalDFTSize((vecsize+1)/2)*2.
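
+ * + *

For example (Java, illustrative only):

+ * + *

int n = Core.getOptimalDFTSize(313);  // 320 = 2^6 * 5, the next "fast" size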

+ * + * @param vecsize vector size. + * + * @see org.opencv.core.Core.getOptimalDFTSize + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#dct + * @see org.opencv.core.Core#idct + * @see org.opencv.core.Core#mulSpectrums + * @see org.opencv.core.Core#idft + */ + public static int getOptimalDFTSize(int vecsize) + { + + int retVal = getOptimalDFTSize_0(vecsize); + + return retVal; + } + + + // + // C++: int64 getTickCount() + // + +/** + *

Returns the number of ticks.

+ * + *

The function returns the number of ticks after a certain event (for + * example, when the machine was turned on). + * It can be used to initialize "RNG" or to measure a function execution time by + * reading the tick count before and after the function call. See also the tick + * frequency.

+ * + * @see org.opencv.core.Core.getTickCount + */ + public static long getTickCount() + { + + long retVal = getTickCount_0(); + + return retVal; + } + + + // + // C++: double getTickFrequency() + // + +/** + *

Returns the number of ticks per second.

+ * + *

The function returns the number of ticks per second. That is, the following + * code computes the execution time in seconds:

+ * + *

// C++ code:

+ * + *

double t = (double)getTickCount();

+ * + *

// do something...

+ * + *

t = ((double)getTickCount() - t)/getTickFrequency();
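
+ * + *

The same pattern in Java (illustrative only):

+ * + *

long t0 = Core.getTickCount(); + * // do something... + * double seconds = (Core.getTickCount() - t0) / Core.getTickFrequency();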

+ * + * @see org.opencv.core.Core.getTickFrequency + */ + public static double getTickFrequency() + { + + double retVal = getTickFrequency_0(); + + return retVal; + } + + + // + // C++: void hconcat(vector_Mat src, Mat& dst) + // + + public static void hconcat(List src, Mat dst) + { + Mat src_mat = Converters.vector_Mat_to_Mat(src); + hconcat_0(src_mat.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void idct(Mat src, Mat& dst, int flags = 0) + // + +/** + *

Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.

+ * + *

idct(src, dst, flags) is equivalent to dct(src, dst, flags + * | DCT_INVERSE).

+ * + * @param src input floating-point single-channel array. + * @param dst output array of the same size and type as src. + * @param flags operation flags. + * + * @see org.opencv.core.Core.idct + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#dct + * @see org.opencv.core.Core#getOptimalDFTSize + * @see org.opencv.core.Core#idft + */ + public static void idct(Mat src, Mat dst, int flags) + { + + idct_0(src.nativeObj, dst.nativeObj, flags); + + return; + } + +/** + *

Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.

+ * + *

idct(src, dst, flags) is equivalent to dct(src, dst, flags + * | DCT_INVERSE).

+ * + * @param src input floating-point single-channel array. + * @param dst output array of the same size and type as src. + * + * @see org.opencv.core.Core.idct + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#dct + * @see org.opencv.core.Core#getOptimalDFTSize + * @see org.opencv.core.Core#idft + */ + public static void idct(Mat src, Mat dst) + { + + idct_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void idft(Mat src, Mat& dst, int flags = 0, int nonzeroRows = 0) + // + +/** + *

Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.

+ * + *

idft(src, dst, flags) is equivalent to dft(src, dst, flags + * | DFT_INVERSE).

+ * + *

See "dft" for details.

+ * + *

Note: Neither dft nor idft scales the result by + * default. So, you should pass DFT_SCALE to one of + * dft or idft explicitly to make these transforms + * mutually inverse.
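
+ * + *

A round-trip Java sketch (illustrative only; assumes org.opencv.core imports):

+ * + *

Mat signal = new Mat(1, 4, CvType.CV_32F); + * signal.put(0, 0, 1, 2, 3, 4); + * Mat spectrum = new Mat(), restored = new Mat(); + * Core.dft(signal, spectrum); + * Core.idft(spectrum, restored, Core.DFT_SCALE | Core.DFT_REAL_OUTPUT, 0); + * // restored is approximately equal to signal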

+ * + * @param src input floating-point real or complex array. + * @param dst output array whose size and type depend on the flags. + * @param flags operation flags (see "dft"). + * @param nonzeroRows number of dst rows to process; the rest of + * the rows have undefined content (see the convolution sample in the "dft" + * description). + * + * @see org.opencv.core.Core.idft + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#dct + * @see org.opencv.core.Core#getOptimalDFTSize + * @see org.opencv.core.Core#idct + * @see org.opencv.core.Core#mulSpectrums + */ + public static void idft(Mat src, Mat dst, int flags, int nonzeroRows) + { + + idft_0(src.nativeObj, dst.nativeObj, flags, nonzeroRows); + + return; + } + +/** + *

Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.

+ * + *

idft(src, dst, flags) is equivalent to dft(src, dst, flags + * | DFT_INVERSE).

+ * + *

See "dft" for details.

+ * + *

Note: Neither dft nor idft scales the result by + * default. So, you should pass DFT_SCALE to one of + * dft or idft explicitly to make these transforms + * mutually inverse.

+ * + * @param src input floating-point real or complex array. + * @param dst output array whose size and type depend on the flags. + * + * @see org.opencv.core.Core.idft + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#dct + * @see org.opencv.core.Core#getOptimalDFTSize + * @see org.opencv.core.Core#idct + * @see org.opencv.core.Core#mulSpectrums + */ + public static void idft(Mat src, Mat dst) + { + + idft_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void inRange(Mat src, Scalar lowerb, Scalar upperb, Mat& dst) + // + +/** + *

Checks if array elements lie between the elements of two other arrays.

+ * + *

The function checks the range as follows:

+ *
    + *
  • For every element of a single-channel input array: + *
+ * + *

dst(I)= lowerb(I)_0 <= src(I)_0 <= upperb(I)_0

+ * + *
    + *
  • For two-channel arrays: + *
+ * + *

dst(I)= lowerb(I)_0 <= src(I)_0 <= upperb(I)_0 and lowerb(I)_1 <= + * src(I)_1 <= upperb(I)_1

+ * + *
    + *
  • and so forth. + *
+ * + *

That is, dst(I) is set to 255 (all 1-bits) if + * src(I) is within the specified 1D, 2D, 3D,... box and 0 + * otherwise.

+ * + *

When the lower and/or upper boundary parameters are scalars, the indexes + * (I) at lowerb and upperb in the above + * formulas should be omitted.
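
+ * + *

A short Java sketch (illustrative only; bgr is assumed to be an existing + * 8-bit 3-channel image Mat):

+ * + *

Mat mask = new Mat(); + * Core.inRange(bgr, new Scalar(0, 0, 100), new Scalar(80, 80, 255), mask); + * // mask is 255 where all three channels lie within the bounds, 0 elsewhere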

+ * + * @param src first input array. + * @param lowerb inclusive lower boundary array or a scalar. + * @param upperb inclusive upper boundary array or a scalar. + * @param dst output array of the same size as src and + * CV_8U type. + * + * @see org.opencv.core.Core.inRange + */ + public static void inRange(Mat src, Scalar lowerb, Scalar upperb, Mat dst) + { + + inRange_0(src.nativeObj, lowerb.val[0], lowerb.val[1], lowerb.val[2], lowerb.val[3], upperb.val[0], upperb.val[1], upperb.val[2], upperb.val[3], dst.nativeObj); + + return; + } + + + // + // C++: void insertChannel(Mat src, Mat& dst, int coi) + // + + public static void insertChannel(Mat src, Mat dst, int coi) + { + + insertChannel_0(src.nativeObj, dst.nativeObj, coi); + + return; + } + + + // + // C++: double invert(Mat src, Mat& dst, int flags = DECOMP_LU) + // + +/** + *

Finds the inverse or pseudo-inverse of a matrix.

+ * + *

The function invert inverts the matrix src and + * stores the result in dst. + * When the matrix src is singular or non-square, the function + * calculates the pseudo-inverse matrix (the dst matrix) so that + * norm(src*dst - I) is minimal, where I is an identity matrix.

+ * + *

In case of the DECOMP_LU method, the function returns non-zero + * value if the inverse has been successfully calculated and 0 if + * src is singular.

+ * + *

In case of the DECOMP_SVD method, the function returns the + * inverse condition number of src (the ratio of the smallest + * singular value to the largest singular value) and 0 if src is + * singular. The SVD method calculates a pseudo-inverse matrix if + * src is singular.

+ * + *

Similarly to DECOMP_LU, the method DECOMP_CHOLESKY + * works only with non-singular square matrices that should also be symmetric + * and positive definite. In this case, the function stores the inverted matrix + * in dst and returns non-zero. Otherwise, it returns 0.
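
+ * + *

A short Java sketch (illustrative only):

+ * + *

Mat m = new Mat(2, 2, CvType.CV_32F); + * m.put(0, 0, 4, 7, 2, 6);  // determinant is 10, so m is invertible + * Mat inv = new Mat(); + * double ok = Core.invert(m, inv, Core.DECOMP_LU);  // non-zero on success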

+ * + * @param src input floating-point M x N matrix. + * @param dst output matrix of N x M size and the same type as + * src. + * @param flags inversion method : + *
    + *
  • DECOMP_LU Gaussian elimination with the optimal pivot element chosen. + *
  • DECOMP_SVD singular value decomposition (SVD) method. + *
  • DECOMP_CHOLESKY Cholesky decomposition; the matrix must be symmetric + * and positive definite. + *
+ * + * @see org.opencv.core.Core.invert + * @see org.opencv.core.Core#solve + */ + public static double invert(Mat src, Mat dst, int flags) + { + + double retVal = invert_0(src.nativeObj, dst.nativeObj, flags); + + return retVal; + } + +/** + *

Finds the inverse or pseudo-inverse of a matrix.

+ * + *

The function invert inverts the matrix src and + * stores the result in dst. + * When the matrix src is singular or non-square, the function + * calculates the pseudo-inverse matrix (the dst matrix) so that + * norm(src*dst - I) is minimal, where I is an identity matrix.

+ * + *

In case of the DECOMP_LU method, the function returns non-zero + * value if the inverse has been successfully calculated and 0 if + * src is singular.

+ * + *

In case of the DECOMP_SVD method, the function returns the + * inverse condition number of src (the ratio of the smallest + * singular value to the largest singular value) and 0 if src is + * singular. The SVD method calculates a pseudo-inverse matrix if + * src is singular.

+ * + *

Similarly to DECOMP_LU, the method DECOMP_CHOLESKY + * works only with non-singular square matrices that should also be symmetric + * and positive definite. In this case, the function stores the inverted matrix + * in dst and returns non-zero. Otherwise, it returns 0.

+ * + * @param src input floating-point M x N matrix. + * @param dst output matrix of N x M size and the same type as + * src. + * + * @see org.opencv.core.Core.invert + * @see org.opencv.core.Core#solve + */ + public static double invert(Mat src, Mat dst) + { + + double retVal = invert_1(src.nativeObj, dst.nativeObj); + + return retVal; + } + + + // + // C++: double kmeans(Mat data, int K, Mat& bestLabels, TermCriteria criteria, int attempts, int flags, Mat& centers = Mat()) + // + +/** + *

Finds centers of clusters and groups input samples around the clusters.

+ * + *

The function kmeans implements a k-means algorithm that finds + * the centers of cluster_count clusters and groups the input + * samples around the clusters. As an output, labels_i contains a + * 0-based cluster index for the sample stored in the i^(th) row of the + * samples matrix.

+ * + *

The function returns the compactness measure that is computed as

+ * + *

sum_i ||samples_i - centers_(labels_i)||^2

+ * + *

after every attempt. The best (minimum) value is chosen and the corresponding + * labels and the compactness value are returned by the function. + * Basically, you can use only the core of the function, set the number of + * attempts to 1, initialize labels each time using a custom algorithm, pass + * them with the (flags = KMEANS_USE_INITIAL_LABELS) + * flag, and then choose the best (most-compact) clustering.
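
+ * + *

A small Java sketch (illustrative only; two well-separated groups of 2D + * samples are split into K=2 clusters):

+ * + *

Mat data = new Mat(6, 2, CvType.CV_32F);  // six 2D samples, one per row + * data.put(0, 0, 1, 1, 1.5, 2, 1, 1.5, 8, 8, 8.5, 8, 8, 9); + * Mat labels = new Mat(), centers = new Mat(); + * TermCriteria crit = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 10, 1.0); + * Core.kmeans(data, 2, labels, crit, 3, Core.KMEANS_PP_CENTERS, centers);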

+ * + * @param data input floating-point matrix of samples to cluster, one sample + * per row. + * @param K number of clusters to split the samples into. + * @param bestLabels input/output integer array that stores a 0-based cluster + * index for every sample. + * @param criteria The algorithm termination criteria, that is, the maximum + * number of iterations and/or the desired accuracy. The accuracy is specified + * as criteria.epsilon. As soon as each of the cluster centers + * moves by less than criteria.epsilon on some iteration, the + * algorithm stops. + * @param attempts Flag to specify the number of times the algorithm is executed + * using different initial labellings. The algorithm returns the labels that + * yield the best compactness (see the last function parameter). + * @param flags Flag that can take the following values: + *
    + *
  • KMEANS_RANDOM_CENTERS Select random initial centers in each attempt. + *
  • KMEANS_PP_CENTERS Use kmeans++ center initialization by + * Arthur and Vassilvitskii [Arthur2007]. + *
  • KMEANS_USE_INITIAL_LABELS During the first (and possibly the only) + * attempt, use the user-supplied labels instead of computing them from the + * initial centers. For the second and further attempts, use the random or + * semi-random centers. Use one of KMEANS_*_CENTERS flag to specify + * the exact method. + *
+ * @param centers Output matrix of the cluster centers, one row per each cluster + * center. + * + * @see org.opencv.core.Core.kmeans + */ + public static double kmeans(Mat data, int K, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers) + { + + double retVal = kmeans_0(data.nativeObj, K, bestLabels.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon, attempts, flags, centers.nativeObj); + + return retVal; + } + +/** + *

Finds centers of clusters and groups input samples around the clusters.

+ * + *

The function kmeans implements a k-means algorithm that finds + * the centers of cluster_count clusters and groups the input + * samples around the clusters. As an output, labels_i contains a + * 0-based cluster index for the sample stored in the i^(th) row of the + * samples matrix.

+ * + *

The function returns the compactness measure that is computed as

+ * + *

sum_i ||samples_i - centers_(labels_i)||^2

+ * + *

after every attempt. The best (minimum) value is chosen and the corresponding + * labels and the compactness value are returned by the function. + * Basically, you can use only the core of the function, set the number of + * attempts to 1, initialize labels each time using a custom algorithm, pass + * them with the (flags = KMEANS_USE_INITIAL_LABELS) + * flag, and then choose the best (most-compact) clustering.

+ * + * @param data input floating-point matrix of samples to cluster, one sample + * per row. + * @param K number of clusters to split the samples into. + * @param bestLabels input/output integer array that stores a 0-based cluster + * index for every sample. + * @param criteria The algorithm termination criteria, that is, the maximum + * number of iterations and/or the desired accuracy. The accuracy is specified + * as criteria.epsilon. As soon as each of the cluster centers + * moves by less than criteria.epsilon on some iteration, the + * algorithm stops. + * @param attempts Flag to specify the number of times the algorithm is executed + * using different initial labellings. The algorithm returns the labels that + * yield the best compactness (see the last function parameter). + * @param flags Flag that can take the following values: + *
    + *
  • KMEANS_RANDOM_CENTERS Select random initial centers in each attempt. + *
  • KMEANS_PP_CENTERS Use kmeans++ center initialization by + * Arthur and Vassilvitskii [Arthur2007]. + *
  • KMEANS_USE_INITIAL_LABELS During the first (and possibly the only) + * attempt, use the user-supplied labels instead of computing them from the + * initial centers. For the second and further attempts, use the random or + * semi-random centers. Use one of KMEANS_*_CENTERS flag to specify + * the exact method. + *
+ * + * @see org.opencv.core.Core.kmeans + */ + public static double kmeans(Mat data, int K, Mat bestLabels, TermCriteria criteria, int attempts, int flags) + { + + double retVal = kmeans_1(data.nativeObj, K, bestLabels.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon, attempts, flags); + + return retVal; + } + + + // + // C++: void line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + // + +/** + *

Draws a line segment connecting two points.

+ * + *

The function line draws the line segment between + * pt1 and pt2 points in the image. The line is + * clipped by the image boundaries. For non-antialiased lines with integer + * coordinates, the 8-connected or 4-connected Bresenham algorithm is used. + * Thick lines are drawn with rounded endings. + * Antialiased lines are drawn using Gaussian filtering. To specify the line + * color, you may use the macro CV_RGB(r, g, b).
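
+ * + *

For example (Java, illustrative only; img is an existing image Mat):

+ * + *

Core.line(img, new Point(0, 0), new Point(100, 100), + * new Scalar(255, 255, 255), 2);  // white, 2 pixels thick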

+ * + * @param img Image. + * @param pt1 First point of the line segment. + * @param pt2 Second point of the line segment. + * @param color Line color. + * @param thickness Line thickness. + * @param lineType Type of the line: + *
    + *
  • 8 (or omitted) - 8-connected line. + *
  • 4 - 4-connected line. + *
  • CV_AA - antialiased line. + *
+ * @param shift Number of fractional bits in the point coordinates. + * + * @see org.opencv.core.Core.line + */ + public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift) + { + + line_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); + + return; + } + +/** + *

Draws a line segment connecting two points.

+ * + *

The function line draws the line segment between + * pt1 and pt2 points in the image. The line is + * clipped by the image boundaries. For non-antialiased lines with integer + * coordinates, the 8-connected or 4-connected Bresenham algorithm is used. + * Thick lines are drawn with rounded endings. + * Antialiased lines are drawn using Gaussian filtering. To specify the line + * color, you may use the macro CV_RGB(r, g, b).

+ * + * @param img Image. + * @param pt1 First point of the line segment. + * @param pt2 Second point of the line segment. + * @param color Line color. + * @param thickness Line thickness. + * + * @see org.opencv.core.Core.line + */ + public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness) + { + + line_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws a line segment connecting two points.

+ * + *

The function line draws the line segment between + * pt1 and pt2 points in the image. The line is + * clipped by the image boundaries. For non-antialiased lines with integer + * coordinates, the 8-connected or 4-connected Bresenham algorithm is used. + * Thick lines are drawn with rounded endings. + * Antialiased lines are drawn using Gaussian filtering. To specify the line + * color, you may use the macro CV_RGB(r, g, b).

+ * + * @param img Image. + * @param pt1 First point of the line segment. + * @param pt2 Second point of the line segment. + * @param color Line color. + * + * @see org.opencv.core.Core.line + */ + public static void line(Mat img, Point pt1, Point pt2, Scalar color) + { + + line_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void log(Mat src, Mat& dst) + // + +/** + *

Calculates the natural logarithm of every array element.

+ * + *

The function log calculates the natural logarithm of the + * absolute value of every element of the input array:

+ * + *

dst(I) = log|src(I)| if src(I) != 0 ; C otherwise

+ * + *

where C is a large negative number (about -700 in the current + * implementation). + * The maximum relative error is about 7e-6 for single-precision + * input and less than 1e-10 for double-precision input. Special + * values (NaN, Inf) are not handled.
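
+ * + *

A short Java sketch (illustrative only):

+ * + *

Mat vals = new Mat(1, 3, CvType.CV_32F); + * vals.put(0, 0, 1, Math.E, 100); + * Mat ln = new Mat(); + * Core.log(vals, ln);  // ln is approximately [0, 1, 4.605]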

+ * + * @param src input array. + * @param dst output array of the same size and type as src. + * + * @see org.opencv.core.Core.log + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#pow + * @see org.opencv.core.Core#sqrt + * @see org.opencv.core.Core#magnitude + * @see org.opencv.core.Core#polarToCart + * @see org.opencv.core.Core#exp + * @see org.opencv.core.Core#phase + */ + public static void log(Mat src, Mat dst) + { + + log_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void magnitude(Mat x, Mat y, Mat& magnitude) + // + +/** + *

Calculates the magnitude of 2D vectors.

+ * + *

The function magnitude calculates the magnitude of 2D vectors + * formed from the corresponding elements of x and y + * arrays:

+ * + *

dst(I) = sqrt(x(I)^2 + y(I)^2)
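
+ * + *

For example (Java, illustrative only):

+ * + *

Mat x = new Mat(1, 1, CvType.CV_32F); x.put(0, 0, 3); + * Mat y = new Mat(1, 1, CvType.CV_32F); y.put(0, 0, 4); + * Mat mag = new Mat(); + * Core.magnitude(x, y, mag);  // mag contains 5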

+ * + * @param x floating-point array of x-coordinates of the vectors. + * @param y floating-point array of y-coordinates of the vectors; it must have + * the same size as x. + * @param magnitude output array of the same size and type as x. + * + * @see org.opencv.core.Core.magnitude + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#phase + * @see org.opencv.core.Core#sqrt + * @see org.opencv.core.Core#polarToCart + */ + public static void magnitude(Mat x, Mat y, Mat magnitude) + { + + magnitude_0(x.nativeObj, y.nativeObj, magnitude.nativeObj); + + return; + } + + + // + // C++: void max(Mat src1, Mat src2, Mat& dst) + // + +/** + *

Calculates per-element maximum of two arrays or an array and a scalar.

+ * + *

The functions max calculate the per-element maximum of two + * arrays:

+ * + *

dst(I)= max(src1(I), src2(I))

+ * + *

or array and a scalar:

+ * + *

dst(I)= max(src1(I), value)

+ * + *

In the second variant, when the input array is multi-channel, each channel is + * compared with value independently.

+ * + *

The first 3 variants of the function listed above are actually a part of + * "MatrixExpressions". They return an expression object that can be further + * transformed or assigned to a matrix, passed to a function, and so on.
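
+ * + *

A short Java sketch (illustrative only):

+ * + *

Mat dark = new Mat(3, 3, CvType.CV_8U, new Scalar(10)); + * Mat bright = new Mat(3, 3, CvType.CV_8U, new Scalar(200)); + * Mat dst = new Mat(); + * Core.max(dark, bright, dst);  // every element of dst is 200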

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.max + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#inRange + * @see org.opencv.core.Core#minMaxLoc + * @see org.opencv.core.Core#min + */ + public static void max(Mat src1, Mat src2, Mat dst) + { + + max_0(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void max(Mat src1, Scalar src2, Mat& dst) + // + +/** + *

Calculates per-element maximum of two arrays or an array and a scalar.

+ * + *

The functions max calculate the per-element maximum of two + * arrays:

+ * + *

dst(I)= max(src1(I), src2(I))

+ * + *

or array and a scalar:

+ * + *

dst(I)= max(src1(I), value)

+ * + *

In the second variant, when the input array is multi-channel, each channel is + * compared with value independently.

+ * + *

The first 3 variants of the function listed above are actually a part of + * "MatrixExpressions". They return an expression object that can be further + * transformed or assigned to a matrix, passed to a function, and so on.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.max + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#inRange + * @see org.opencv.core.Core#minMaxLoc + * @see org.opencv.core.Core#min + */ + public static void max(Mat src1, Scalar src2, Mat dst) + { + + max_1(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: Scalar mean(Mat src, Mat mask = Mat()) + // + +/** + *

Calculates an average (mean) of array elements.

+ * + *

The function mean calculates the mean value M of + * array elements, independently for each channel, and returns it:

+ * + *

N = sum_(I: mask(I) != 0) 1 + * M_c = (sum_(I: mask(I) != 0) mtx(I)_c)/N

+ * + *

When all the mask elements are 0's, the functions return Scalar.all(0).
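
+ * + *

For example (Java, illustrative only; img is an existing image Mat):

+ * + *

Scalar avg = Core.mean(img);  // per-channel average of img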

+ * + * @param src input array that should have from 1 to 4 channels so that the + * result can be stored in "Scalar_". + * @param mask optional operation mask. + * + * @see org.opencv.core.Core.mean + * @see org.opencv.core.Core#countNonZero + * @see org.opencv.core.Core#meanStdDev + * @see org.opencv.core.Core#norm + * @see org.opencv.core.Core#minMaxLoc + */ + public static Scalar mean(Mat src, Mat mask) + { + + Scalar retVal = new Scalar(mean_0(src.nativeObj, mask.nativeObj)); + + return retVal; + } + +/** + *

Calculates an average (mean) of array elements.

+ * + *

The function mean calculates the mean value M of + * array elements, independently for each channel, and returns it:

+ * + *

N = sum_(I: mask(I) != 0) 1 + * M_c = (sum_(I: mask(I) != 0) mtx(I)_c)/N

+ * + *

When all the mask elements are 0's, the functions return Scalar.all(0).

+ * + * @param src input array that should have from 1 to 4 channels so that the + * result can be stored in "Scalar_". + * + * @see org.opencv.core.Core.mean + * @see org.opencv.core.Core#countNonZero + * @see org.opencv.core.Core#meanStdDev + * @see org.opencv.core.Core#norm + * @see org.opencv.core.Core#minMaxLoc + */ + public static Scalar mean(Mat src) + { + + Scalar retVal = new Scalar(mean_1(src.nativeObj)); + + return retVal; + } + + + // + // C++: void meanStdDev(Mat src, vector_double& mean, vector_double& stddev, Mat mask = Mat()) + // + +/** + *

Calculates a mean and standard deviation of array elements.

+ * + *

The function meanStdDev calculates the mean and the standard + * deviation M of array elements independently for each channel and + * returns it via the output parameters:

+ * + *

N = sum_{I: mask(I) != 0} 1 + * mean_c = (sum_{I: mask(I) != 0} src(I)_c) / N + * stddev_c = sqrt((sum_{I: mask(I) != 0} (src(I)_c - mean_c)^2) / N)

+ * + *

When all the mask elements are 0's, the functions return mean=stddev=Scalar.all(0).

+ * + *

Note: The calculated standard deviation is only the diagonal of the complete + * normalized covariance matrix. If the full matrix is needed, you can reshape + * the multi-channel array M x N to the single-channel array + * M*N x mtx.channels() (only possible when the matrix is + * continuous) and then pass the matrix to "calcCovarMatrix".
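A minimal Java usage sketch of the binding below (illustrative names only):

// Java code:
MatOfDouble mean = new MatOfDouble();
MatOfDouble stddev = new MatOfDouble();
Core.meanStdDev(src, mean, stddev);
double sigma0 = stddev.toArray()[0]; // standard deviation of the first channel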

+ * + * @param src input array that should have from 1 to 4 channels so that the + * results can be stored in "Scalar_" 's. + * @param mean output parameter: calculated mean value. + * @param stddev output parameter: calculated standard deviation. + * @param mask optional operation mask. + * + * @see org.opencv.core.Core.meanStdDev + * @see org.opencv.core.Core#countNonZero + * @see org.opencv.core.Core#calcCovarMatrix + * @see org.opencv.core.Core#minMaxLoc + * @see org.opencv.core.Core#norm + * @see org.opencv.core.Core#mean + */ + public static void meanStdDev(Mat src, MatOfDouble mean, MatOfDouble stddev, Mat mask) + { + Mat mean_mat = mean; + Mat stddev_mat = stddev; + meanStdDev_0(src.nativeObj, mean_mat.nativeObj, stddev_mat.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates a mean and standard deviation of array elements.

+ * + *

The function meanStdDev calculates the mean and the standard + * deviation M of array elements independently for each channel and + * returns it via the output parameters:

+ * + *

N = sum_{I: mask(I) != 0} 1 + * mean_c = (sum_{I: mask(I) != 0} src(I)_c) / N + * stddev_c = sqrt((sum_{I: mask(I) != 0} (src(I)_c - mean_c)^2) / N)

+ * + *

When all the mask elements are 0's, the functions return mean=stddev=Scalar.all(0).

+ * + *

Note: The calculated standard deviation is only the diagonal of the complete + * normalized covariance matrix. If the full matrix is needed, you can reshape + * the multi-channel array M x N to the single-channel array + * M*N x mtx.channels() (only possible when the matrix is + * continuous) and then pass the matrix to "calcCovarMatrix".

+ * + * @param src input array that should have from 1 to 4 channels so that the + * results can be stored in "Scalar_" 's. + * @param mean output parameter: calculated mean value. + * @param stddev output parameter: calculated standard deviation. + * + * @see org.opencv.core.Core.meanStdDev + * @see org.opencv.core.Core#countNonZero + * @see org.opencv.core.Core#calcCovarMatrix + * @see org.opencv.core.Core#minMaxLoc + * @see org.opencv.core.Core#norm + * @see org.opencv.core.Core#mean + */ + public static void meanStdDev(Mat src, MatOfDouble mean, MatOfDouble stddev) + { + Mat mean_mat = mean; + Mat stddev_mat = stddev; + meanStdDev_1(src.nativeObj, mean_mat.nativeObj, stddev_mat.nativeObj); + + return; + } + + + // + // C++: void merge(vector_Mat mv, Mat& dst) + // + +/** + *

Creates one multichannel array out of several single-channel ones.

+ * + *

The functions merge merge several arrays to make a single + * multi-channel array. That is, each element of the output array will be a + * concatenation of the elements of the input arrays, where elements of i-th + * input array are treated as mv[i].channels()-element vectors.

+ * + *

The function "split" does the reverse operation. If you need to shuffle + * channels in some other advanced way, use "mixChannels".

+ * + * @param mv input array or vector of matrices to be merged; all the matrices in + * mv must have the same size and the same depth. + * @param dst output array of the same size and the same depth as + * mv[0]; The number of channels will be the total number of + * channels in the matrix array. + * + * @see org.opencv.core.Core.merge + * @see org.opencv.core.Mat#reshape + * @see org.opencv.core.Core#mixChannels + * @see org.opencv.core.Core#split + */ + public static void merge(List mv, Mat dst) + { + Mat mv_mat = Converters.vector_Mat_to_Mat(mv); + merge_0(mv_mat.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void min(Mat src1, Mat src2, Mat& dst) + // + +/** + *

Calculates per-element minimum of two arrays or an array and a scalar.

+ * + *

The functions min calculate the per-element minimum of two + * arrays:

+ * + *

dst(I)= min(src1(I), src2(I))

+ * + *

or array and a scalar:

+ * + *

dst(I)= min(src1(I), value)

+ * + *

In the second variant, when the input array is multi-channel, each channel is + * compared with value independently.

+ * + *

The first three variants of the function listed above are actually a part of + * "MatrixExpressions". They return the expression object that can be further + * either transformed/assigned to a matrix, or passed to a function, and so on.
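A small sketch combining min and max to clamp values into [lo, hi] (hypothetical scalar bounds lo and hi):

// Java code:
Core.min(src, new Scalar(hi), dst); // cap values above hi
Core.max(dst, new Scalar(lo), dst); // raise values below lo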

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.min + * @see org.opencv.core.Core#max + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#inRange + * @see org.opencv.core.Core#minMaxLoc + */ + public static void min(Mat src1, Mat src2, Mat dst) + { + + min_0(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void min(Mat src1, Scalar src2, Mat& dst) + // + +/** + *

Calculates per-element minimum of two arrays or an array and a scalar.

+ * + *

The functions min calculate the per-element minimum of two + * arrays:

+ * + *

dst(I)= min(src1(I), src2(I))

+ * + *

or array and a scalar:

+ * + *

dst(I)= min(src1(I), value)

+ * + *

In the second variant, when the input array is multi-channel, each channel is + * compared with value independently.

+ * + *

The first three variants of the function listed above are actually a part of + * "MatrixExpressions". They return the expression object that can be further + * either transformed/assigned to a matrix, or passed to a function, and so on.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.min + * @see org.opencv.core.Core#max + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#inRange + * @see org.opencv.core.Core#minMaxLoc + */ + public static void min(Mat src1, Scalar src2, Mat dst) + { + + min_1(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: void mixChannels(vector_Mat src, vector_Mat dst, vector_int fromTo) + // + +/** + *

Copies specified channels from input arrays to the specified channels of + * output arrays.

+ * + *

The functions mixChannels provide an advanced mechanism for + * shuffling image channels.

+ * + *

"split" and "merge" and some forms of "cvtColor" are partial cases of + * mixChannels. + * In the example below, the code splits a 4-channel RGBA image into a 3-channel + * BGR (with R and B channels swapped) and a separate alpha-channel image: + *

+ * + *

// C++ code:

+ * + *

Mat rgba(100, 100, CV_8UC4, Scalar(1,2,3,4));

+ * + *

Mat bgr(rgba.rows, rgba.cols, CV_8UC3);

+ * + *

Mat alpha(rgba.rows, rgba.cols, CV_8UC1);

+ * + *

// forming an array of matrices is a quite efficient operation,

+ * + *

// because the matrix data is not copied, only the headers

+ * + *

Mat out[] = { bgr, alpha };

+ * + *

// rgba[0] -> bgr[2], rgba[1] -> bgr[1],

+ * + *

// rgba[2] -> bgr[0], rgba[3] -> alpha[0]

+ * + *

int from_to[] = { 0,2, 1,1, 2,0, 3,3 };

+ * + *

mixChannels(&rgba, 1, out, 2, from_to, 4);

+ * + *

Note: Unlike many other new-style C++ functions in OpenCV (see the + * introduction section and "Mat.create"), mixChannels requires + * the output arrays to be pre-allocated before calling the function. + *
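The same shuffle expressed through the Java binding below (a sketch; java.util.Arrays and CvType are assumed to be imported):

// Java code:
Mat rgba = new Mat(100, 100, CvType.CV_8UC4, new Scalar(1, 2, 3, 4));
Mat bgr = new Mat(rgba.rows(), rgba.cols(), CvType.CV_8UC3);
Mat alpha = new Mat(rgba.rows(), rgba.cols(), CvType.CV_8UC1);
Core.mixChannels(Arrays.asList(rgba), Arrays.asList(bgr, alpha), new MatOfInt(0, 2, 1, 1, 2, 0, 3, 3));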

+ * + * @param src input array or vector of matrices; all of the matrices must have + * the same size and the same depth. + * @param dst output array or vector of matrices; all the matrices *must be + * allocated*; their size and depth must be the same as in src[0]. + * @param fromTo array of index pairs specifying which channels are copied and + * where; fromTo[k*2] is a 0-based index of the input channel in + * src, fromTo[k*2+1] is an index of the output + * channel in dst; the continuous channel numbering is used: the + * first input image channels are indexed from 0 to + * src[0].channels()-1, the second input image channels are indexed + * from src[0].channels() to src[0].channels() + + * src[1].channels()-1, and so on, the same scheme is used for the output + * image channels; as a special case, when fromTo[k*2] is negative, + * the corresponding output channel is filled with zero. + * + * @see org.opencv.core.Core.mixChannels + * @see org.opencv.core.Core#merge + * @see org.opencv.core.Core#split + * @see org.opencv.imgproc.Imgproc#cvtColor + */ + public static void mixChannels(List src, List dst, MatOfInt fromTo) + { + Mat src_mat = Converters.vector_Mat_to_Mat(src); + Mat dst_mat = Converters.vector_Mat_to_Mat(dst); + Mat fromTo_mat = fromTo; + mixChannels_0(src_mat.nativeObj, dst_mat.nativeObj, fromTo_mat.nativeObj); + + return; + } + + + // + // C++: void mulSpectrums(Mat a, Mat b, Mat& c, int flags, bool conjB = false) + // + +/** + *

Performs the per-element multiplication of two Fourier spectrums.

+ * + *

The function mulSpectrums performs the per-element + * multiplication of the two CCS-packed or complex matrices that are results of + * a real or complex Fourier transform.

+ * + *

The function, together with "dft" and "idft", may be used to calculate + * convolution (pass conjB=false) or correlation (pass + * conjB=true) of two arrays rapidly. When the arrays are complex, + * they are simply multiplied (per element) with an optional conjugation of the + * second-array elements. When the arrays are real, they are assumed to be + * CCS-packed (see "dft" for details).

+ * + * @param a first input array. + * @param b second input array of the same size and type as a. + * @param c output array of the same size and type as the input arrays. + * @param flags operation flags; currently, the only supported flag is + * DFT_ROWS, which indicates that each row of src1 and + * src2 is an independent 1D Fourier spectrum. + * @param conjB optional flag that conjugates the second input array before the + * multiplication (true) or not (false). + * + * @see org.opencv.core.Core.mulSpectrums + */ + public static void mulSpectrums(Mat a, Mat b, Mat c, int flags, boolean conjB) + { + + mulSpectrums_0(a.nativeObj, b.nativeObj, c.nativeObj, flags, conjB); + + return; + } + +/** + *

Performs the per-element multiplication of two Fourier spectrums.

+ * + *

The function mulSpectrums performs the per-element + * multiplication of the two CCS-packed or complex matrices that are results of + * a real or complex Fourier transform.

+ * + *

The function, together with "dft" and "idft", may be used to calculate + * convolution (pass conjB=false) or correlation (pass + * conjB=true) of two arrays rapidly. When the arrays are complex, + * they are simply multiplied (per element) with an optional conjugation of the + * second-array elements. When the arrays are real, they are assumed to be + * CCS-packed (see "dft" for details).

+ * + * @param a first input array. + * @param b second input array of the same size and type as a. + * @param c output array of the same size and type as the input arrays. + * @param flags operation flags; currently, the only supported flag is + * DFT_ROWS, which indicates that each row of src1 and + * src2 is an independent 1D Fourier spectrum. + * + * @see org.opencv.core.Core.mulSpectrums + */ + public static void mulSpectrums(Mat a, Mat b, Mat c, int flags) + { + + mulSpectrums_1(a.nativeObj, b.nativeObj, c.nativeObj, flags); + + return; + } + + + // + // C++: void mulTransposed(Mat src, Mat& dst, bool aTa, Mat delta = Mat(), double scale = 1, int dtype = -1) + // + +/** + *

Calculates the product of a matrix and its transposition.

+ * + *

The function mulTransposed calculates the product of + * src and its transposition:

+ * + *

dst = scale(src - delta)^T(src - delta)

+ * + *

if aTa=true, and

+ * + *

dst = scale(src - delta)(src - delta)^T

+ * + *

otherwise. The function is used to calculate the covariance matrix. With zero + * delta, it can be used as a faster substitute for the general matrix product + * A*B when B=A'.
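A minimal sketch computing the Gram matrix of a single-channel float matrix A (illustrative name):

// Java code:
Mat gram = new Mat();
Core.mulTransposed(A, gram, true); // aTa=true -> gram = A^T * A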

+ * + * @param src input single-channel matrix. Note that unlike "gemm", the function + * can multiply not only floating-point matrices. + * @param dst output square matrix. + * @param aTa Flag specifying the multiplication ordering. See the description + * below. + * @param delta Optional delta matrix subtracted from src before + * the multiplication. When the matrix is empty (delta=noArray()), + * it is assumed to be zero, that is, nothing is subtracted. If it has the same + * size as src, it is simply subtracted. Otherwise, it is + * "repeated" (see "repeat") to cover the full src and then + * subtracted. Type of the delta matrix, when it is not empty, must be the same + * as the type of created output matrix. See the dtype parameter + * description below. + * @param scale Optional scale factor for the matrix product. + * @param dtype Optional type of the output matrix. When it is negative, the + * output matrix will have the same type as src. Otherwise, it will + * be type=CV_MAT_DEPTH(dtype) that should be either + * CV_32F or CV_64F. + * + * @see org.opencv.core.Core.mulTransposed + * @see org.opencv.core.Core#calcCovarMatrix + * @see org.opencv.core.Core#repeat + * @see org.opencv.core.Core#reduce + * @see org.opencv.core.Core#gemm + */ + public static void mulTransposed(Mat src, Mat dst, boolean aTa, Mat delta, double scale, int dtype) + { + + mulTransposed_0(src.nativeObj, dst.nativeObj, aTa, delta.nativeObj, scale, dtype); + + return; + } + +/** + *

Calculates the product of a matrix and its transposition.

+ * + *

The function mulTransposed calculates the product of + * src and its transposition:

+ * + *

dst = scale(src - delta)^T(src - delta)

+ * + *

if aTa=true, and

+ * + *

dst = scale(src - delta)(src - delta)^T

+ * + *

otherwise. The function is used to calculate the covariance matrix. With zero + * delta, it can be used as a faster substitute for the general matrix product + * A*B when B=A'.

+ * + * @param src input single-channel matrix. Note that unlike "gemm", the function + * can multiply not only floating-point matrices. + * @param dst output square matrix. + * @param aTa Flag specifying the multiplication ordering. See the description + * below. + * @param delta Optional delta matrix subtracted from src before + * the multiplication. When the matrix is empty (delta=noArray()), + * it is assumed to be zero, that is, nothing is subtracted. If it has the same + * size as src, it is simply subtracted. Otherwise, it is + * "repeated" (see "repeat") to cover the full src and then + * subtracted. Type of the delta matrix, when it is not empty, must be the same + * as the type of created output matrix. See the dtype parameter + * description below. + * @param scale Optional scale factor for the matrix product. + * + * @see org.opencv.core.Core.mulTransposed + * @see org.opencv.core.Core#calcCovarMatrix + * @see org.opencv.core.Core#repeat + * @see org.opencv.core.Core#reduce + * @see org.opencv.core.Core#gemm + */ + public static void mulTransposed(Mat src, Mat dst, boolean aTa, Mat delta, double scale) + { + + mulTransposed_1(src.nativeObj, dst.nativeObj, aTa, delta.nativeObj, scale); + + return; + } + +/** + *

Calculates the product of a matrix and its transposition.

+ * + *

The function mulTransposed calculates the product of + * src and its transposition:

+ * + *

dst = scale(src - delta)^T(src - delta)

+ * + *

if aTa=true, and

+ * + *

dst = scale(src - delta)(src - delta)^T

+ * + *

otherwise. The function is used to calculate the covariance matrix. With zero + * delta, it can be used as a faster substitute for the general matrix product + * A*B when B=A'.

+ * + * @param src input single-channel matrix. Note that unlike "gemm", the function + * can multiply not only floating-point matrices. + * @param dst output square matrix. + * @param aTa Flag specifying the multiplication ordering. See the description + * below. + * + * @see org.opencv.core.Core.mulTransposed + * @see org.opencv.core.Core#calcCovarMatrix + * @see org.opencv.core.Core#repeat + * @see org.opencv.core.Core#reduce + * @see org.opencv.core.Core#gemm + */ + public static void mulTransposed(Mat src, Mat dst, boolean aTa) + { + + mulTransposed_2(src.nativeObj, dst.nativeObj, aTa); + + return; + } + + + // + // C++: void multiply(Mat src1, Mat src2, Mat& dst, double scale = 1, int dtype = -1) + // + +/** + *

Calculates the per-element scaled product of two arrays.

+ * + *

The function multiply calculates the per-element product of two + * arrays:

+ * + *

dst(I)= saturate(scale * src1(I) * src2(I))

+ * + *

There is also a "MatrixExpressions" -friendly variant of the first function. + * See "Mat.mul".

+ * + *

For a not-per-element matrix product, see "gemm".

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in the case + * of overflow.
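A minimal Java usage sketch (illustrative names; the scale factor folds a normalization into the product):

// Java code:
Core.multiply(src1, src2, dst, 1.0 / 255.0); // dst(I) = saturate(src1(I)*src2(I)/255)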

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param dst output array of the same size and type as src1. + * @param scale optional scale factor. + * @param dtype a dtype + * + * @see org.opencv.core.Core.multiply + * @see org.opencv.core.Core#divide + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.imgproc.Imgproc#accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.imgproc.Imgproc#accumulateProduct + */ + public static void multiply(Mat src1, Mat src2, Mat dst, double scale, int dtype) + { + + multiply_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, scale, dtype); + + return; + } + +/** + *

Calculates the per-element scaled product of two arrays.

+ * + *

The function multiply calculates the per-element product of two + * arrays:

+ * + *

dst(I)= saturate(scale * src1(I) * src2(I))

+ * + *

There is also a "MatrixExpressions" -friendly variant of the first function. + * See "Mat.mul".

+ * + *

For a not-per-element matrix product, see "gemm".

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param dst output array of the same size and type as src1. + * @param scale optional scale factor. + * + * @see org.opencv.core.Core.multiply + * @see org.opencv.core.Core#divide + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.imgproc.Imgproc#accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.imgproc.Imgproc#accumulateProduct + */ + public static void multiply(Mat src1, Mat src2, Mat dst, double scale) + { + + multiply_1(src1.nativeObj, src2.nativeObj, dst.nativeObj, scale); + + return; + } + +/** + *

Calculates the per-element scaled product of two arrays.

+ * + *

The function multiply calculates the per-element product of two + * arrays:

+ * + *

dst(I)= saturate(scale * src1(I) * src2(I))

+ * + *

There is also a "MatrixExpressions" -friendly variant of the first function. + * See "Mat.mul".

+ * + *

For a not-per-element matrix product, see "gemm".

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.multiply + * @see org.opencv.core.Core#divide + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.imgproc.Imgproc#accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.imgproc.Imgproc#accumulateProduct + */ + public static void multiply(Mat src1, Mat src2, Mat dst) + { + + multiply_2(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void multiply(Mat src1, Scalar src2, Mat& dst, double scale = 1, int dtype = -1) + // + +/** + *

Calculates the per-element scaled product of two arrays.

+ * + *

The function multiply calculates the per-element product of two + * arrays:

+ * + *

dst(I)= saturate(scale * src1(I) * src2(I))

+ * + *

There is also a "MatrixExpressions" -friendly variant of the first function. + * See "Mat.mul".

+ * + *

For a not-per-element matrix product, see "gemm".

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param dst output array of the same size and type as src1. + * @param scale optional scale factor. + * @param dtype a dtype + * + * @see org.opencv.core.Core.multiply + * @see org.opencv.core.Core#divide + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.imgproc.Imgproc#accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.imgproc.Imgproc#accumulateProduct + */ + public static void multiply(Mat src1, Scalar src2, Mat dst, double scale, int dtype) + { + + multiply_3(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, scale, dtype); + + return; + } + +/** + *

Calculates the per-element scaled product of two arrays.

+ * + *

The function multiply calculates the per-element product of two + * arrays:

+ * + *

dst(I)= saturate(scale * src1(I) * src2(I))

+ * + *

There is also a "MatrixExpressions" -friendly variant of the first function. + * See "Mat.mul".

+ * + *

For a not-per-element matrix product, see "gemm".

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param dst output array of the same size and type as src1. + * @param scale optional scale factor. + * + * @see org.opencv.core.Core.multiply + * @see org.opencv.core.Core#divide + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.imgproc.Imgproc#accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.imgproc.Imgproc#accumulateProduct + */ + public static void multiply(Mat src1, Scalar src2, Mat dst, double scale) + { + + multiply_4(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, scale); + + return; + } + +/** + *

Calculates the per-element scaled product of two arrays.

+ * + *

The function multiply calculates the per-element product of two + * arrays:

+ * + *

dst(I)= saturate(scale * src1(I) * src2(I))

+ * + *

There is also a "MatrixExpressions" -friendly variant of the first function. + * See "Mat.mul".

+ * + *

For a not-per-element matrix product, see "gemm".

+ * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.multiply + * @see org.opencv.core.Core#divide + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.imgproc.Imgproc#accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Core#subtract + * @see org.opencv.imgproc.Imgproc#accumulateProduct + */ + public static void multiply(Mat src1, Scalar src2, Mat dst) + { + + multiply_5(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: double norm(Mat src1, int normType = NORM_L2, Mat mask = Mat()) + // + +/** + *

Calculates an absolute array norm, an absolute difference norm, or a relative + * difference norm.

+ * + *

The functions norm calculate an absolute norm of + * src1 (when there is no src2):

+ * + *

norm = ||src1||_Linf = max_I |src1(I)| (if normType = NORM_INF) + * norm = ||src1||_L1 = sum_I |src1(I)| (if normType = NORM_L1) + * norm = ||src1||_L2 = sqrt(sum_I src1(I)^2) (if normType = NORM_L2)

+ * + *

or an absolute or relative difference norm if src2 is there:

+ * + *

norm = ||src1 - src2||_Linf = max_I |src1(I) - src2(I)| (if normType = NORM_INF) + * norm = ||src1 - src2||_L1 = sum_I |src1(I) - src2(I)| (if normType = NORM_L1) + * norm = ||src1 - src2||_L2 = sqrt(sum_I (src1(I) - src2(I))^2) (if normType = NORM_L2)

+ * + *

or

+ * + *

norm = (||src1 - src2||_Linf) / (||src2||_Linf) (if normType = NORM_RELATIVE_INF) + * norm = (||src1 - src2||_L1) / (||src2||_L1) (if normType = NORM_RELATIVE_L1) + * norm = (||src1 - src2||_L2) / (||src2||_L2) (if normType = NORM_RELATIVE_L2)

+ * + *

The functions norm return the calculated norm.

+ * + *

When the mask parameter is specified and it is not empty, the + * norm is calculated only over the region specified by the mask.

+ * + *

Multi-channel input arrays are treated as single-channel ones; that is, the + * results for all channels are combined.
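A minimal Java usage sketch (illustrative Mats src, src1, src2):

// Java code:
double l2 = Core.norm(src, Core.NORM_L2);              // Euclidean norm of src
double maxDiff = Core.norm(src1, src2, Core.NORM_INF); // largest per-element difference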

+ * + * @param src1 first input array. + * @param normType type of the norm (see the details below). + * @param mask optional operation mask; it must have the same size as + * src1 and CV_8UC1 type. + * + * @see org.opencv.core.Core.norm + */ + public static double norm(Mat src1, int normType, Mat mask) + { + + double retVal = norm_0(src1.nativeObj, normType, mask.nativeObj); + + return retVal; + } + +/** + *

Calculates an absolute array norm, an absolute difference norm, or a relative + * difference norm.

+ * + *

The functions norm calculate an absolute norm of + * src1 (when there is no src2):

+ * + *

norm = ||src1||_Linf = max_I |src1(I)| (if normType = NORM_INF) + * norm = ||src1||_L1 = sum_I |src1(I)| (if normType = NORM_L1) + * norm = ||src1||_L2 = sqrt(sum_I src1(I)^2) (if normType = NORM_L2)

+ * + *

or an absolute or relative difference norm if src2 is there:

+ * + *

norm = ||src1 - src2||_Linf = max_I |src1(I) - src2(I)| (if normType = NORM_INF) + * norm = ||src1 - src2||_L1 = sum_I |src1(I) - src2(I)| (if normType = NORM_L1) + * norm = ||src1 - src2||_L2 = sqrt(sum_I (src1(I) - src2(I))^2) (if normType = NORM_L2)

+ * + *

or

+ * + *

norm = (||src1 - src2||_Linf) / (||src2||_Linf) (if normType = NORM_RELATIVE_INF) + * norm = (||src1 - src2||_L1) / (||src2||_L1) (if normType = NORM_RELATIVE_L1) + * norm = (||src1 - src2||_L2) / (||src2||_L2) (if normType = NORM_RELATIVE_L2)

+ * + *

The functions norm return the calculated norm.

+ * + *

When the mask parameter is specified and it is not empty, the + * norm is calculated only over the region specified by the mask.

+ * + *

Multi-channel input arrays are treated as single-channel ones; that is, the + * results for all channels are combined.

+ * + * @param src1 first input array. + * @param normType type of the norm (see the details below). + * + * @see org.opencv.core.Core.norm + */ + public static double norm(Mat src1, int normType) + { + + double retVal = norm_1(src1.nativeObj, normType); + + return retVal; + } + +/** + *

Calculates an absolute array norm, an absolute difference norm, or a relative + * difference norm.

+ * + *

The functions norm calculate an absolute norm of + * src1 (when there is no src2):

+ * + *

norm = ||src1||_Linf = max_I |src1(I)| (if normType = NORM_INF) + * norm = ||src1||_L1 = sum_I |src1(I)| (if normType = NORM_L1) + * norm = ||src1||_L2 = sqrt(sum_I src1(I)^2) (if normType = NORM_L2)

+ * + *

or an absolute or relative difference norm if src2 is there:

+ * + *

norm = ||src1 - src2||_Linf = max_I |src1(I) - src2(I)| (if normType = NORM_INF) + * norm = ||src1 - src2||_L1 = sum_I |src1(I) - src2(I)| (if normType = NORM_L1) + * norm = ||src1 - src2||_L2 = sqrt(sum_I (src1(I) - src2(I))^2) (if normType = NORM_L2)

+ * + *

or

+ * + *

norm = (||src1 - src2||_Linf) / (||src2||_Linf) (if normType = NORM_RELATIVE_INF) + * norm = (||src1 - src2||_L1) / (||src2||_L1) (if normType = NORM_RELATIVE_L1) + * norm = (||src1 - src2||_L2) / (||src2||_L2) (if normType = NORM_RELATIVE_L2)

+ * + *

The functions norm return the calculated norm.

+ * + *

When the mask parameter is specified and it is not empty, the + * norm is calculated only over the region specified by the mask.

+ * + *

Multi-channel input arrays are treated as single-channel ones; that is, the + * results for all channels are combined.

+ * + * @param src1 first input array. + * + * @see org.opencv.core.Core.norm + */ + public static double norm(Mat src1) + { + + double retVal = norm_2(src1.nativeObj); + + return retVal; + } + + + // + // C++: double norm(Mat src1, Mat src2, int normType = NORM_L2, Mat mask = Mat()) + // + +/** + *

Calculates an absolute array norm, an absolute difference norm, or a relative + * difference norm.

+ * + *

The functions norm calculate an absolute norm of + * src1 (when there is no src2):

+ * + *

norm = ||src1||_Linf = max_I |src1(I)| (if normType = NORM_INF) + * norm = ||src1||_L1 = sum_I |src1(I)| (if normType = NORM_L1) + * norm = ||src1||_L2 = sqrt(sum_I src1(I)^2) (if normType = NORM_L2)

+ * + *

or an absolute or relative difference norm if src2 is there:

+ * + *

norm = ||src1 - src2||_Linf = max_I |src1(I) - src2(I)| (if normType = NORM_INF) + * norm = ||src1 - src2||_L1 = sum_I |src1(I) - src2(I)| (if normType = NORM_L1) + * norm = ||src1 - src2||_L2 = sqrt(sum_I (src1(I) - src2(I))^2) (if normType = NORM_L2)

+ * + *

or

+ * + *

norm = (||src1 - src2||_Linf) / (||src2||_Linf) (if normType = NORM_RELATIVE_INF) + * norm = (||src1 - src2||_L1) / (||src2||_L1) (if normType = NORM_RELATIVE_L1) + * norm = (||src1 - src2||_L2) / (||src2||_L2) (if normType = NORM_RELATIVE_L2)

+ * + *

The functions norm return the calculated norm.

+ * + *

When the mask parameter is specified and it is not empty, the + * norm is calculated only over the region specified by the mask.

+ * + *

Multi-channel input arrays are treated as single-channel ones; that is, the + * results for all channels are combined.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param normType type of the norm (see the details below). + * @param mask optional operation mask; it must have the same size as + * src1 and CV_8UC1 type. + * + * @see org.opencv.core.Core.norm + */ + public static double norm(Mat src1, Mat src2, int normType, Mat mask) + { + + double retVal = norm_3(src1.nativeObj, src2.nativeObj, normType, mask.nativeObj); + + return retVal; + } + +/** + *

Calculates an absolute array norm, an absolute difference norm, or a relative + * difference norm.

+ * + *

The functions norm calculate an absolute norm of + * src1 (when there is no src2):

+ * + *

norm = ||src1||_Linf = max_I |src1(I)| (if normType = NORM_INF) + * norm = ||src1||_L1 = sum_I |src1(I)| (if normType = NORM_L1) + * norm = ||src1||_L2 = sqrt(sum_I src1(I)^2) (if normType = NORM_L2)

+ * + *

or an absolute or relative difference norm if src2 is there:

+ * + *

norm = ||src1 - src2||_Linf = max_I |src1(I) - src2(I)| (if normType = NORM_INF) + * norm = ||src1 - src2||_L1 = sum_I |src1(I) - src2(I)| (if normType = NORM_L1) + * norm = ||src1 - src2||_L2 = sqrt(sum_I (src1(I) - src2(I))^2) (if normType = NORM_L2)

+ * + *

or

+ * + *

norm = (||src1 - src2||_Linf) / (||src2||_Linf) (if normType = NORM_RELATIVE_INF) + * norm = (||src1 - src2||_L1) / (||src2||_L1) (if normType = NORM_RELATIVE_L1) + * norm = (||src1 - src2||_L2) / (||src2||_L2) (if normType = NORM_RELATIVE_L2)

+ * + *

The functions norm return the calculated norm.

+ * + *

When the mask parameter is specified and it is not empty, the + * norm is calculated only over the region specified by the mask.

+ * + *

Multi-channel input arrays are treated as single-channel ones; that is, the + * results for all channels are combined.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * @param normType type of the norm (see the details below). + * + * @see org.opencv.core.Core.norm + */ + public static double norm(Mat src1, Mat src2, int normType) + { + + double retVal = norm_4(src1.nativeObj, src2.nativeObj, normType); + + return retVal; + } + +/** + *

Calculates an absolute array norm, an absolute difference norm, or a relative + * difference norm.

+ * + *

The functions norm calculate an absolute norm of + * src1 (when there is no src2):

+ * + *

norm = ||src1||_Linf = max_I |src1(I)| (if normType = NORM_INF) + * norm = ||src1||_L1 = sum_I |src1(I)| (if normType = NORM_L1) + * norm = ||src1||_L2 = sqrt(sum_I src1(I)^2) (if normType = NORM_L2)

+ * + *

or an absolute or relative difference norm if src2 is there:

+ * + *

norm = ||src1 - src2||_Linf = max_I |src1(I) - src2(I)| (if normType = NORM_INF) + * norm = ||src1 - src2||_L1 = sum_I |src1(I) - src2(I)| (if normType = NORM_L1) + * norm = ||src1 - src2||_L2 = sqrt(sum_I (src1(I) - src2(I))^2) (if normType = NORM_L2)

+ * + *

or

+ * + *

norm = (||src1 - src2||_Linf) / (||src2||_Linf) (if normType = NORM_RELATIVE_INF) + * norm = (||src1 - src2||_L1) / (||src2||_L1) (if normType = NORM_RELATIVE_L1) + * norm = (||src1 - src2||_L2) / (||src2||_L2) (if normType = NORM_RELATIVE_L2)

+ * + *

The functions norm return the calculated norm.

+ * + *

When the mask parameter is specified and it is not empty, the + * norm is calculated only over the region specified by the mask.

+ * + *

Multi-channel input arrays are treated as single-channel ones; that is, the + * results for all channels are combined.

+ * + * @param src1 first input array. + * @param src2 second input array of the same size and the same type as + * src1. + * + * @see org.opencv.core.Core.norm + */ + public static double norm(Mat src1, Mat src2) + { + + double retVal = norm_5(src1.nativeObj, src2.nativeObj); + + return retVal; + } + + + // + // C++: void normalize(Mat src, Mat& dst, double alpha = 1, double beta = 0, int norm_type = NORM_L2, int dtype = -1, Mat mask = Mat()) + // + +/** + *

Normalizes the norm or value range of an array.

+ * + *

The functions normalize scale and shift the input array elements + * so that

+ * + *

||dst||_{L_p} = alpha

+ * + *

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, + * or NORM_L2, respectively; or so that

+ * + *

min _I dst(I)= alpha, max _I dst(I)= beta

+ * + *

when normType=NORM_MINMAX (for dense arrays only). + * The optional mask specifies a sub-array to be normalized. This means that the + * norm or min-n-max are calculated over the sub-array, and then this sub-array + * is modified to be normalized. If you want to only use the mask to calculate + * the norm or min-max but modify the whole array, you can use "norm" and + * "Mat.convertTo".

+ * + *

In case of sparse matrices, only the non-zero values are analyzed and + * transformed. Because of this, the range transformation for sparse matrices is + * not allowed since it can shift the zero level.
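A minimal Java sketch stretching a float image to the full 8-bit range (illustrative names):

// Java code:
Core.normalize(src, dst, 0, 255, Core.NORM_MINMAX, CvType.CV_8U);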

+ * + * @param src input array. + * @param dst output array of the same size as src. + * @param alpha norm value to normalize to or the lower range boundary in case + * of the range normalization. + * @param beta upper range boundary in case of the range normalization; it is + * not used for the norm normalization. + * @param norm_type a norm_type + * @param dtype when negative, the output array has the same type as + * src; otherwise, it has the same number of channels as + * src and the depth =CV_MAT_DEPTH(dtype). + * @param mask optional operation mask. + * + * @see org.opencv.core.Core.normalize + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#norm + */ + public static void normalize(Mat src, Mat dst, double alpha, double beta, int norm_type, int dtype, Mat mask) + { + + normalize_0(src.nativeObj, dst.nativeObj, alpha, beta, norm_type, dtype, mask.nativeObj); + + return; + } + +/** + *

Normalizes the norm or value range of an array.

+ * + *

The functions normalize scale and shift the input array elements + * so that

+ * + *

||dst||_{L_p} = alpha

+ * + *

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, + * or NORM_L2, respectively; or so that

+ * + *

min _I dst(I)= alpha, max _I dst(I)= beta

+ * + *

when normType=NORM_MINMAX (for dense arrays only). + * The optional mask specifies a sub-array to be normalized. This means that the + * norm or min-n-max are calculated over the sub-array, and then this sub-array + * is modified to be normalized. If you want to only use the mask to calculate + * the norm or min-max but modify the whole array, you can use "norm" and + * "Mat.convertTo".

+ * + *

In case of sparse matrices, only the non-zero values are analyzed and + * transformed. Because of this, the range transformation for sparse matrices is + * not allowed since it can shift the zero level.

+ * + * @param src input array. + * @param dst output array of the same size as src. + * @param alpha norm value to normalize to or the lower range boundary in case + * of the range normalization. + * @param beta upper range boundary in case of the range normalization; it is + * not used for the norm normalization. + * @param norm_type a norm_type + * @param dtype when negative, the output array has the same type as + * src; otherwise, it has the same number of channels as + * src and the depth =CV_MAT_DEPTH(dtype). + * + * @see org.opencv.core.Core.normalize + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#norm + */ + public static void normalize(Mat src, Mat dst, double alpha, double beta, int norm_type, int dtype) + { + + normalize_1(src.nativeObj, dst.nativeObj, alpha, beta, norm_type, dtype); + + return; + } + +/** + *

Normalizes the norm or value range of an array.

+ * + *

The functions normalize scale and shift the input array elements + * so that

+ * + *

||dst||_{L_p} = alpha

+ * + *

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, + * or NORM_L2, respectively; or so that

+ * + *

min _I dst(I)= alpha, max _I dst(I)= beta

+ * + *

when normType=NORM_MINMAX (for dense arrays only). + * The optional mask specifies a sub-array to be normalized. This means that the + * norm or min-n-max are calculated over the sub-array, and then this sub-array + * is modified to be normalized. If you want to only use the mask to calculate + * the norm or min-max but modify the whole array, you can use "norm" and + * "Mat.convertTo".

+ * + *

In case of sparse matrices, only the non-zero values are analyzed and + * transformed. Because of this, the range transformation for sparse matrices is + * not allowed since it can shift the zero level.

+ * + * @param src input array. + * @param dst output array of the same size as src. + * @param alpha norm value to normalize to or the lower range boundary in case + * of the range normalization. + * @param beta upper range boundary in case of the range normalization; it is + * not used for the norm normalization. + * @param norm_type a norm_type + * + * @see org.opencv.core.Core.normalize + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#norm + */ + public static void normalize(Mat src, Mat dst, double alpha, double beta, int norm_type) + { + + normalize_2(src.nativeObj, dst.nativeObj, alpha, beta, norm_type); + + return; + } + +/** + *

Normalizes the norm or value range of an array.

+ * + *

The functions normalize scale and shift the input array elements + * so that

+ * + *

||dst||_{L_p} = alpha

+ * + *

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, + * or NORM_L2, respectively; or so that

+ * + *

min _I dst(I)= alpha, max _I dst(I)= beta

+ * + *

when normType=NORM_MINMAX (for dense arrays only). + * The optional mask specifies a sub-array to be normalized. This means that the + * norm or min-n-max are calculated over the sub-array, and then this sub-array + * is modified to be normalized. If you want to only use the mask to calculate + * the norm or min-max but modify the whole array, you can use "norm" and + * "Mat.convertTo".

+ * + *

In case of sparse matrices, only the non-zero values are analyzed and + * transformed. Because of this, the range transformation for sparse matrices is + * not allowed since it can shift the zero level.

+ * + * @param src input array. + * @param dst output array of the same size as src. + * + * @see org.opencv.core.Core.normalize + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#norm + */ + public static void normalize(Mat src, Mat dst) + { + + normalize_3(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void patchNaNs(Mat& a, double val = 0) + // + + public static void patchNaNs(Mat a, double val) + { + + patchNaNs_0(a.nativeObj, val); + + return; + } + + public static void patchNaNs(Mat a) + { + + patchNaNs_1(a.nativeObj); + + return; + } + + + // + // C++: void perspectiveTransform(Mat src, Mat& dst, Mat m) + // + +/** + *

Performs the perspective matrix transformation of vectors.

+ * + *

The function perspectiveTransform transforms every element of + * src by treating it as a 2D or 3D vector, in the following way:

+ * + *

(x, y, z) -> (x'/w, y'/w, z'/w)

+ * + *

where

+ * + *

(x', y', z', w') = mat * (x, y, z, 1)^T

+ * + *

and

+ * + *

w = w' if w' != 0; infty otherwise

+ * + *

Here a 3D vector transformation is shown. In case of a 2D vector + * transformation, the z component is omitted.

+ * + *

Note: The function transforms a sparse set of 2D or 3D vectors. If you want + * to transform an image using perspective transformation, use "warpPerspective". + * If you have an inverse problem, that is, you want to compute the most + * probable perspective transformation out of several pairs of corresponding + * points, you can use "getPerspectiveTransform" or "findHomography".
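A minimal Java sketch mapping a sparse point set through a 3x3 homography H (hypothetical Mat H):

// Java code:
MatOfPoint2f srcPts = new MatOfPoint2f(new Point(0, 0), new Point(10, 5));
MatOfPoint2f dstPts = new MatOfPoint2f();
Core.perspectiveTransform(srcPts, dstPts, H);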

+ * + * @param src input two-channel or three-channel floating-point array; each + * element is a 2D/3D vector to be transformed. + * @param dst output array of the same size and type as src. + * @param m 3x3 or 4x4 floating-point transformation + * matrix. + * + * @see org.opencv.core.Core.perspectiveTransform + * @see org.opencv.calib3d.Calib3d#findHomography + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.core.Core#transform + * @see org.opencv.imgproc.Imgproc#getPerspectiveTransform + */ + public static void perspectiveTransform(Mat src, Mat dst, Mat m) + { + + perspectiveTransform_0(src.nativeObj, dst.nativeObj, m.nativeObj); + + return; + } + + + // + // C++: void phase(Mat x, Mat y, Mat& angle, bool angleInDegrees = false) + // + +/** + *

Calculates the rotation angle of 2D vectors.

+ * + *

The function phase calculates the rotation angle of each 2D + * vector that is formed from the corresponding elements of x and + * y :

+ * + *

angle(I) = atan2(y(I), x(I))

+ * + *

The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0, + * the corresponding angle(I) is set to 0.

+ * + * @param x input floating-point array of x-coordinates of 2D vectors. + * @param y input array of y-coordinates of 2D vectors; it must have the same + * size and the same type as x. + * @param angle output array of vector angles; it has the same size and same + * type as x. + * @param angleInDegrees when true, the function calculates the angle in + * degrees, otherwise, they are measured in radians. + * + * @see org.opencv.core.Core.phase + */ + public static void phase(Mat x, Mat y, Mat angle, boolean angleInDegrees) + { + + phase_0(x.nativeObj, y.nativeObj, angle.nativeObj, angleInDegrees); + + return; + } + +/** + *

Calculates the rotation angle of 2D vectors.

+ * + *

The function phase calculates the rotation angle of each 2D + * vector that is formed from the corresponding elements of x and + * y :

+ * + *

angle(I) = atan2(y(I), x(I))

+ * + *

The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0, + * the corresponding angle(I) is set to 0.

+ * + * @param x input floating-point array of x-coordinates of 2D vectors. + * @param y input array of y-coordinates of 2D vectors; it must have the same + * size and the same type as x. + * @param angle output array of vector angles; it has the same size and same + * type as x. + * + * @see org.opencv.core.Core.phase + */ + public static void phase(Mat x, Mat y, Mat angle) + { + + phase_1(x.nativeObj, y.nativeObj, angle.nativeObj); + + return; + } + + + // + // C++: void polarToCart(Mat magnitude, Mat angle, Mat& x, Mat& y, bool angleInDegrees = false) + // + +/** + *

Calculates x and y coordinates of 2D vectors from their magnitude and angle.

+ * + *

The function polarToCart calculates the Cartesian coordinates of + * each 2D vector represented by the corresponding elements of magnitude + * and angle :

+ * + *

x(I) = magnitude(I) cos(angle(I)) + * y(I) = magnitude(I) sin(angle(I)) + *

+ * + *

The relative accuracy of the estimated coordinates is about 1e-6.
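A minimal Java usage sketch (magnitude and angle are same-size float Mats; angles in radians):

// Java code:
Mat x = new Mat(), y = new Mat();
Core.polarToCart(magnitude, angle, x, y);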

+ * + * @param magnitude input floating-point array of magnitudes of 2D vectors; it + * can be an empty matrix (=Mat()), in this case, the function + * assumes that all the magnitudes are =1; if it is not empty, it must have the + * same size and type as angle. + * @param angle input floating-point array of angles of 2D vectors. + * @param x output array of x-coordinates of 2D vectors; it has the same size + * and type as angle. + * @param y output array of y-coordinates of 2D vectors; it has the same size + * and type as angle. + * @param angleInDegrees when true, the input angles are measured in degrees, + * otherwise, they are measured in radians. + * + * @see org.opencv.core.Core.polarToCart + * @see org.opencv.core.Core#log + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#pow + * @see org.opencv.core.Core#sqrt + * @see org.opencv.core.Core#magnitude + * @see org.opencv.core.Core#exp + * @see org.opencv.core.Core#phase + */ + public static void polarToCart(Mat magnitude, Mat angle, Mat x, Mat y, boolean angleInDegrees) + { + + polarToCart_0(magnitude.nativeObj, angle.nativeObj, x.nativeObj, y.nativeObj, angleInDegrees); + + return; + } + +/** + *

Calculates x and y coordinates of 2D vectors from their magnitude and angle.

+ * + *

The function polarToCart calculates the Cartesian coordinates of + * each 2D vector represented by the corresponding elements of magnitude + * and angle :

+ * + *

x(I) = magnitude(I) cos(angle(I)) + * y(I) = magnitude(I) sin(angle(I)) + *

+ * + *

The relative accuracy of the estimated coordinates is about 1e-6.

+ * + * @param magnitude input floating-point array of magnitudes of 2D vectors; it + * can be an empty matrix (=Mat()), in this case, the function + * assumes that all the magnitudes are =1; if it is not empty, it must have the + * same size and type as angle. + * @param angle input floating-point array of angles of 2D vectors. + * @param x output array of x-coordinates of 2D vectors; it has the same size + * and type as angle. + * @param y output array of y-coordinates of 2D vectors; it has the same size + * and type as angle. + * + * @see org.opencv.core.Core.polarToCart + * @see org.opencv.core.Core#log + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#pow + * @see org.opencv.core.Core#sqrt + * @see org.opencv.core.Core#magnitude + * @see org.opencv.core.Core#exp + * @see org.opencv.core.Core#phase + */ + public static void polarToCart(Mat magnitude, Mat angle, Mat x, Mat y) + { + + polarToCart_1(magnitude.nativeObj, angle.nativeObj, x.nativeObj, y.nativeObj); + + return; + } + + + // + // C++: void polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + // + +/** + *

Draws several polygonal curves.

+ * + *

The function polylines draws one or more polygonal curves.
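A minimal Java sketch drawing one closed triangle (illustrative coordinates; java.util.Arrays assumed imported):

// Java code:
MatOfPoint tri = new MatOfPoint(new Point(10, 10), new Point(90, 20), new Point(50, 80));
Core.polylines(img, Arrays.asList(tri), true, new Scalar(0, 255, 0), 2);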

+ * + * @param img Image. + * @param pts Array of polygonal curves. + * @param isClosed Flag indicating whether the drawn polylines are closed or + * not. If they are closed, the function draws a line from the last vertex of + * each curve to its first vertex. + * @param color Polyline color. + * @param thickness Thickness of the polyline edges. + * @param lineType Type of the line segments. See the "line" description. + * @param shift Number of fractional bits in the vertex coordinates. + * + * @see org.opencv.core.Core.polylines + */ + public static void polylines(Mat img, List pts, boolean isClosed, Scalar color, int thickness, int lineType, int shift) + { + List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); + Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); + polylines_0(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); + + return; + } + +/** + *

Draws several polygonal curves.

+ * + *

The function polylines draws one or more polygonal curves.

+ * + * @param img Image. + * @param pts Array of polygonal curves. + * @param isClosed Flag indicating whether the drawn polylines are closed or + * not. If they are closed, the function draws a line from the last vertex of + * each curve to its first vertex. + * @param color Polyline color. + * @param thickness Thickness of the polyline edges. + * + * @see org.opencv.core.Core.polylines + */ + public static void polylines(Mat img, List pts, boolean isClosed, Scalar color, int thickness) + { + List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); + Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); + polylines_1(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws several polygonal curves.

+ * + *

The function polylines draws one or more polygonal curves.

+ * + * @param img Image. + * @param pts Array of polygonal curves. + * @param isClosed Flag indicating whether the drawn polylines are closed or + * not. If they are closed, the function draws a line from the last vertex of + * each curve to its first vertex. + * @param color Polyline color. + * + * @see org.opencv.core.Core.polylines + */ + public static void polylines(Mat img, List pts, boolean isClosed, Scalar color) + { + List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); + Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); + polylines_2(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void pow(Mat src, double power, Mat& dst) + // + +/** + *

Raises every array element to a power.

+ * + *

The function pow raises every element of the input array to + * power :

+ * + *

dst(I) = src(I)^power if power is an integer; |src(I)|^power otherwise. + * So, for a non-integer power exponent, the absolute values of + * input array elements are used. However, it is possible to get true values for + * negative values using some extra operations. In the example below, computing + * the 5th root of array src shows:

+ * + *

// C++ code:

+ * + *

Mat mask = src < 0;

+ * + *

pow(src, 1./5, dst);

+ * + *

subtract(Scalar.all(0), dst, dst, mask);

+ * + *

For some values of power, such as integer values, 0.5 and -0.5, + * specialized faster algorithms are used. + *

+ * + *

Special values (NaN, Inf) are not handled.
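The same negative-root trick expressed through the Java bindings (a sketch with illustrative names; Core.compare and Core.subtract are assumed available as in the standard bindings):

// Java code:
Mat mask = new Mat(), neg = new Mat();
Core.compare(src, new Scalar(0), mask, Core.CMP_LT);        // mask of negative elements
Core.pow(src, 1.0 / 5, dst);                                // |src(I)|^(1/5)
Core.subtract(Mat.zeros(dst.size(), dst.type()), dst, neg); // negated copy
neg.copyTo(dst, mask);                                      // restore the sign where src < 0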

+ * + * @param src input array. + * @param power exponent of power. + * @param dst output array of the same size and type as src. + * + * @see org.opencv.core.Core.pow + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.core.Core#polarToCart + * @see org.opencv.core.Core#exp + * @see org.opencv.core.Core#sqrt + * @see org.opencv.core.Core#log + */ + public static void pow(Mat src, double power, Mat dst) + { + + pow_0(src.nativeObj, power, dst.nativeObj); + + return; + } + + + // + // C++: void putText(Mat img, string text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1, int lineType = 8, bool bottomLeftOrigin = false) + // + +/** + *

Draws a text string.

+ * + *

The function putText renders the specified text string in the + * image. + * Symbols that cannot be rendered using the specified font are replaced by + * question marks. See "getTextSize" for a text rendering code example.
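A minimal Java usage sketch (illustrative text and position):

// Java code:
Core.putText(img, "hello", new Point(10, 30), Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 255), 2);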

+ * + * @param img Image. + * @param text Text string to be drawn. + * @param org Bottom-left corner of the text string in the image. + * @param fontFace Font type. One of FONT_HERSHEY_SIMPLEX, + * FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, + * FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, + * FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, + * or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font ID's can + * be combined with FONT_HERSHEY_ITALIC to get the slanted letters. + * @param fontScale Font scale factor that is multiplied by the font-specific + * base size. + * @param color Text color. + * @param thickness Thickness of the lines used to draw a text. + * @param lineType Line type. See the line for details. + * @param bottomLeftOrigin When true, the image data origin is at the + * bottom-left corner. Otherwise, it is at the top-left corner. + * + * @see org.opencv.core.Core.putText + */ + public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness, int lineType, boolean bottomLeftOrigin) + { + + putText_0(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, bottomLeftOrigin); + + return; + } + +/** + *

Draws a text string.

+ * + *

The function putText renders the specified text string in the + * image. + * Symbols that cannot be rendered using the specified font are replaced by + * question marks. See "getTextSize" for a text rendering code example.

+ * + * @param img Image. + * @param text Text string to be drawn. + * @param org Bottom-left corner of the text string in the image. + * @param fontFace Font type. One of FONT_HERSHEY_SIMPLEX, + * FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, + * FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, + * FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, + * or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font ID's can + * be combined with FONT_HERSHEY_ITALIC to get the slanted letters. + * @param fontScale Font scale factor that is multiplied by the font-specific + * base size. + * @param color Text color. + * @param thickness Thickness of the lines used to draw a text. + * + * @see org.opencv.core.Core.putText + */ + public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness) + { + + putText_1(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws a text string.

+ * + *

The function putText renders the specified text string in the + * image. + * Symbols that cannot be rendered using the specified font are replaced by + * question marks. See "getTextSize" for a text rendering code example.

+ * + * @param img Image. + * @param text Text string to be drawn. + * @param org Bottom-left corner of the text string in the image. + * @param fontFace Font type. One of FONT_HERSHEY_SIMPLEX, + * FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, + * FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, + * FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, + * or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font ID's can + * be combined with FONT_HERSHEY_ITALIC to get the slanted letters. + * @param fontScale Font scale factor that is multiplied by the font-specific + * base size. + * @param color Text color. + * + * @see org.opencv.core.Core.putText + */ + public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color) + { + + putText_2(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void randShuffle_(Mat& dst, double iterFactor = 1.) + // + + public static void randShuffle(Mat dst, double iterFactor) + { + + randShuffle_0(dst.nativeObj, iterFactor); + + return; + } + + public static void randShuffle(Mat dst) + { + + randShuffle_1(dst.nativeObj); + + return; + } + + + // + // C++: void randn(Mat& dst, double mean, double stddev) + // + +/** + *

Fills the array with normally distributed random numbers.

+ * + *

The function randn fills the matrix dst with + * normally distributed random numbers with the specified mean vector and the + * standard deviation matrix. The generated random numbers are clipped to fit + * the value range of the output array data type.
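+ *
+ * A minimal Java usage sketch (illustrative only; the destination must be
+ * pre-allocated, as noted below):
+ *
+ * Mat noise = new Mat(240, 320, CvType.CV_32F);
+ * Core.randn(noise, 0, 1); // zero mean, unit standard deviation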

+ * + * @param dst output array of random numbers; the array must be pre-allocated + * and have 1 to 4 channels. + * @param mean mean value (expectation) of the generated random numbers. + * @param stddev standard deviation of the generated random numbers; it can be + * either a vector (in which case a diagonal standard deviation matrix is + * assumed) or a square matrix. + * + * @see org.opencv.core.Core.randn + * @see org.opencv.core.Core#randu + */ + public static void randn(Mat dst, double mean, double stddev) + { + + randn_0(dst.nativeObj, mean, stddev); + + return; + } + + + // + // C++: void randu(Mat& dst, double low, double high) + // + +/** + *

Generates a single uniformly-distributed random number or an array of random + * numbers.

+ * + *

The template functions randu generate and return the next + * uniformly-distributed random value of the specified type. randu() + * is equivalent to (int)theRNG(), and so on. See the "RNG" + * description.

+ * + *

The second non-template variant of the function fills the matrix + * dst with uniformly-distributed random numbers from the specified + * range:

+ * + *

low_c <= dst(I)_c < high_c
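+ *
+ * A minimal Java usage sketch (illustrative only; m is assumed to be a
+ * pre-allocated 8-bit Mat):
+ *
+ * Core.randu(m, 0, 256); // uniform values in [0, 256)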

+ * + * @param dst output array of random numbers; the array must be pre-allocated. + * @param low inclusive lower boundary of the generated random numbers. + * @param high exclusive upper boundary of the generated random numbers. + * + * @see org.opencv.core.Core.randu + * @see org.opencv.core.Core#randn + */ + public static void randu(Mat dst, double low, double high) + { + + randu_0(dst.nativeObj, low, high); + + return; + } + + + // + // C++: void rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + // + +/** + *

Draws a simple, thick, or filled up-right rectangle.

+ * + *

The function rectangle draws a rectangle outline or a filled + * rectangle whose two opposite corners are pt1 and + * pt2, or r.tl() and r.br()-Point(1,1).
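+ *
+ * A minimal Java usage sketch (illustrative only; img is assumed to be a
+ * valid color Mat):
+ *
+ * Core.rectangle(img, new Point(10, 10), new Point(110, 60),
+ *     new Scalar(0, 255, 0), 2); // 2-pixel outline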

+ * + * @param img Image. + * @param pt1 Vertex of the rectangle. + * @param pt2 Vertex of the rectangle opposite to pt1. + * @param color Rectangle color or brightness (grayscale image). + * @param thickness Thickness of lines that make up the rectangle. Negative + * values, like CV_FILLED, mean that the function has to draw a + * filled rectangle. + * @param lineType Type of the line. See the "line" description. + * @param shift Number of fractional bits in the point coordinates. + * + * @see org.opencv.core.Core.rectangle + */ + public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift) + { + + rectangle_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); + + return; + } + +/** + *

Draws a simple, thick, or filled up-right rectangle.

+ * + *

The function rectangle draws a rectangle outline or a filled + * rectangle whose two opposite corners are pt1 and + * pt2, or r.tl() and r.br()-Point(1,1).

+ * + * @param img Image. + * @param pt1 Vertex of the rectangle. + * @param pt2 Vertex of the rectangle opposite to pt1. + * @param color Rectangle color or brightness (grayscale image). + * @param thickness Thickness of lines that make up the rectangle. Negative + * values, like CV_FILLED, mean that the function has to draw a + * filled rectangle. + * + * @see org.opencv.core.Core.rectangle + */ + public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness) + { + + rectangle_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws a simple, thick, or filled up-right rectangle.

+ * + *

The function rectangle draws a rectangle outline or a filled + * rectangle whose two opposite corners are pt1 and + * pt2, or r.tl() and r.br()-Point(1,1).

+ * + * @param img Image. + * @param pt1 Vertex of the rectangle. + * @param pt2 Vertex of the rectangle opposite to pt1. + * @param color Rectangle color or brightness (grayscale image). + * + * @see org.opencv.core.Core.rectangle + */ + public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color) + { + + rectangle_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void reduce(Mat src, Mat& dst, int dim, int rtype, int dtype = -1) + // + +/** + *

Reduces a matrix to a vector.

+ * + *

The function reduce reduces the matrix to a vector by treating + * the matrix rows/columns as a set of 1D vectors and performing the specified + * operation on the vectors until a single row/column is obtained. For example, + * the function can be used to compute horizontal and vertical projections of a + * raster image. In case of CV_REDUCE_SUM and CV_REDUCE_AVG, + * the output may have a larger element bit-depth to preserve accuracy. + * Multi-channel arrays are also supported in these two reduction modes.
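+ *
+ * A minimal Java usage sketch (illustrative only; the rtype value 0
+ * corresponds to CV_REDUCE_SUM):
+ *
+ * Mat colSums = new Mat();
+ * Core.reduce(src, colSums, 0, 0, CvType.CV_32F); // dim 0: a single row of column sums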

+ * + * @param src input 2D matrix. + * @param dst output vector. Its size and type is defined by dim + * and dtype parameters. + * @param dim dimension index along which the matrix is reduced. 0 means that + * the matrix is reduced to a single row. 1 means that the matrix is reduced to + * a single column. + * @param rtype reduction operation that could be one of the following: + *
    + *
  • CV_REDUCE_SUM: the output is the sum of all rows/columns of the + * matrix. + *
  • CV_REDUCE_AVG: the output is the mean vector of all rows/columns of + * the matrix. + *
  • CV_REDUCE_MAX: the output is the maximum (column/row-wise) of all + * rows/columns of the matrix. + *
  • CV_REDUCE_MIN: the output is the minimum (column/row-wise) of all + * rows/columns of the matrix. + *
+ * @param dtype when negative, the output vector will have the same type as the + * input matrix, otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), + * src.channels()). + * + * @see org.opencv.core.Core.reduce + * @see org.opencv.core.Core#repeat + */ + public static void reduce(Mat src, Mat dst, int dim, int rtype, int dtype) + { + + reduce_0(src.nativeObj, dst.nativeObj, dim, rtype, dtype); + + return; + } + +/** + *

Reduces a matrix to a vector.

+ * + *

The function reduce reduces the matrix to a vector by treating + * the matrix rows/columns as a set of 1D vectors and performing the specified + * operation on the vectors until a single row/column is obtained. For example, + * the function can be used to compute horizontal and vertical projections of a + * raster image. In case of CV_REDUCE_SUM and CV_REDUCE_AVG, + * the output may have a larger element bit-depth to preserve accuracy. + * Multi-channel arrays are also supported in these two reduction modes.

+ * + * @param src input 2D matrix. + * @param dst output vector. Its size and type is defined by dim + * and dtype parameters. + * @param dim dimension index along which the matrix is reduced. 0 means that + * the matrix is reduced to a single row. 1 means that the matrix is reduced to + * a single column. + * @param rtype reduction operation that could be one of the following: + *
    + *
  • CV_REDUCE_SUM: the output is the sum of all rows/columns of the + * matrix. + *
  • CV_REDUCE_AVG: the output is the mean vector of all rows/columns of + * the matrix. + *
  • CV_REDUCE_MAX: the output is the maximum (column/row-wise) of all + * rows/columns of the matrix. + *
  • CV_REDUCE_MIN: the output is the minimum (column/row-wise) of all + * rows/columns of the matrix. + *
+ * + * @see org.opencv.core.Core.reduce + * @see org.opencv.core.Core#repeat + */ + public static void reduce(Mat src, Mat dst, int dim, int rtype) + { + + reduce_1(src.nativeObj, dst.nativeObj, dim, rtype); + + return; + } + + + // + // C++: void repeat(Mat src, int ny, int nx, Mat& dst) + // + +/** + *

Fills the output array with repeated copies of the input array.

+ * + *

The functions "repeat" duplicate the input array one or more times along each + * of the two axes:

+ * + *

dst(i,j) = src(i mod src.rows, j mod src.cols)

+ * + *

The second variant of the function is more convenient to use with + * "MatrixExpressions".
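+ *
+ * A minimal Java usage sketch (illustrative only; patch is assumed to be a
+ * valid Mat):
+ *
+ * Mat mosaic = new Mat();
+ * Core.repeat(patch, 3, 2, mosaic); // 3 copies vertically, 2 horizontally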

+ * + * @param src input array to replicate. + * @param ny Flag to specify how many times the src is repeated + * along the vertical axis. + * @param nx Flag to specify how many times the src is repeated + * along the horizontal axis. + * @param dst output array of the same type as src. + * + * @see org.opencv.core.Core.repeat + * @see org.opencv.core.Core#reduce + */ + public static void repeat(Mat src, int ny, int nx, Mat dst) + { + + repeat_0(src.nativeObj, ny, nx, dst.nativeObj); + + return; + } + + + // + // C++: void scaleAdd(Mat src1, double alpha, Mat src2, Mat& dst) + // + +/** + *

Calculates the sum of a scaled array and another array.

+ * + *

The function scaleAdd is one of the classical primitive linear + * algebra operations, known as DAXPY or SAXPY in BLAS + * (http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms). It + * calculates the sum of a scaled array and another array:

+ * + *

dst(I) = scale * src1(I) + src2(I) + * + * The function can also be + * emulated with a matrix expression, for example:

+ * + *

// C++ code:

+ * + *

Mat A(3, 3, CV_64F);...

+ * + *

A.row(0) = A.row(1)*2 + A.row(2);
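+ *
+ * The equivalent Java call (illustrative only; src1, src2 and dst are
+ * assumed to be valid Mats of the same size and type):
+ *
+ * Core.scaleAdd(src1, 2.0, src2, dst); // dst = 2*src1 + src2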

+ * + * @param src1 first input array. + * @param alpha scale factor for the first array. + * @param src2 second input array of the same size and type as src1. + * @param dst output array of the same size and type as src1. + * + * @see org.opencv.core.Core.scaleAdd + * @see org.opencv.core.Mat#dot + * @see org.opencv.core.Mat#convertTo + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#subtract + */ + public static void scaleAdd(Mat src1, double alpha, Mat src2, Mat dst) + { + + scaleAdd_0(src1.nativeObj, alpha, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void setErrorVerbosity(bool verbose) + // + + public static void setErrorVerbosity(boolean verbose) + { + + setErrorVerbosity_0(verbose); + + return; + } + + + // + // C++: void setIdentity(Mat& mtx, Scalar s = Scalar(1)) + // + +/** + *

Initializes a scaled identity matrix.

+ * + *

The function "setIdentity" initializes a scaled identity matrix:

+ * + *

mtx(i,j) = value if i=j; 0 otherwise + * + * The function can also be + * emulated using the matrix initializers and the matrix expressions:

+ * + *

// C++ code:

+ * + *

Mat A = Mat.eye(4, 3, CV_32F)*5;

+ * + *

// A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]]
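+ *
+ * The equivalent Java call (illustrative only):
+ *
+ * Mat A = new Mat(4, 3, CvType.CV_32F);
+ * Core.setIdentity(A, new Scalar(5)); // 5 on the diagonal, 0 elsewhere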

+ * + * @param mtx matrix to initialize (not necessarily square). + * @param s value to assign to the diagonal elements. + * + * @see org.opencv.core.Core.setIdentity + * @see org.opencv.core.Mat#setTo + * @see org.opencv.core.Mat#ones + * @see org.opencv.core.Mat#zeros + */ + public static void setIdentity(Mat mtx, Scalar s) + { + + setIdentity_0(mtx.nativeObj, s.val[0], s.val[1], s.val[2], s.val[3]); + + return; + } + +/** + *

Initializes a scaled identity matrix.

+ * + *

The function "setIdentity" initializes a scaled identity matrix:

+ * + *

mtx(i,j) = value if i=j; 0 otherwise + * + * The function can also be + * emulated using the matrix initializers and the matrix expressions:

+ * + *

// C++ code:

+ * + *

Mat A = Mat.eye(4, 3, CV_32F)*5;

+ * + *

// A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]]

+ * + * @param mtx matrix to initialize (not necessarily square). + * + * @see org.opencv.core.Core.setIdentity + * @see org.opencv.core.Mat#setTo + * @see org.opencv.core.Mat#ones + * @see org.opencv.core.Mat#zeros + */ + public static void setIdentity(Mat mtx) + { + + setIdentity_1(mtx.nativeObj); + + return; + } + + + // + // C++: bool solve(Mat src1, Mat src2, Mat& dst, int flags = DECOMP_LU) + // + +/** + *

Solves one or more linear systems or least-squares problems.

+ * + *

The function solve solves a linear system or least-squares + * problem (the latter is possible with SVD or QR methods, or by specifying the + * flag DECOMP_NORMAL):

+ * + *

dst = argmin_X ||src1*X - src2||

+ * + *

If DECOMP_LU or DECOMP_CHOLESKY method is used, the + * function returns 1 if src1 (or src1^Tsrc1) is + * non-singular. Otherwise, it returns 0. In the latter case, dst + * is not valid. Other methods find a pseudo-solution in case of a singular + * left-hand side part.

+ * + *

Note: If you want to find a unity-norm solution of an under-determined singular + * system src1*dst=0, the function solve will not do the + * work. Use "SVD.solveZ" instead.
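+ *
+ * A minimal Java usage sketch (illustrative only; A and b are assumed to be
+ * valid floating-point matrices):
+ *
+ * Mat x = new Mat();
+ * boolean ok = Core.solve(A, b, x, Core.DECOMP_LU); // solves A*x = b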

+ * + * @param src1 input matrix on the left-hand side of the system. + * @param src2 input matrix on the right-hand side of the system. + * @param dst output solution. + * @param flags solution (matrix inversion) method. + *
    + *
  • DECOMP_LU Gaussian elimination with optimal pivot element chosen. + *
  • DECOMP_CHOLESKY Cholesky LL^T factorization; the matrix + * src1 must be symmetric and positive definite. + *
  • DECOMP_EIG eigenvalue decomposition; the matrix src1 must + * be symmetric. + *
  • DECOMP_SVD singular value decomposition (SVD) method; the system can + * be over-defined and/or the matrix src1 can be singular. + *
  • DECOMP_QR QR factorization; the system can be over-defined and/or the + * matrix src1 can be singular. + *
  • DECOMP_NORMAL while all the previous flags are mutually exclusive, + * this flag can be used together with any of the previous; it means that the + * normal equations src1^T*src1*dst=src1^Tsrc2 are solved instead of + * the original system src1*dst=src2. + *
+ * + * @see org.opencv.core.Core.solve + * @see org.opencv.core.Core#invert + * @see org.opencv.core.Core#eigen + */ + public static boolean solve(Mat src1, Mat src2, Mat dst, int flags) + { + + boolean retVal = solve_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, flags); + + return retVal; + } + +/** + *

Solves one or more linear systems or least-squares problems.

+ * + *

The function solve solves a linear system or least-squares + * problem (the latter is possible with SVD or QR methods, or by specifying the + * flag DECOMP_NORMAL):

+ * + *

dst = argmin_X ||src1*X - src2||

+ * + *

If DECOMP_LU or DECOMP_CHOLESKY method is used, the + * function returns 1 if src1 (or src1^Tsrc1) is + * non-singular. Otherwise, it returns 0. In the latter case, dst + * is not valid. Other methods find a pseudo-solution in case of a singular + * left-hand side part.

+ * + *

Note: If you want to find a unity-norm solution of an under-determined singular + * system src1*dst=0, the function solve will not do the + * work. Use "SVD.solveZ" instead.

+ * + * @param src1 input matrix on the left-hand side of the system. + * @param src2 input matrix on the right-hand side of the system. + * @param dst output solution. + * + * @see org.opencv.core.Core.solve + * @see org.opencv.core.Core#invert + * @see org.opencv.core.Core#eigen + */ + public static boolean solve(Mat src1, Mat src2, Mat dst) + { + + boolean retVal = solve_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return retVal; + } + + + // + // C++: int solveCubic(Mat coeffs, Mat& roots) + // + +/** + *

Finds the real roots of a cubic equation.

+ * + *

The function solveCubic finds the real roots of a cubic + * equation:

+ *
    + *
  • if coeffs is a 4-element vector: + *
+ * + *

coeffs[0] x^3 + coeffs[1] x^2 + coeffs[2] x + coeffs[3] = 0

+ * + *
    + *
  • if coeffs is a 3-element vector: + *
+ * + *

x^3 + coeffs[0] x^2 + coeffs[1] x + coeffs[2] = 0

+ * + *

The roots are stored in the roots array.
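+ *
+ * A minimal Java usage sketch (illustrative only); the roots of
+ * x^3 - 6x^2 + 11x - 6 = 0 are 1, 2 and 3:
+ *
+ * Mat coeffs = new Mat(1, 4, CvType.CV_32F);
+ * coeffs.put(0, 0, 1, -6, 11, -6);
+ * Mat roots = new Mat();
+ * int n = Core.solveCubic(coeffs, roots); // n real roots, stored in roots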

+ * + * @param coeffs equation coefficients, an array of 3 or 4 elements. + * @param roots output array of real roots that has 1 or 3 elements. + * + * @see org.opencv.core.Core.solveCubic + */ + public static int solveCubic(Mat coeffs, Mat roots) + { + + int retVal = solveCubic_0(coeffs.nativeObj, roots.nativeObj); + + return retVal; + } + + + // + // C++: double solvePoly(Mat coeffs, Mat& roots, int maxIters = 300) + // + +/** + *

Finds the real or complex roots of a polynomial equation.

+ * + *

The function solvePoly finds real and complex roots of a + * polynomial equation:

+ * + *

coeffs[n] x^n + coeffs[n-1] x^(n-1) + ... + coeffs[1] x + coeffs[0] + * = 0

+ * + * @param coeffs array of polynomial coefficients. + * @param roots output (complex) array of roots. + * @param maxIters maximum number of iterations the algorithm does. + * + * @see org.opencv.core.Core.solvePoly + */ + public static double solvePoly(Mat coeffs, Mat roots, int maxIters) + { + + double retVal = solvePoly_0(coeffs.nativeObj, roots.nativeObj, maxIters); + + return retVal; + } + +/** + *

Finds the real or complex roots of a polynomial equation.

+ * + *

The function solvePoly finds real and complex roots of a + * polynomial equation:

+ * + *

coeffs[n] x^n + coeffs[n-1] x^(n-1) + ... + coeffs[1] x + coeffs[0] + * = 0

+ * + * @param coeffs array of polynomial coefficients. + * @param roots output (complex) array of roots. + * + * @see org.opencv.core.Core.solvePoly + */ + public static double solvePoly(Mat coeffs, Mat roots) + { + + double retVal = solvePoly_1(coeffs.nativeObj, roots.nativeObj); + + return retVal; + } + + + // + // C++: void sort(Mat src, Mat& dst, int flags) + // + +/** + *

Sorts each row or each column of a matrix.

+ * + *

The function sort sorts each matrix row or each matrix column in + * ascending or descending order, so you should pass two operation flags to get + * the desired behaviour. If you want to sort matrix rows or columns + * lexicographically, you can use the STL std.sort generic function + * with the proper comparison predicate.
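+ *
+ * A minimal Java usage sketch (illustrative only; the SORT_* constants are
+ * assumed to be exposed on Core as in the C++ API):
+ *
+ * Core.sort(src, dst, Core.SORT_EVERY_ROW + Core.SORT_ASCENDING);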

+ * + * @param src input single-channel array. + * @param dst output array of the same size and type as src. + * @param flags operation flags, a combination of the following values: + *
    + *
  • CV_SORT_EVERY_ROW each matrix row is sorted independently. + *
  • CV_SORT_EVERY_COLUMN each matrix column is sorted independently; this + * flag and the previous one are mutually exclusive. + *
  • CV_SORT_ASCENDING each matrix row is sorted in the ascending order. + *
  • CV_SORT_DESCENDING each matrix row is sorted in the descending order; + * this flag and the previous one are also mutually exclusive. + *
+ * + * @see org.opencv.core.Core.sort + * @see org.opencv.core.Core#randShuffle + * @see org.opencv.core.Core#sortIdx + */ + public static void sort(Mat src, Mat dst, int flags) + { + + sort_0(src.nativeObj, dst.nativeObj, flags); + + return; + } + + + // + // C++: void sortIdx(Mat src, Mat& dst, int flags) + // + +/** + *

Sorts each row or each column of a matrix.

+ * + *

The function sortIdx sorts each matrix row or each matrix column + * in ascending or descending order, so you should pass two operation flags + * to get the desired behaviour. Instead of reordering the elements themselves, it + * stores the indices of the sorted elements in the output array. For example: + *

+ * + *

// C++ code:

+ * + *

Mat A = Mat.eye(3,3,CV_32F), B;

+ * + *

sortIdx(A, B, CV_SORT_EVERY_ROW + CV_SORT_ASCENDING);

+ * + *

// B will probably contain

+ * + *

// (because of equal elements in A some permutations are possible):

+ * + *

// [[1, 2, 0], [0, 2, 1], [0, 1, 2]]

+ * + * @param src input single-channel array. + * @param dst output integer array of the same size as src. + * @param flags operation flags that could be a combination of the following + * values: + *
    + *
  • CV_SORT_EVERY_ROW each matrix row is sorted independently. + *
  • CV_SORT_EVERY_COLUMN each matrix column is sorted independently; this + * flag and the previous one are mutually exclusive. + *
  • CV_SORT_ASCENDING each matrix row is sorted in the ascending order. + *
  • CV_SORT_DESCENDING each matrix row is sorted in the descending order; + * this flag and the previous one are also mutually exclusive. + *
+ * + * @see org.opencv.core.Core.sortIdx + * @see org.opencv.core.Core#sort + * @see org.opencv.core.Core#randShuffle + */ + public static void sortIdx(Mat src, Mat dst, int flags) + { + + sortIdx_0(src.nativeObj, dst.nativeObj, flags); + + return; + } + + + // + // C++: void split(Mat m, vector_Mat& mv) + // + +/** + *

Divides a multi-channel array into several single-channel arrays.

+ * + *

The functions split split a multi-channel array into separate + * single-channel arrays:

+ * + *

mv [c](I) = src(I)_c

+ * + *

If you need to extract a single channel or do some other sophisticated + * channel permutation, use "mixChannels".
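+ *
+ * A minimal Java usage sketch (illustrative only; bgr is assumed to be a
+ * 3-channel Mat and java.util.ArrayList to be imported):
+ *
+ * List<Mat> channels = new ArrayList<Mat>();
+ * Core.split(bgr, channels); // channels.get(0) holds the first plane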

+ * + * @param m input multi-channel array. + * @param mv output array or vector of arrays; in the first variant of the + * function the number of arrays must match src.channels(); the + * arrays themselves are reallocated, if needed. + * + * @see org.opencv.core.Core.split + * @see org.opencv.core.Core#merge + * @see org.opencv.imgproc.Imgproc#cvtColor + * @see org.opencv.core.Core#mixChannels + */ + public static void split(Mat m, List mv) + { + Mat mv_mat = new Mat(); + split_0(m.nativeObj, mv_mat.nativeObj); + Converters.Mat_to_vector_Mat(mv_mat, mv); + return; + } + + + // + // C++: void sqrt(Mat src, Mat& dst) + // + +/** + *

Calculates a square root of array elements.

+ * + *

The functions sqrt calculate a square root of each input array + * element. In case of multi-channel arrays, each channel is processed + * independently. The accuracy is approximately the same as that of the built-in + * std.sqrt.

+ * + * @param src input floating-point array. + * @param dst output array of the same size and type as src. + * + * @see org.opencv.core.Core.sqrt + * @see org.opencv.core.Core#pow + * @see org.opencv.core.Core#magnitude + */ + public static void sqrt(Mat src, Mat dst) + { + + sqrt_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void subtract(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + // + +/** + *

Calculates the per-element difference between two arrays or array and a + * scalar.

+ * + *

The function subtract calculates:

+ *
    + *
  • Difference between two arrays, when both input arrays have the same + * size and the same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

+ * + *
    + *
  • Difference between an array and a scalar, when src2 is + * constructed from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

+ * + *
    + *
  • Difference between a scalar and an array, when src1 is + * constructed from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

+ * + *
    + *
  • The reverse difference between a scalar and an array in the case of + * SubRS: + *
+ * + *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 - src2;

+ * + *

dst -= src1; // equivalent to subtract(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can subtract two 8-bit unsigned arrays and store the + * difference in a 16-bit signed array. The depth of the output array is determined + * by the dtype parameter. In the second and third cases above, as well + * as in the first case, when src1.depth() == src2.depth(), + * dtype can be set to the default -1. In this case + * the output array will have the same depth as the input array, be it + * src1, src2 or both. + *

 + * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in the case + * of overflow.
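+ *
+ * A minimal Java usage sketch (illustrative only; src1 and src2 are assumed
+ * to be valid Mats of the same size and type):
+ *
+ * Core.subtract(src1, src2, dst);           // dst = src1 - src2
+ * Core.subtract(src1, new Scalar(50), dst); // subtract 50 from every element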

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array of the same size and the same number of channels as + * the input array. + * @param mask optional operation mask; this is an 8-bit single channel array + * that specifies elements of the output array to be changed. + * @param dtype optional depth of the output array (see the details below). + * + * @see org.opencv.core.Core.subtract + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Mat#convertTo + */ + public static void subtract(Mat src1, Mat src2, Mat dst, Mat mask, int dtype) + { + + subtract_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj, dtype); + + return; + } + +/** + *

Calculates the per-element difference between two arrays or array and a + * scalar.

+ * + *

The function subtract calculates:

+ *
    + *
  • Difference between two arrays, when both input arrays have the same + * size and the same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

+ * + *
    + *
  • Difference between an array and a scalar, when src2 is + * constructed from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

+ * + *
    + *
  • Difference between a scalar and an array, when src1 is + * constructed from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

+ * + *
    + *
  • The reverse difference between a scalar and an array in the case of + * SubRS: + *
+ * + *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 - src2;

+ * + *

dst -= src1; // equivalent to subtract(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can subtract two 8-bit unsigned arrays and store the + * difference in a 16-bit signed array. The depth of the output array is determined + * by the dtype parameter. In the second and third cases above, as well + * as in the first case, when src1.depth() == src2.depth(), + * dtype can be set to the default -1. In this case + * the output array will have the same depth as the input array, be it + * src1, src2 or both. + *

 + * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array of the same size and the same number of channels as + * the input array. + * @param mask optional operation mask; this is an 8-bit single channel array + * that specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.subtract + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Mat#convertTo + */ + public static void subtract(Mat src1, Mat src2, Mat dst, Mat mask) + { + + subtract_1(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element difference between two arrays or array and a + * scalar.

+ * + *

The function subtract calculates:

+ *
    + *
  • Difference between two arrays, when both input arrays have the same + * size and the same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

+ * + *
    + *
  • Difference between an array and a scalar, when src2 is + * constructed from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

+ * + *
    + *
  • Difference between a scalar and an array, when src1 is + * constructed from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

+ * + *
    + *
  • The reverse difference between a scalar and an array in the case of + * SubRS: + *
+ * + *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 - src2;

+ * + *

dst -= src1; // equivalent to subtract(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can subtract two 8-bit unsigned arrays and store the + * difference in a 16-bit signed array. The depth of the output array is determined + * by the dtype parameter. In the second and third cases above, as well + * as in the first case, when src1.depth() == src2.depth(), + * dtype can be set to the default -1. In this case + * the output array will have the same depth as the input array, be it + * src1, src2 or both. + *

 + * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array of the same size and the same number of channels as + * the input array. + * + * @see org.opencv.core.Core.subtract + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Mat#convertTo + */ + public static void subtract(Mat src1, Mat src2, Mat dst) + { + + subtract_2(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void subtract(Mat src1, Scalar src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + // + +/** + *

Calculates the per-element difference between two arrays or array and a + * scalar.

+ * + *

The function subtract calculates:

+ *
    + *
  • Difference between two arrays, when both input arrays have the same + * size and the same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

+ * + *
    + *
  • Difference between an array and a scalar, when src2 is + * constructed from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

+ * + *
    + *
  • Difference between a scalar and an array, when src1 is + * constructed from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

+ * + *
    + *
  • The reverse difference between a scalar and an array in the case of + * SubRS: + *
+ * + *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 - src2;

+ * + *

dst -= src1; // equivalent to subtract(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can subtract two 8-bit unsigned arrays and store the + * difference in a 16-bit signed array. The depth of the output array is determined + * by the dtype parameter. In the second and third cases above, as well + * as in the first case, when src1.depth() == src2.depth(), + * dtype can be set to the default -1. In this case + * the output array will have the same depth as the input array, be it + * src1, src2 or both. + *

 + * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array of the same size and the same number of channels as + * the input array. + * @param mask optional operation mask; this is an 8-bit single channel array + * that specifies elements of the output array to be changed. + * @param dtype optional depth of the output array (see the details below). + * + * @see org.opencv.core.Core.subtract + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Mat#convertTo + */ + public static void subtract(Mat src1, Scalar src2, Mat dst, Mat mask, int dtype) + { + + subtract_3(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, mask.nativeObj, dtype); + + return; + } + +/** + *

Calculates the per-element difference between two arrays or array and a + * scalar.

+ * + *

The function subtract calculates:

+ *
    + *
  • Difference between two arrays, when both input arrays have the same + * size and the same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

+ * + *
    + *
  • Difference between an array and a scalar, when src2 is + * constructed from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

+ * + *
    + *
  • Difference between a scalar and an array, when src1 is + * constructed from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

+ * + *
    + *
  • The reverse difference between a scalar and an array in the case of + * SubRS: + *
+ * + *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 - src2;

+ * + *

dst -= src1; // equivalent to subtract(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can subtract two 8-bit unsigned arrays and store the + * difference in a 16-bit signed array. The depth of the output array is determined + * by the dtype parameter. In the second and third cases above, as well + * as in the first case, when src1.depth() == src2.depth(), + * dtype can be set to the default -1. In this case + * the output array will have the same depth as the input array, be it + * src1, src2 or both. + *

 + * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array of the same size and the same number of channels as + * the input array. + * @param mask optional operation mask; this is an 8-bit single channel array + * that specifies elements of the output array to be changed. + * + * @see org.opencv.core.Core.subtract + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Mat#convertTo + */ + public static void subtract(Mat src1, Scalar src2, Mat dst, Mat mask) + { + + subtract_4(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Calculates the per-element difference between two arrays or array and a + * scalar.

+ * + *

The function subtract calculates:

+ *
    + *
  • Difference between two arrays, when both input arrays have the same + * size and the same number of channels: + *
+ * + *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

+ * + *
    + *
  • Difference between an array and a scalar, when src2 is + * constructed from Scalar or has the same number of elements as + * src1.channels(): + *
+ * + *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

+ * + *
    + *
  • Difference between a scalar and an array, when src1 is + * constructed from Scalar or has the same number of elements as + * src2.channels(): + *
+ * + *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

+ * + *
    + *
  • The reverse difference between a scalar and an array in the case of + * SubRS: + *
+ * + *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

+ * + *

where I is a multi-dimensional index of array elements. In case + * of multi-channel arrays, each channel is processed independently. + * The first function in the list above can be replaced with matrix expressions: + *

+ * + *

// C++ code:

+ * + *

dst = src1 - src2;

+ * + *

dst -= src1; // equivalent to subtract(dst, src1, dst);

+ * + *

The input arrays and the output array can all have the same or different + * depths. For example, you can subtract two 8-bit unsigned arrays and store the + * difference in a 16-bit signed array. The depth of the output array is determined + * by the dtype parameter. In the second and third cases above, as well + * as in the first case, when src1.depth() == src2.depth(), + * dtype can be set to the default -1. In this case + * the output array will have the same depth as the input array, be it + * src1, src2 or both. + *

 + * + *

Note: Saturation is not applied when the output array has the depth + * CV_32S. You may even get a result of an incorrect sign in the case + * of overflow.

+ * + * @param src1 first input array or a scalar. + * @param src2 second input array or a scalar. + * @param dst output array of the same size and the same number of channels as + * the input array. + * + * @see org.opencv.core.Core.subtract + * @see org.opencv.core.Core#addWeighted + * @see org.opencv.core.Core#add + * @see org.opencv.core.Core#scaleAdd + * @see org.opencv.core.Mat#convertTo + */ + public static void subtract(Mat src1, Scalar src2, Mat dst) + { + + subtract_5(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); + + return; + } + + + // + // C++: Scalar sum(Mat src) + // + +/** + *

Calculates the sum of array elements.

+ * + *

The functions sum calculate and return the sum of array + * elements, independently for each channel.
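+ *
+ * A minimal Java usage sketch (illustrative only; note that the Java wrapper
+ * is named sumElems):
+ *
+ * Scalar s = Core.sumElems(img); // s.val[0] is the sum over the first channel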

+ * + * @param src input array that must have from 1 to 4 channels. + * + * @see org.opencv.core.Core.sum + * @see org.opencv.core.Core#meanStdDev + * @see org.opencv.core.Core#reduce + * @see org.opencv.core.Core#minMaxLoc + * @see org.opencv.core.Core#countNonZero + * @see org.opencv.core.Core#norm + * @see org.opencv.core.Core#mean + */ + public static Scalar sumElems(Mat src) + { + + Scalar retVal = new Scalar(sumElems_0(src.nativeObj)); + + return retVal; + } + + + // + // C++: Scalar trace(Mat mtx) + // + +/** + *

Returns the trace of a matrix.

+ * + *

The function trace returns the sum of the diagonal elements of + * the matrix mtx.

+ * + *

tr(mtx) = sum_i mtx(i,i)

+ * + * @param mtx input matrix. + * + * @see org.opencv.core.Core.trace + */ + public static Scalar trace(Mat mtx) + { + + Scalar retVal = new Scalar(trace_0(mtx.nativeObj)); + + return retVal; + } + + + // + // C++: void transform(Mat src, Mat& dst, Mat m) + // + +/** + *

Performs the matrix transformation of every array element.

+ * + *

The function transform performs the matrix transformation of + * every element of the array src and stores the results in + * dst :

+ * + *

dst(I) = m * src(I)

+ * + *

(when m.cols=src.channels()), or

+ * + *

dst(I) = m * [ src(I); 1]

+ * + *

(when m.cols=src.channels()+1)

+ * + *

Every element of the N-channel array src is + * interpreted as an N-element vector that is transformed using the + * M x N or M x (N+1) matrix m into an + * M-element vector - the corresponding element of the output array + * dst.

+ * + *

The function may be used for geometrical transformation of + * N-dimensional points, arbitrary linear color space transformation (such as + * various kinds of RGB to YUV transforms), shuffling the image channels, and so + * forth.
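+ *
+ * A minimal Java usage sketch (illustrative only): swapping the two channels
+ * of a 2-channel array with a 2x2 transformation matrix:
+ *
+ * Mat m = new Mat(2, 2, CvType.CV_32F);
+ * m.put(0, 0, 0, 1, 1, 0); // [[0, 1], [1, 0]]
+ * Core.transform(src, dst, m);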

+ * + * @param src input array that must have as many channels (1 to 4) as + * m.cols or m.cols-1. + * @param dst output array of the same size and depth as src; it + * has as many channels as m.rows. + * @param m transformation 2x2 or 2x3 floating-point + * matrix. + * + * @see org.opencv.core.Core.transform + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.imgproc.Imgproc#warpPerspective + * @see org.opencv.imgproc.Imgproc#getAffineTransform + * @see org.opencv.video.Video#estimateRigidTransform + */ + public static void transform(Mat src, Mat dst, Mat m) + { + + transform_0(src.nativeObj, dst.nativeObj, m.nativeObj); + + return; + } + + + // + // C++: void transpose(Mat src, Mat& dst) + // + +/** + *

Transposes a matrix.

+ * + *

The function "transpose" transposes the matrix src :

+ * + *

dst(i,j) = src(j,i)

+ * + *

Note: No complex conjugation is done in case of a complex matrix. If + * needed, it should be done separately.

+ * + * @param src input array. + * @param dst output array of the same type as src. + * + * @see org.opencv.core.Core.transpose + */ + public static void transpose(Mat src, Mat dst) + { + + transpose_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void vconcat(vector_Mat src, Mat& dst) + // + + public static void vconcat(List src, Mat dst) + { + Mat src_mat = Converters.vector_Mat_to_Mat(src); + vconcat_0(src_mat.nativeObj, dst.nativeObj); + + return; + } + + + // manual port + public static class MinMaxLocResult { + public double minVal; + public double maxVal; + public Point minLoc; + public Point maxLoc; + + public MinMaxLocResult() { + minVal=0; maxVal=0; + minLoc=new Point(); + maxLoc=new Point(); + } + } + + // C++: minMaxLoc(Mat src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0, InputArray mask=noArray()) + +/** + *

Finds the global minimum and maximum in an array.

+ * + *

The functions minMaxLoc find the minimum and maximum element + * values and their positions. The extrema are searched across the whole array + * or, if mask is not an empty array, in the specified array + * region.

+ * + *

The functions do not work with multi-channel arrays. If you need to find + * minimum or maximum elements across all the channels, use "Mat.reshape" first + * to reinterpret the array as single-channel. Or you may extract the particular + * channel using either "extractImageCOI", or "mixChannels", or "split".
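+ *
+ * A minimal Java usage sketch (illustrative only; gray is assumed to be a
+ * single-channel Mat):
+ *
+ * Core.MinMaxLocResult r = Core.minMaxLoc(gray);
+ * // r.maxVal and r.maxLoc give the brightest value and its position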

+ * + * @param src input single-channel array. + * @param mask optional mask used to select a sub-array. + * + * @see org.opencv.core.Core.minMaxLoc + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#min + * @see org.opencv.core.Core#mixChannels + * @see org.opencv.core.Mat#reshape + * @see org.opencv.core.Core#split + * @see org.opencv.core.Core#max + * @see org.opencv.core.Core#inRange + */ + public static MinMaxLocResult minMaxLoc(Mat src, Mat mask) { + MinMaxLocResult res = new MinMaxLocResult(); + long maskNativeObj=0; + if (mask != null) { + maskNativeObj=mask.nativeObj; + } + double resarr[] = n_minMaxLocManual(src.nativeObj, maskNativeObj); + res.minVal=resarr[0]; + res.maxVal=resarr[1]; + res.minLoc.x=resarr[2]; + res.minLoc.y=resarr[3]; + res.maxLoc.x=resarr[4]; + res.maxLoc.y=resarr[5]; + return res; + } + +/** + *

Finds the global minimum and maximum in an array.

+ * + *

The functions minMaxLoc find the minimum and maximum element + * values and their positions. The extrema are searched across the whole array + * or, if mask is not an empty array, in the specified array + * region.

+ * + *

The functions do not work with multi-channel arrays. If you need to find + * minimum or maximum elements across all the channels, use "Mat.reshape" first + * to reinterpret the array as single-channel. Or you may extract the particular + * channel using either "extractImageCOI", or "mixChannels", or "split".

+ * + * @param src input single-channel array. + * + * @see org.opencv.core.Core.minMaxLoc + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#min + * @see org.opencv.core.Core#mixChannels + * @see org.opencv.core.Mat#reshape + * @see org.opencv.core.Core#split + * @see org.opencv.core.Core#max + * @see org.opencv.core.Core#inRange + */ + public static MinMaxLocResult minMaxLoc(Mat src) { + return minMaxLoc(src, null); + } + + + // C++: Size getTextSize(const string& text, int fontFace, double fontScale, int thickness, int* baseLine); +/** + *

Calculates the width and height of a text string.

+ * + *

The function getTextSize calculates and returns the size of a + * box that contains the specified text. That is, the following code renders some + * text, the tight box surrounding it, and the baseline:

+ * + *

// C++ code:

+ * + *

string text = "Funny text inside the box";

+ * + *

int fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX;

+ * + *

double fontScale = 2;

+ * + *

int thickness = 3;

+ * + *

Mat img(600, 800, CV_8UC3, Scalar.all(0));

+ * + *

int baseline=0;

+ * + *

Size textSize = getTextSize(text, fontFace,

+ * + *

fontScale, thickness, &baseline);

+ * + *

baseline += thickness;

+ * + *

// center the text

+ * + *

Point textOrg((img.cols - textSize.width)/2,

+ * + *

(img.rows + textSize.height)/2);

+ * + *

// draw the box

+ * + *

rectangle(img, textOrg + Point(0, baseline),

+ * + *

textOrg + Point(textSize.width, -textSize.height),

+ * + *

Scalar(0,0,255));

+ * + *

//... and the baseline first

+ * + *

line(img, textOrg + Point(0, thickness),

+ * + *

textOrg + Point(textSize.width, thickness),

+ * + *

Scalar(0, 0, 255));

+ * + *

// then put the text itself

+ * + *

putText(img, text, textOrg, fontFace, fontScale,

+ * + *

Scalar.all(255), thickness, 8);

+ * + * @param text Input text string. + * @param fontFace Font to use. See the "putText" for details. + * @param fontScale Font scale. See the "putText" for details. + * @param thickness Thickness of lines used to render the text. See "putText" + * for details. + * @param baseLine Output parameter - y-coordinate of the baseline relative to + * the bottom-most text point. + * + * @see org.opencv.core.Core.getTextSize + */ + public static Size getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine) { + if(baseLine != null && baseLine.length != 1) + throw new java.lang.IllegalArgumentException("'baseLine' must be 'int[1]' or 'null'."); + Size retVal = new Size(n_getTextSize(text, fontFace, fontScale, thickness, baseLine)); + return retVal; + } + + + + // C++: void LUT(Mat src, Mat lut, Mat& dst, int interpolation = 0) + private static native void LUT_0(long src_nativeObj, long lut_nativeObj, long dst_nativeObj, int interpolation); + private static native void LUT_1(long src_nativeObj, long lut_nativeObj, long dst_nativeObj); + + // C++: double Mahalanobis(Mat v1, Mat v2, Mat icovar) + private static native double Mahalanobis_0(long v1_nativeObj, long v2_nativeObj, long icovar_nativeObj); + + // C++: void PCABackProject(Mat data, Mat mean, Mat eigenvectors, Mat& result) + private static native void PCABackProject_0(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj, long result_nativeObj); + + // C++: void PCACompute(Mat data, Mat& mean, Mat& eigenvectors, int maxComponents = 0) + private static native void PCACompute_0(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj, int maxComponents); + private static native void PCACompute_1(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj); + + // C++: void PCAComputeVar(Mat data, Mat& mean, Mat& eigenvectors, double retainedVariance) + private static native void PCAComputeVar_0(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj, double retainedVariance); + + // C++: void PCAProject(Mat data, Mat mean, Mat eigenvectors, Mat& result) + private static native void PCAProject_0(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj, long result_nativeObj); + + // C++: void SVBackSubst(Mat w, Mat u, Mat vt, Mat rhs, Mat& dst) + private static native void SVBackSubst_0(long w_nativeObj, long u_nativeObj, long vt_nativeObj, long rhs_nativeObj, long dst_nativeObj); + + // C++: void SVDecomp(Mat src, Mat& w, Mat& u, Mat& vt, int flags = 0) + private static native void SVDecomp_0(long src_nativeObj, long w_nativeObj, long u_nativeObj, long vt_nativeObj, int flags); + private static native void SVDecomp_1(long src_nativeObj, long w_nativeObj, long u_nativeObj, long vt_nativeObj); + + // C++: void absdiff(Mat src1, Mat src2, Mat& dst) + private static native void absdiff_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void absdiff(Mat src1, Scalar src2, Mat& dst) + private static native void absdiff_1(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: void add(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + private static native void add_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj, int dtype); + private static native void add_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void add_2(long src1_nativeObj, long 
src2_nativeObj, long dst_nativeObj); + + // C++: void add(Mat src1, Scalar src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + private static native void add_3(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, long mask_nativeObj, int dtype); + private static native void add_4(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, long mask_nativeObj); + private static native void add_5(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat& dst, int dtype = -1) + private static native void addWeighted_0(long src1_nativeObj, double alpha, long src2_nativeObj, double beta, double gamma, long dst_nativeObj, int dtype); + private static native void addWeighted_1(long src1_nativeObj, double alpha, long src2_nativeObj, double beta, double gamma, long dst_nativeObj); + + // C++: void batchDistance(Mat src1, Mat src2, Mat& dist, int dtype, Mat& nidx, int normType = NORM_L2, int K = 0, Mat mask = Mat(), int update = 0, bool crosscheck = false) + private static native void batchDistance_0(long src1_nativeObj, long src2_nativeObj, long dist_nativeObj, int dtype, long nidx_nativeObj, int normType, int K, long mask_nativeObj, int update, boolean crosscheck); + private static native void batchDistance_1(long src1_nativeObj, long src2_nativeObj, long dist_nativeObj, int dtype, long nidx_nativeObj, int normType, int K); + private static native void batchDistance_2(long src1_nativeObj, long src2_nativeObj, long dist_nativeObj, int dtype, long nidx_nativeObj); + + // C++: void bitwise_and(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + private static native void bitwise_and_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void bitwise_and_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void bitwise_not(Mat src, Mat& dst, Mat mask = Mat()) + private static native void bitwise_not_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void bitwise_not_1(long src_nativeObj, long dst_nativeObj); + + // C++: void bitwise_or(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + private static native void bitwise_or_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void bitwise_or_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void bitwise_xor(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + private static native void bitwise_xor_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void bitwise_xor_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void calcCovarMatrix(Mat samples, Mat& covar, Mat& mean, int flags, int ctype = CV_64F) + private static native void calcCovarMatrix_0(long samples_nativeObj, long covar_nativeObj, long mean_nativeObj, int flags, int ctype); + private static native void calcCovarMatrix_1(long samples_nativeObj, long covar_nativeObj, long mean_nativeObj, int flags); + + // C++: void cartToPolar(Mat x, Mat y, Mat& magnitude, Mat& angle, bool angleInDegrees = false) + private static native void cartToPolar_0(long x_nativeObj, long y_nativeObj, long magnitude_nativeObj, long angle_nativeObj, boolean angleInDegrees); + private 
static native void cartToPolar_1(long x_nativeObj, long y_nativeObj, long magnitude_nativeObj, long angle_nativeObj); + + // C++: bool checkRange(Mat a, bool quiet = true, _hidden_ * pos = 0, double minVal = -DBL_MAX, double maxVal = DBL_MAX) + private static native boolean checkRange_0(long a_nativeObj, boolean quiet, double minVal, double maxVal); + private static native boolean checkRange_1(long a_nativeObj); + + // C++: void circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + private static native void circle_0(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); + private static native void circle_1(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void circle_2(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: bool clipLine(Rect imgRect, Point& pt1, Point& pt2) + private static native boolean clipLine_0(int imgRect_x, int imgRect_y, int imgRect_width, int imgRect_height, double pt1_x, double pt1_y, double[] pt1_out, double pt2_x, double pt2_y, double[] pt2_out); + + // C++: void compare(Mat src1, Mat src2, Mat& dst, int cmpop) + private static native void compare_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, int cmpop); + + // C++: void compare(Mat src1, Scalar src2, Mat& dst, int cmpop) + private static native void compare_1(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, int cmpop); + + // C++: void completeSymm(Mat& mtx, bool lowerToUpper = false) + private static native void completeSymm_0(long mtx_nativeObj, boolean lowerToUpper); + private static native void completeSymm_1(long mtx_nativeObj); + + // C++: void convertScaleAbs(Mat src, Mat& dst, double alpha = 1, double beta = 0) + private static native void convertScaleAbs_0(long src_nativeObj, long dst_nativeObj, double alpha, double beta); + private static native void convertScaleAbs_1(long src_nativeObj, long dst_nativeObj); + + // C++: int countNonZero(Mat src) + private static native int countNonZero_0(long src_nativeObj); + + // C++: float cubeRoot(float val) + private static native float cubeRoot_0(float val); + + // C++: void dct(Mat src, Mat& dst, int flags = 0) + private static native void dct_0(long src_nativeObj, long dst_nativeObj, int flags); + private static native void dct_1(long src_nativeObj, long dst_nativeObj); + + // C++: double determinant(Mat mtx) + private static native double determinant_0(long mtx_nativeObj); + + // C++: void dft(Mat src, Mat& dst, int flags = 0, int nonzeroRows = 0) + private static native void dft_0(long src_nativeObj, long dst_nativeObj, int flags, int nonzeroRows); + private static native void dft_1(long src_nativeObj, long dst_nativeObj); + + // C++: void divide(Mat src1, Mat src2, Mat& dst, double scale = 1, int dtype = -1) + private static native void divide_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, double scale, int dtype); + private static native void divide_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, double scale); + private static native void divide_2(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void 
divide(double scale, Mat src2, Mat& dst, int dtype = -1) + private static native void divide_3(double scale, long src2_nativeObj, long dst_nativeObj, int dtype); + private static native void divide_4(double scale, long src2_nativeObj, long dst_nativeObj); + + // C++: void divide(Mat src1, Scalar src2, Mat& dst, double scale = 1, int dtype = -1) + private static native void divide_5(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, double scale, int dtype); + private static native void divide_6(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, double scale); + private static native void divide_7(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: bool eigen(Mat src, bool computeEigenvectors, Mat& eigenvalues, Mat& eigenvectors) + private static native boolean eigen_0(long src_nativeObj, boolean computeEigenvectors, long eigenvalues_nativeObj, long eigenvectors_nativeObj); + + // C++: void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + private static native void ellipse_0(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); + private static native void ellipse_1(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void ellipse_2(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, int lineType = 8) + private static native void ellipse_3(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType); + private static native void ellipse_4(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void ellipse_5(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts) + private static native void ellipse2Poly_0(double center_x, double center_y, double axes_width, double axes_height, int angle, int arcStart, int arcEnd, int delta, long pts_mat_nativeObj); + + // C++: void exp(Mat src, Mat& dst) + private static native void exp_0(long src_nativeObj, long dst_nativeObj); + + // C++: void extractChannel(Mat src, Mat& dst, int coi) + private static native void extractChannel_0(long src_nativeObj, long dst_nativeObj, int coi); + 
+ // C++: float fastAtan2(float y, float x) + private static native float fastAtan2_0(float y, float x); + + // C++: void fillConvexPoly(Mat& img, vector_Point points, Scalar color, int lineType = 8, int shift = 0) + private static native void fillConvexPoly_0(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift); + private static native void fillConvexPoly_1(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void fillPoly(Mat& img, vector_vector_Point pts, Scalar color, int lineType = 8, int shift = 0, Point offset = Point()) + private static native void fillPoly_0(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift, double offset_x, double offset_y); + private static native void fillPoly_1(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void findNonZero(Mat src, Mat& idx) + private static native void findNonZero_0(long src_nativeObj, long idx_nativeObj); + + // C++: void flip(Mat src, Mat& dst, int flipCode) + private static native void flip_0(long src_nativeObj, long dst_nativeObj, int flipCode); + + // C++: void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat& dst, int flags = 0) + private static native void gemm_0(long src1_nativeObj, long src2_nativeObj, double alpha, long src3_nativeObj, double gamma, long dst_nativeObj, int flags); + private static native void gemm_1(long src1_nativeObj, long src2_nativeObj, double alpha, long src3_nativeObj, double gamma, long dst_nativeObj); + + // C++: string getBuildInformation() + private static native String getBuildInformation_0(); + + // C++: int64 getCPUTickCount() + private static native long getCPUTickCount_0(); + + // C++: int getNumberOfCPUs() + private static native int getNumberOfCPUs_0(); + + // C++: int getOptimalDFTSize(int vecsize) + private static native int getOptimalDFTSize_0(int vecsize); + + // C++: int64 getTickCount() + private static native long getTickCount_0(); + + // C++: double getTickFrequency() + private static native double getTickFrequency_0(); + + // C++: void hconcat(vector_Mat src, Mat& dst) + private static native void hconcat_0(long src_mat_nativeObj, long dst_nativeObj); + + // C++: void idct(Mat src, Mat& dst, int flags = 0) + private static native void idct_0(long src_nativeObj, long dst_nativeObj, int flags); + private static native void idct_1(long src_nativeObj, long dst_nativeObj); + + // C++: void idft(Mat src, Mat& dst, int flags = 0, int nonzeroRows = 0) + private static native void idft_0(long src_nativeObj, long dst_nativeObj, int flags, int nonzeroRows); + private static native void idft_1(long src_nativeObj, long dst_nativeObj); + + // C++: void inRange(Mat src, Scalar lowerb, Scalar upperb, Mat& dst) + private static native void inRange_0(long src_nativeObj, double lowerb_val0, double lowerb_val1, double lowerb_val2, double lowerb_val3, double upperb_val0, double upperb_val1, double upperb_val2, double upperb_val3, long dst_nativeObj); + + // C++: void insertChannel(Mat src, Mat& dst, int coi) + private static native void insertChannel_0(long src_nativeObj, long dst_nativeObj, int coi); + + // C++: double invert(Mat src, Mat& dst, int flags = DECOMP_LU) + private static native double invert_0(long src_nativeObj, long 
dst_nativeObj, int flags); + private static native double invert_1(long src_nativeObj, long dst_nativeObj); + + // C++: double kmeans(Mat data, int K, Mat& bestLabels, TermCriteria criteria, int attempts, int flags, Mat& centers = Mat()) + private static native double kmeans_0(long data_nativeObj, int K, long bestLabels_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon, int attempts, int flags, long centers_nativeObj); + private static native double kmeans_1(long data_nativeObj, int K, long bestLabels_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon, int attempts, int flags); + + // C++: void line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + private static native void line_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); + private static native void line_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void line_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void log(Mat src, Mat& dst) + private static native void log_0(long src_nativeObj, long dst_nativeObj); + + // C++: void magnitude(Mat x, Mat y, Mat& magnitude) + private static native void magnitude_0(long x_nativeObj, long y_nativeObj, long magnitude_nativeObj); + + // C++: void max(Mat src1, Mat src2, Mat& dst) + private static native void max_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void max(Mat src1, Scalar src2, Mat& dst) + private static native void max_1(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: Scalar mean(Mat src, Mat mask = Mat()) + private static native double[] mean_0(long src_nativeObj, long mask_nativeObj); + private static native double[] mean_1(long src_nativeObj); + + // C++: void meanStdDev(Mat src, vector_double& mean, vector_double& stddev, Mat mask = Mat()) + private static native void meanStdDev_0(long src_nativeObj, long mean_mat_nativeObj, long stddev_mat_nativeObj, long mask_nativeObj); + private static native void meanStdDev_1(long src_nativeObj, long mean_mat_nativeObj, long stddev_mat_nativeObj); + + // C++: void merge(vector_Mat mv, Mat& dst) + private static native void merge_0(long mv_mat_nativeObj, long dst_nativeObj); + + // C++: void min(Mat src1, Mat src2, Mat& dst) + private static native void min_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void min(Mat src1, Scalar src2, Mat& dst) + private static native void min_1(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: void mixChannels(vector_Mat src, vector_Mat dst, vector_int fromTo) + private static native void mixChannels_0(long src_mat_nativeObj, long dst_mat_nativeObj, long fromTo_mat_nativeObj); + + // C++: void mulSpectrums(Mat a, Mat b, Mat& c, int flags, bool conjB = false) + private static native void mulSpectrums_0(long a_nativeObj, long b_nativeObj, long c_nativeObj, int flags, boolean conjB); + private static native void mulSpectrums_1(long a_nativeObj, long b_nativeObj, long c_nativeObj, int flags); + + 
// C++: void mulTransposed(Mat src, Mat& dst, bool aTa, Mat delta = Mat(), double scale = 1, int dtype = -1) + private static native void mulTransposed_0(long src_nativeObj, long dst_nativeObj, boolean aTa, long delta_nativeObj, double scale, int dtype); + private static native void mulTransposed_1(long src_nativeObj, long dst_nativeObj, boolean aTa, long delta_nativeObj, double scale); + private static native void mulTransposed_2(long src_nativeObj, long dst_nativeObj, boolean aTa); + + // C++: void multiply(Mat src1, Mat src2, Mat& dst, double scale = 1, int dtype = -1) + private static native void multiply_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, double scale, int dtype); + private static native void multiply_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, double scale); + private static native void multiply_2(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void multiply(Mat src1, Scalar src2, Mat& dst, double scale = 1, int dtype = -1) + private static native void multiply_3(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, double scale, int dtype); + private static native void multiply_4(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, double scale); + private static native void multiply_5(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: double norm(Mat src1, int normType = NORM_L2, Mat mask = Mat()) + private static native double norm_0(long src1_nativeObj, int normType, long mask_nativeObj); + private static native double norm_1(long src1_nativeObj, int normType); + private static native double norm_2(long src1_nativeObj); + + // C++: double norm(Mat src1, Mat src2, int normType = NORM_L2, Mat mask = Mat()) + private static native double norm_3(long src1_nativeObj, long src2_nativeObj, int normType, long mask_nativeObj); + private static native double norm_4(long src1_nativeObj, long src2_nativeObj, int normType); + private static native double norm_5(long src1_nativeObj, long src2_nativeObj); + + // C++: void normalize(Mat src, Mat& dst, double alpha = 1, double beta = 0, int norm_type = NORM_L2, int dtype = -1, Mat mask = Mat()) + private static native void normalize_0(long src_nativeObj, long dst_nativeObj, double alpha, double beta, int norm_type, int dtype, long mask_nativeObj); + private static native void normalize_1(long src_nativeObj, long dst_nativeObj, double alpha, double beta, int norm_type, int dtype); + private static native void normalize_2(long src_nativeObj, long dst_nativeObj, double alpha, double beta, int norm_type); + private static native void normalize_3(long src_nativeObj, long dst_nativeObj); + + // C++: void patchNaNs(Mat& a, double val = 0) + private static native void patchNaNs_0(long a_nativeObj, double val); + private static native void patchNaNs_1(long a_nativeObj); + + // C++: void perspectiveTransform(Mat src, Mat& dst, Mat m) + private static native void perspectiveTransform_0(long src_nativeObj, long dst_nativeObj, long m_nativeObj); + + // C++: void phase(Mat x, Mat y, Mat& angle, bool angleInDegrees = false) + private static native void phase_0(long x_nativeObj, long y_nativeObj, long angle_nativeObj, boolean angleInDegrees); + private static native void phase_1(long x_nativeObj, long y_nativeObj, long angle_nativeObj); + + // C++: void polarToCart(Mat magnitude, Mat angle, Mat& 
x, Mat& y, bool angleInDegrees = false) + private static native void polarToCart_0(long magnitude_nativeObj, long angle_nativeObj, long x_nativeObj, long y_nativeObj, boolean angleInDegrees); + private static native void polarToCart_1(long magnitude_nativeObj, long angle_nativeObj, long x_nativeObj, long y_nativeObj); + + // C++: void polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + private static native void polylines_0(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); + private static native void polylines_1(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void polylines_2(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void pow(Mat src, double power, Mat& dst) + private static native void pow_0(long src_nativeObj, double power, long dst_nativeObj); + + // C++: void putText(Mat img, string text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1, int lineType = 8, bool bottomLeftOrigin = false) + private static native void putText_0(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, boolean bottomLeftOrigin); + private static native void putText_1(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void putText_2(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void randShuffle_(Mat& dst, double iterFactor = 1.) 
+ private static native void randShuffle_0(long dst_nativeObj, double iterFactor); + private static native void randShuffle_1(long dst_nativeObj); + + // C++: void randn(Mat& dst, double mean, double stddev) + private static native void randn_0(long dst_nativeObj, double mean, double stddev); + + // C++: void randu(Mat& dst, double low, double high) + private static native void randu_0(long dst_nativeObj, double low, double high); + + // C++: void rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) + private static native void rectangle_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); + private static native void rectangle_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void rectangle_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void reduce(Mat src, Mat& dst, int dim, int rtype, int dtype = -1) + private static native void reduce_0(long src_nativeObj, long dst_nativeObj, int dim, int rtype, int dtype); + private static native void reduce_1(long src_nativeObj, long dst_nativeObj, int dim, int rtype); + + // C++: void repeat(Mat src, int ny, int nx, Mat& dst) + private static native void repeat_0(long src_nativeObj, int ny, int nx, long dst_nativeObj); + + // C++: void scaleAdd(Mat src1, double alpha, Mat src2, Mat& dst) + private static native void scaleAdd_0(long src1_nativeObj, double alpha, long src2_nativeObj, long dst_nativeObj); + + // C++: void setErrorVerbosity(bool verbose) + private static native void setErrorVerbosity_0(boolean verbose); + + // C++: void setIdentity(Mat& mtx, Scalar s = Scalar(1)) + private static native void setIdentity_0(long mtx_nativeObj, double s_val0, double s_val1, double s_val2, double s_val3); + private static native void setIdentity_1(long mtx_nativeObj); + + // C++: bool solve(Mat src1, Mat src2, Mat& dst, int flags = DECOMP_LU) + private static native boolean solve_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, int flags); + private static native boolean solve_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: int solveCubic(Mat coeffs, Mat& roots) + private static native int solveCubic_0(long coeffs_nativeObj, long roots_nativeObj); + + // C++: double solvePoly(Mat coeffs, Mat& roots, int maxIters = 300) + private static native double solvePoly_0(long coeffs_nativeObj, long roots_nativeObj, int maxIters); + private static native double solvePoly_1(long coeffs_nativeObj, long roots_nativeObj); + + // C++: void sort(Mat src, Mat& dst, int flags) + private static native void sort_0(long src_nativeObj, long dst_nativeObj, int flags); + + // C++: void sortIdx(Mat src, Mat& dst, int flags) + private static native void sortIdx_0(long src_nativeObj, long dst_nativeObj, int flags); + + // C++: void split(Mat m, vector_Mat& mv) + private static native void split_0(long m_nativeObj, long mv_mat_nativeObj); + + // C++: void sqrt(Mat src, Mat& dst) + private static native void sqrt_0(long src_nativeObj, long dst_nativeObj); + + // C++: void subtract(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + private static native void subtract_0(long 
src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj, int dtype); + private static native void subtract_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void subtract_2(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void subtract(Mat src1, Scalar src2, Mat& dst, Mat mask = Mat(), int dtype = -1) + private static native void subtract_3(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, long mask_nativeObj, int dtype); + private static native void subtract_4(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, long mask_nativeObj); + private static native void subtract_5(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); + + // C++: Scalar sum(Mat src) + private static native double[] sumElems_0(long src_nativeObj); + + // C++: Scalar trace(Mat mtx) + private static native double[] trace_0(long mtx_nativeObj); + + // C++: void transform(Mat src, Mat& dst, Mat m) + private static native void transform_0(long src_nativeObj, long dst_nativeObj, long m_nativeObj); + + // C++: void transpose(Mat src, Mat& dst) + private static native void transpose_0(long src_nativeObj, long dst_nativeObj); + + // C++: void vconcat(vector_Mat src, Mat& dst) + private static native void vconcat_0(long src_mat_nativeObj, long dst_nativeObj); + private static native double[] n_minMaxLocManual(long src_nativeObj, long mask_nativeObj); + private static native double[] n_getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine); + +} diff --git a/src/org/opencv/core/CvException.java b/src/org/opencv/core/CvException.java new file mode 100644 index 0000000..e9241e6 --- /dev/null +++ b/src/org/opencv/core/CvException.java @@ -0,0 +1,15 @@ +package org.opencv.core; + +public class CvException extends RuntimeException { + + private static final long serialVersionUID = 1L; + + public CvException(String msg) { + super(msg); + } + + @Override + public String toString() { + return "CvException [" + super.toString() + "]"; + } +} diff --git a/src/org/opencv/core/CvType.java b/src/org/opencv/core/CvType.java new file mode 100644 index 0000000..748c1cd --- /dev/null +++ b/src/org/opencv/core/CvType.java @@ -0,0 +1,136 @@ +package org.opencv.core; + +public final class CvType { + + // type depth constants + public static final int + CV_8U = 0, CV_8S = 1, + CV_16U = 2, CV_16S = 3, + CV_32S = 4, + CV_32F = 5, + CV_64F = 6, + CV_USRTYPE1 = 7; + + // predefined type constants + public static final int + CV_8UC1 = CV_8UC(1), CV_8UC2 = CV_8UC(2), CV_8UC3 = CV_8UC(3), CV_8UC4 = CV_8UC(4), + CV_8SC1 = CV_8SC(1), CV_8SC2 = CV_8SC(2), CV_8SC3 = CV_8SC(3), CV_8SC4 = CV_8SC(4), + CV_16UC1 = CV_16UC(1), CV_16UC2 = CV_16UC(2), CV_16UC3 = CV_16UC(3), CV_16UC4 = CV_16UC(4), + CV_16SC1 = CV_16SC(1), CV_16SC2 = CV_16SC(2), CV_16SC3 = CV_16SC(3), CV_16SC4 = CV_16SC(4), + CV_32SC1 = CV_32SC(1), CV_32SC2 = CV_32SC(2), CV_32SC3 = CV_32SC(3), CV_32SC4 = CV_32SC(4), + CV_32FC1 = CV_32FC(1), CV_32FC2 = CV_32FC(2), CV_32FC3 = CV_32FC(3), CV_32FC4 = CV_32FC(4), + CV_64FC1 = CV_64FC(1), CV_64FC2 = CV_64FC(2), CV_64FC3 = CV_64FC(3), CV_64FC4 = CV_64FC(4); + + private static final int CV_CN_MAX = 512, CV_CN_SHIFT = 3, CV_DEPTH_MAX = (1 << CV_CN_SHIFT); + + public static final int makeType(int depth, int channels) { + if (channels <= 0 || 
channels >= CV_CN_MAX) { + throw new java.lang.UnsupportedOperationException( + "Channels count should be 1.." + (CV_CN_MAX - 1)); + } + if (depth < 0 || depth >= CV_DEPTH_MAX) { + throw new java.lang.UnsupportedOperationException( + "Data type depth should be 0.." + (CV_DEPTH_MAX - 1)); + } + return (depth & (CV_DEPTH_MAX - 1)) + ((channels - 1) << CV_CN_SHIFT); + } + + public static final int CV_8UC(int ch) { + return makeType(CV_8U, ch); + } + + public static final int CV_8SC(int ch) { + return makeType(CV_8S, ch); + } + + public static final int CV_16UC(int ch) { + return makeType(CV_16U, ch); + } + + public static final int CV_16SC(int ch) { + return makeType(CV_16S, ch); + } + + public static final int CV_32SC(int ch) { + return makeType(CV_32S, ch); + } + + public static final int CV_32FC(int ch) { + return makeType(CV_32F, ch); + } + + public static final int CV_64FC(int ch) { + return makeType(CV_64F, ch); + } + + public static final int channels(int type) { + return (type >> CV_CN_SHIFT) + 1; + } + + public static final int depth(int type) { + return type & (CV_DEPTH_MAX - 1); + } + + public static final boolean isInteger(int type) { + return depth(type) < CV_32F; + } + + public static final int ELEM_SIZE(int type) { + switch (depth(type)) { + case CV_8U: + case CV_8S: + return channels(type); + case CV_16U: + case CV_16S: + return 2 * channels(type); + case CV_32S: + case CV_32F: + return 4 * channels(type); + case CV_64F: + return 8 * channels(type); + default: + throw new java.lang.UnsupportedOperationException( + "Unsupported CvType value: " + type); + } + } + + public static final String typeToString(int type) { + String s; + switch (depth(type)) { + case CV_8U: + s = "CV_8U"; + break; + case CV_8S: + s = "CV_8S"; + break; + case CV_16U: + s = "CV_16U"; + break; + case CV_16S: + s = "CV_16S"; + break; + case CV_32S: + s = "CV_32S"; + break; + case CV_32F: + s = "CV_32F"; + break; + case CV_64F: + s = "CV_64F"; + break; + case CV_USRTYPE1: + s = "CV_USRTYPE1"; + break; + default: + throw new java.lang.UnsupportedOperationException( + "Unsupported CvType value: " + type); + } + + int ch = channels(type); + if (ch <= 4) + return s + "C" + ch; + else + return s + "C(" + ch + ")"; + } + +} diff --git a/src/org/opencv/core/Mat.java b/src/org/opencv/core/Mat.java new file mode 100644 index 0000000..f381e61 --- /dev/null +++ b/src/org/opencv/core/Mat.java @@ -0,0 +1,2843 @@ +package org.opencv.core; + +// C++: class Mat +/** + *

OpenCV C++ n-dimensional dense array class
+ *
+ * // C++ code:
+ * class CV_EXPORTS Mat
+ * public:
+ * //... a lot of methods......
+ * / *! includes several bit-fields:
+ *   - the magic signature
+ *   - continuity flag
+ *   - depth
+ *   - number of channels
+ * * /
+ * int flags;
+ * //! the array dimensionality, >= 2
+ * int dims;
+ * //! the number of rows and columns or (-1, -1) when the array has more than 2 dimensions
+ * int rows, cols;
+ * //! pointer to the data
+ * uchar* data;
+ * //! pointer to the reference counter;
+ * // when array points to user-allocated data, the pointer is NULL
+ * int* refcount;
+ * // other members...
+ * };
+ *
+ * The class Mat represents an n-dimensional dense numerical single-channel or multi-channel array. It can be used to store real or complex-valued vectors and matrices, grayscale or color images, voxel volumes, vector fields, point clouds, tensors, histograms (though, very high-dimensional histograms may be better stored in a SparseMat). The data layout of the array M is defined by the array M.step[], so that the address of element (i_0,...,i_(M.dims-1)), where 0 <= i_k < M.size[k], is computed as:
+ *
+ * addr(M_(i_0,...,i_(M.dims-1))) = M.data + M.step[0]*i_0 + M.step[1]*i_1 +... + M.step[M.dims-1]*i_(M.dims-1)
+ *
+ * In case of a 2-dimensional array, the above formula is reduced to:
+ *
+ * addr(M_(i,j)) = M.data + M.step[0]*i + M.step[1]*j
+ *
+ * Note that M.step[i] >= M.step[i+1] (in fact, M.step[i] >= M.step[i+1]*M.size[i+1]). This means that 2-dimensional matrices are stored row-by-row, 3-dimensional matrices are stored plane-by-plane, and so on. M.step[M.dims-1] is minimal and always equal to the element size M.elemSize().
+ *
+ * So, the data layout in Mat is fully compatible with CvMat, IplImage, and CvMatND types from OpenCV 1.x. It is also compatible with the majority of dense array types from the standard toolkits and SDKs, such as Numpy (ndarray), Win32 (independent device bitmaps), and others, that is, with any array that uses *steps* (or *strides*) to compute the position of a pixel. Due to this compatibility, it is possible to make a Mat header for user-allocated data and process it in-place using OpenCV functions.
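+ *
+ * The step values are not exposed by this Java wrapper, but for a continuous matrix they follow from cols() and elemSize(). A sketch of the 2-D formula in Java (assumes no row padding, which holds for matrices freshly created by the constructors in this class):
+ *
+ * // Java code:
+ * Mat M = new Mat(4, 5, CvType.CV_8UC3);
+ * long step1 = M.elemSize();        // bytes per element (3 for CV_8UC3)
+ * long step0 = M.cols() * step1;    // bytes per row, since M is continuous
+ * long off = 2*step0 + 3*step1;     // byte offset of element (i=2, j=3) from M.data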

There are many different ways to create a Mat object. The most popular options are listed below (a short Java counterpart follows the list):
+ *
+ *   - Use the create(nrows, ncols, type) method or the similar Mat(nrows, ncols, type[, fillValue]) constructor. A new array of the specified size and type is allocated. type has the same meaning as in the cvCreateMat method. For example, CV_8UC1 means a 8-bit single-channel array, CV_32FC2 means a 2-channel (complex) floating-point array, and so on.
+ *
+ * // C++ code:
+ * // make a 7x7 complex matrix filled with 1+3j.
+ * Mat M(7,7,CV_32FC2,Scalar(1,3));
+ * // and now turn M to a 100x60 15-channel 8-bit matrix.
+ * // The old content will be deallocated
+ * M.create(100,60,CV_8UC(15));
+ *
+ * As noted in the introduction to this chapter, create() allocates only a new array when the shape or type of the current array are different from the specified ones.
+ *
+ *   - Create a multi-dimensional array:
+ *
+ * // C++ code:
+ * // create a 100x100x100 8-bit array
+ * int sz[] = {100, 100, 100};
+ * Mat bigCube(3, sz, CV_8U, Scalar.all(0));
+ *
+ * It passes the number of dimensions =1 to the Mat constructor but the created array will be 2-dimensional with the number of columns set to 1. So, Mat.dims is always >= 2 (can also be 0 when the array is empty).
+ *
+ *   - Use a copy constructor or assignment operator where there can be an array or expression on the right side (see below). As noted in the introduction, the array assignment is an O(1) operation because it only copies the header and increases the reference counter. The Mat.clone() method can be used to get a full (deep) copy of the array when you need it.
+ *
+ *   - Construct a header for a part of another array. It can be a single row, single column, several rows, several columns, a rectangular region in the array (called a *minor* in algebra) or a diagonal. Such operations are also O(1) because the new header references the same data. You can actually modify a part of the array using this feature, for example:
+ *
+ * // C++ code:
+ * // add the 5-th row, multiplied by 3 to the 3rd row
+ * M.row(3) = M.row(3) + M.row(5)*3;
+ * // now copy the 7-th column to the 1-st column
+ * // M.col(1) = M.col(7); // this will not work
+ * Mat M1 = M.col(1);
+ * M.col(7).copyTo(M1);
+ * // create a new 320x240 image
+ * Mat img(Size(320,240),CV_8UC3);
+ * // select a ROI
+ * Mat roi(img, Rect(10,10,100,100));
+ * // fill the ROI with (0,255,0) (which is green in RGB space);
+ * // the original 320x240 image will be modified
+ * roi = Scalar(0,255,0);
+ *
+ * Due to the additional datastart and dataend members, it is possible to compute a relative sub-array position in the main *container* array using locateROI():
+ *
+ * // C++ code:
+ * Mat A = Mat.eye(10, 10, CV_32S);
+ * // extracts A columns, 1 (inclusive) to 3 (exclusive).
+ * Mat B = A(Range.all(), Range(1, 3));
+ * // extracts B rows, 5 (inclusive) to 9 (exclusive).
+ * // that is, C ~ A(Range(5, 9), Range(1, 3))
+ * Mat C = B(Range(5, 9), Range.all());
+ * Size size; Point ofs;
+ * C.locateROI(size, ofs);
+ * // size will be (width=10,height=10) and the ofs will be (x=1, y=5)
+ *
+ * As in case of whole matrices, if you need a deep copy, use the clone() method of the extracted sub-matrices.
+ *
+ *   - Make a header for user-allocated data. It can be useful to do the following:
+ *     - Process "foreign" data using OpenCV (for example, when you implement a DirectShow* filter or a processing module for gstreamer, and so on). For example:
+ *
+ * // C++ code:
+ * void process_video_frame(const unsigned char* pixels,
+ *     int width, int height, int step)
+ * Mat img(height, width, CV_8UC3, pixels, step);
+ * GaussianBlur(img, img, Size(7,7), 1.5, 1.5);
+ *
+ *     - Quickly initialize small matrices and/or get a super-fast element access.
+ *
+ * // C++ code:
+ * double m[3][3] = {{a, b, c}, {d, e, f}, {g, h, i}};
+ * Mat M = Mat(3, 3, CV_64F, m).inv();
+ *
+ * Partial yet very common cases of this *user-allocated data* case are conversions from CvMat and IplImage to Mat. For this purpose, there are special constructors taking pointers to CvMat or IplImage and the optional flag indicating whether to copy the data or not. Backward conversion from Mat to CvMat or IplImage is provided via cast operators Mat.operator CvMat() const and Mat.operator IplImage(). The operators do NOT copy the data.
+ *
+ * // C++ code:
+ * IplImage* img = cvLoadImage("greatwave.jpg", 1);
+ * Mat mtx(img); // convert IplImage* -> Mat
+ * CvMat oldmat = mtx; // convert Mat -> CvMat
+ * CV_Assert(oldmat.cols == img->width && oldmat.rows == img->height &&
+ *     oldmat.data.ptr == (uchar*)img->imageData && oldmat.step == img->widthStep);
+ *
+ *   - Use MATLAB-style array initializers, zeros(), ones(), eye(), for example:
+ *
+ * // C++ code:
+ * // create a double-precision identity matrix and add it to M.
+ * M += Mat.eye(M.rows, M.cols, CV_64F);
+ *
+ *   - Use a comma-separated initializer:
+ *
+ * // C++ code:
+ * // create a 3x3 double-precision identity matrix
+ * Mat M = (Mat_(3,3) << 1, 0, 0, 0, 1, 0, 0, 0, 1);
+ *
+ * With this approach, you first call a constructor of the "Mat_" class with the proper parameters, and then you just put << operator followed by comma-separated values that can be constants, variables, expressions, and so on. Also, note the extra parentheses required to avoid compilation errors.
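+ *
+ * A short Java counterpart of the creation patterns above (a sketch; matrix expressions and the comma-separated initializer have no equivalent in this wrapper):
+ *
+ * // Java code:
+ * Mat M = new Mat(7, 7, CvType.CV_32FC2, new Scalar(1, 3)); // 7x7 complex matrix filled with 1+3j
+ * M.create(100, 60, CvType.CV_8UC(15));                     // reallocate; the old content is deallocated
+ * Mat A = Mat.eye(10, 10, CvType.CV_32SC1);                 // MATLAB-style initializer
+ * Mat B = A.colRange(1, 3);                                 // O(1) header for columns 1..2, data is shared
+ * Mat C = B.rowRange(5, 9).clone();                         // deep copy of a sub-matrix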

Once the array is created, it is automatically managed via a reference-counting mechanism. If the array header is built on top of user-allocated data, you should handle the data by yourself. The array data is deallocated when no one points to it. If you want to release the data pointed to by an array header before the array destructor is called, use Mat.release().
+ *
+ * The next important thing to learn about the array class is element access. This manual already described how to compute an address of each array element. Normally, you are not required to use the formula directly in the code. If you know the array element type (which can be retrieved using the method Mat.type()), you can access the element M_(i,j) of a 2-dimensional array as:
+ *
+ * // C++ code:
+ * M.at<double>(i,j) += 1.f;
+ *
+ * assuming that M is a double-precision floating-point array. There are several variants of the method at for a different number of dimensions.
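+ *
+ * This Java wrapper has no at() method; the closest equivalent is the get/put pair defined later in this generated class (a sketch):
+ *
+ * // Java code:
+ * Mat M = new Mat(3, 3, CvType.CV_64FC1, Scalar.all(0));
+ * double[] v = M.get(1, 2);     // read element (row 1, col 2), one value per channel
+ * M.put(1, 2, v[0] + 1.0);      // write it back, incremented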

If you need to process a whole row of a 2D array, the most efficient way is to get the pointer to the row first, and then just use the plain C operator []:
+ *
+ * // C++ code:
+ * // compute sum of positive matrix elements
+ * // (assuming that M is a double-precision matrix)
+ * double sum=0;
+ * for(int i = 0; i < M.rows; i++)
+ * {
+ *     const double* Mi = M.ptr<double>(i);
+ *     for(int j = 0; j < M.cols; j++)
+ *         sum += std.max(Mi[j], 0.);
+ * }
+ *
+ * Some operations, like the one above, do not actually depend on the array shape. They just process elements of an array one by one (or elements from multiple arrays that have the same coordinates, for example, array addition). Such operations are called *element-wise*. It makes sense to check whether all the input/output arrays are continuous, namely, have no gaps at the end of each row. If yes, process them as a long single row:
+ *
+ * // C++ code:
+ * // compute the sum of positive matrix elements, optimized variant
+ * double sum=0;
+ * int cols = M.cols, rows = M.rows;
+ * if(M.isContinuous())
+ * {
+ *     cols *= rows;
+ *     rows = 1;
+ * }
+ * for(int i = 0; i < rows; i++)
+ * {
+ *     const double* Mi = M.ptr<double>(i);
+ *     for(int j = 0; j < cols; j++)
+ *         sum += std.max(Mi[j], 0.);
+ * }
+ *
+ * In case of the continuous matrix, the outer loop body is executed just once. So, the overhead is smaller, which is especially noticeable in case of small matrices.
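+ *
+ * The same continuity trick applies in Java through the bulk get accessor defined later in this class (a sketch; assumes a continuous CV_64F matrix M):
+ *
+ * // Java code:
+ * double[] buf = new double[(int) M.total() * M.channels()];
+ * M.get(0, 0, buf);                       // one bulk copy of the whole (continuous) data
+ * double sum = 0;
+ * for (double v : buf) sum += Math.max(v, 0.0);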

Finally, there are STL-style iterators that are smart enough to skip gaps between successive rows:
+ *
+ * // C++ code:
+ * // compute sum of positive matrix elements, iterator-based variant
+ * double sum=0;
+ * MatConstIterator_<double> it = M.begin<double>(), it_end = M.end<double>();
+ * for(; it != it_end; ++it)
+ *     sum += std.max(*it, 0.);
+ *
+ * The matrix iterators are random-access iterators, so they can be passed to any STL algorithm, including std.sort().

+ * + * @see org.opencv.core.Mat + */ +public class Mat { + + public final long nativeObj; + + public Mat(long addr) + { + if (addr == 0) + throw new java.lang.UnsupportedOperationException("Native object address is NULL"); + nativeObj = addr; + } + + // + // C++: Mat::Mat() + // + +/** + *

Various Mat constructors
+ *
+ * These are various constructors that form a matrix. As noted in the "AutomaticAllocation", often the default constructor is enough, and the proper matrix will be allocated by an OpenCV function. The constructed matrix can further be assigned to another matrix or matrix expression or can be allocated with "Mat.create". In the former case, the old content is de-referenced.

+ * + * @see org.opencv.core.Mat.Mat + */ + public Mat() + { + + nativeObj = n_Mat(); + + return; + } + + // + // C++: Mat::Mat(int rows, int cols, int type) + // + +/** + *

Various Mat constructors
+ *
+ * These are various constructors that form a matrix. As noted in the "AutomaticAllocation", often the default constructor is enough, and the proper matrix will be allocated by an OpenCV function. The constructed matrix can further be assigned to another matrix or matrix expression or can be allocated with "Mat.create". In the former case, the old content is de-referenced.
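+ *
+ * For example (a sketch using the CvType constants added in this patch):
+ *
+ * // Java code:
+ * Mat m = new Mat(480, 640, CvType.CV_8UC3); // 480 rows (height) x 640 cols (width), 3 channels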

+ * + * @param rows Number of rows in a 2D array. + * @param cols Number of columns in a 2D array. + * @param type Array type. Use CV_8UC1,..., CV_64FC4 to create 1-4 + * channel matrices, or CV_8UC(n),..., CV_64FC(n) to create + * multi-channel (up to CV_MAX_CN channels) matrices. + * + * @see org.opencv.core.Mat.Mat + */ + public Mat(int rows, int cols, int type) + { + + nativeObj = n_Mat(rows, cols, type); + + return; + } + + // + // C++: Mat::Mat(Size size, int type) + // + +/** + *

Various Mat constructors
+ *
+ * These are various constructors that form a matrix. As noted in the "AutomaticAllocation", often the default constructor is enough, and the proper matrix will be allocated by an OpenCV function. The constructed matrix can further be assigned to another matrix or matrix expression or can be allocated with "Mat.create". In the former case, the old content is de-referenced.
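+ *
+ * Since Size takes the width (columns) first, the following creates the same matrix as new Mat(480, 640, CvType.CV_8UC3) (a sketch):
+ *
+ * // Java code:
+ * Mat m = new Mat(new Size(640, 480), CvType.CV_8UC3);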

+ * + * @param size 2D array size: Size(cols, rows). In the + * Size() constructor, the number of rows and the number of columns + * go in the reverse order. + * @param type Array type. Use CV_8UC1,..., CV_64FC4 to create 1-4 + * channel matrices, or CV_8UC(n),..., CV_64FC(n) to create + * multi-channel (up to CV_MAX_CN channels) matrices. + * + * @see org.opencv.core.Mat.Mat + */ + public Mat(Size size, int type) + { + + nativeObj = n_Mat(size.width, size.height, type); + + return; + } + + // + // C++: Mat::Mat(int rows, int cols, int type, Scalar s) + // + +/** + *

Various Mat constructors
+ *
+ * These are various constructors that form a matrix. As noted in the "AutomaticAllocation", often the default constructor is enough, and the proper matrix will be allocated by an OpenCV function. The constructed matrix can further be assigned to another matrix or matrix expression or can be allocated with "Mat.create". In the former case, the old content is de-referenced.

+ * + * @param rows Number of rows in a 2D array. + * @param cols Number of columns in a 2D array. + * @param type Array type. Use CV_8UC1,..., CV_64FC4 to create 1-4 + * channel matrices, or CV_8UC(n),..., CV_64FC(n) to create + * multi-channel (up to CV_MAX_CN channels) matrices. + * @param s An optional value to initialize each matrix element with. To set all + * the matrix elements to the particular value after the construction, use the + * assignment operator Mat.operator=(const Scalar& value). + * + * @see org.opencv.core.Mat.Mat + */ + public Mat(int rows, int cols, int type, Scalar s) + { + + nativeObj = n_Mat(rows, cols, type, s.val[0], s.val[1], s.val[2], s.val[3]); + + return; + } + + // + // C++: Mat::Mat(Size size, int type, Scalar s) + // + +/** + *

Various Mat constructors
+ *
+ * These are various constructors that form a matrix. As noted in the "AutomaticAllocation", often the default constructor is enough, and the proper matrix will be allocated by an OpenCV function. The constructed matrix can further be assigned to another matrix or matrix expression or can be allocated with "Mat.create". In the former case, the old content is de-referenced.

+ * + * @param size 2D array size: Size(cols, rows). In the + * Size() constructor, the number of rows and the number of columns + * go in the reverse order. + * @param type Array type. Use CV_8UC1,..., CV_64FC4 to create 1-4 + * channel matrices, or CV_8UC(n),..., CV_64FC(n) to create + * multi-channel (up to CV_MAX_CN channels) matrices. + * @param s An optional value to initialize each matrix element with. To set all + * the matrix elements to the particular value after the construction, use the + * assignment operator Mat.operator=(const Scalar& value). + * + * @see org.opencv.core.Mat.Mat + */ + public Mat(Size size, int type, Scalar s) + { + + nativeObj = n_Mat(size.width, size.height, type, s.val[0], s.val[1], s.val[2], s.val[3]); + + return; + } + + // + // C++: Mat::Mat(Mat m, Range rowRange, Range colRange = Range::all()) + // + +/** + *

Various Mat constructors
+ *
+ * These are various constructors that form a matrix. As noted in the "AutomaticAllocation", often the default constructor is enough, and the proper matrix will be allocated by an OpenCV function. The constructed matrix can further be assigned to another matrix or matrix expression or can be allocated with "Mat.create". In the former case, the old content is de-referenced.

+ * + * @param m Array that (as a whole or partly) is assigned to the constructed + * matrix. No data is copied by these constructors. Instead, the header pointing + * to m data or its sub-array is constructed and associated with + * it. The reference counter, if any, is incremented. So, when you modify the + * matrix formed using such a constructor, you also modify the corresponding + * elements of m. If you want to have an independent copy of the + * sub-array, use Mat.clone(). + * @param rowRange Range of the m rows to take. As usual, the range + * start is inclusive and the range end is exclusive. Use Range.all() + * to take all the rows. + * @param colRange Range of the m columns to take. Use + * Range.all() to take all the columns. + * + * @see org.opencv.core.Mat.Mat + */ + public Mat(Mat m, Range rowRange, Range colRange) + { + + nativeObj = n_Mat(m.nativeObj, rowRange.start, rowRange.end, colRange.start, colRange.end); + + return; + } + +/** + *

Various Mat constructors
+ *
+ * These are various constructors that form a matrix. As noted in the "AutomaticAllocation", often the default constructor is enough, and the proper matrix will be allocated by an OpenCV function. The constructed matrix can further be assigned to another matrix or matrix expression or can be allocated with "Mat.create". In the former case, the old content is de-referenced.

+ * + * @param m Array that (as a whole or partly) is assigned to the constructed + * matrix. No data is copied by these constructors. Instead, the header pointing + * to m data or its sub-array is constructed and associated with + * it. The reference counter, if any, is incremented. So, when you modify the + * matrix formed using such a constructor, you also modify the corresponding + * elements of m. If you want to have an independent copy of the + * sub-array, use Mat.clone(). + * @param rowRange Range of the m rows to take. As usual, the range + * start is inclusive and the range end is exclusive. Use Range.all() + * to take all the rows. + * + * @see org.opencv.core.Mat.Mat + */ + public Mat(Mat m, Range rowRange) + { + + nativeObj = n_Mat(m.nativeObj, rowRange.start, rowRange.end); + + return; + } + + // + // C++: Mat::Mat(Mat m, Rect roi) + // + +/** + *

Various Mat constructors
+ *
+ * These are various constructors that form a matrix. As noted in the "AutomaticAllocation", often the default constructor is enough, and the proper matrix will be allocated by an OpenCV function. The constructed matrix can further be assigned to another matrix or matrix expression or can be allocated with "Mat.create". In the former case, the old content is de-referenced.

+ * + * @param m Array that (as a whole or partly) is assigned to the constructed + * matrix. No data is copied by these constructors. Instead, the header pointing + * to m data or its sub-array is constructed and associated with + * it. The reference counter, if any, is incremented. So, when you modify the + * matrix formed using such a constructor, you also modify the corresponding + * elements of m. If you want to have an independent copy of the + * sub-array, use Mat.clone(). + * @param roi Region of interest. + * + * @see org.opencv.core.Mat.Mat + */ + public Mat(Mat m, Rect roi) + { + + nativeObj = n_Mat(m.nativeObj, roi.y, roi.y + roi.height, roi.x, roi.x + roi.width); + + return; + } + + // + // C++: Mat Mat::adjustROI(int dtop, int dbottom, int dleft, int dright) + // + +/** + *

Adjusts a submatrix size and position within the parent matrix.
+ *
+ * The method is complementary to "Mat.locateROI". The typical use of these functions is to determine the submatrix position within the parent matrix and then shift the position somehow. Typically, it can be required for filtering operations when pixels outside of the ROI should be taken into account. When all the method parameters are positive, the ROI needs to grow in all directions by the specified amount, for example:
+ *
+ * // C++ code:
+ * A.adjustROI(2, 2, 2, 2);
+ *
+ * In this example, the matrix size is increased by 4 elements in each direction. The matrix is shifted by 2 elements to the left and 2 elements up, which brings in all the necessary pixels for the filtering with the 5x5 kernel.
+ *
+ * adjustROI forces the adjusted ROI to be inside of the parent matrix; that is, the boundaries of the adjusted ROI are constrained by the boundaries of the parent matrix. For example, if the submatrix A is located in the first row of a parent matrix and you called A.adjustROI(2, 2, 2, 2), then A will not be increased in the upward direction.
+ *
+ * The function is used internally by the OpenCV filtering functions, like "filter2D", morphological operations, and so on.
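+ *
+ * A Java counterpart of the example above (a sketch; uses the Mat(Mat, Rect) constructor added in this patch):
+ *
+ * // Java code:
+ * Mat img = new Mat(400, 400, CvType.CV_8UC1);
+ * Mat A = new Mat(img, new Rect(100, 100, 200, 200)); // 200x200 submatrix header
+ * A = A.adjustROI(2, 2, 2, 2);                        // now a 204x204 header into img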

+ * + * @param dtop Shift of the top submatrix boundary upwards. + * @param dbottom Shift of the bottom submatrix boundary downwards. + * @param dleft Shift of the left submatrix boundary to the left. + * @param dright Shift of the right submatrix boundary to the right. + * + * @see org.opencv.core.Mat.adjustROI + * @see org.opencv.imgproc.Imgproc#copyMakeBorder + */ + public Mat adjustROI(int dtop, int dbottom, int dleft, int dright) + { + + Mat retVal = new Mat(n_adjustROI(nativeObj, dtop, dbottom, dleft, dright)); + + return retVal; + } + + // + // C++: void Mat::assignTo(Mat m, int type = -1) + // + +/** + *

Provides a functional form of convertTo.
+ *
+ * This is an internally used method called by the "MatrixExpressions" engine.

+ * + * @param m Destination array. + * @param type Desired destination array depth (or -1 if it should be the same + * as the source type). + * + * @see org.opencv.core.Mat.assignTo + */ + public void assignTo(Mat m, int type) + { + + n_assignTo(nativeObj, m.nativeObj, type); + + return; + } + +/** + *

Provides a functional form of convertTo.
+ *
+ * This is an internally used method called by the "MatrixExpressions" engine.

+ * + * @param m Destination array. + * + * @see org.opencv.core.Mat.assignTo + */ + public void assignTo(Mat m) + { + + n_assignTo(nativeObj, m.nativeObj); + + return; + } + + // + // C++: int Mat::channels() + // + +/** + *

Returns the number of matrix channels.
+ *
+ * The method returns the number of matrix channels.

+ * + * @see org.opencv.core.Mat.channels + */ + public int channels() + { + + int retVal = n_channels(nativeObj); + + return retVal; + } + + // + // C++: int Mat::checkVector(int elemChannels, int depth = -1, bool + // requireContinuous = true) + // + + public int checkVector(int elemChannels, int depth, boolean requireContinuous) + { + + int retVal = n_checkVector(nativeObj, elemChannels, depth, requireContinuous); + + return retVal; + } + + public int checkVector(int elemChannels, int depth) + { + + int retVal = n_checkVector(nativeObj, elemChannels, depth); + + return retVal; + } + + public int checkVector(int elemChannels) + { + + int retVal = n_checkVector(nativeObj, elemChannels); + + return retVal; + } + + // + // C++: Mat Mat::clone() + // + +/** + *

Creates a full copy of the array and the underlying data.
+ *
+ * The method creates a full copy of the array. The original step[] is not taken into account. So, the array copy is a continuous array occupying total()*elemSize() bytes.

+ * + * @see org.opencv.core.Mat.clone + */ + public Mat clone() + { + + Mat retVal = new Mat(n_clone(nativeObj)); + + return retVal; + } + + // + // C++: Mat Mat::col(int x) + // + +/** + *

Creates a matrix header for the specified matrix column.
+ *
+ * The method makes a new header for the specified matrix column and returns it. This is an O(1) operation, regardless of the matrix size. The underlying data of the new matrix is shared with the original matrix. See also the "Mat.row" description.

+ * + * @param x A 0-based column index. + * + * @see org.opencv.core.Mat.col + */ + public Mat col(int x) + { + + Mat retVal = new Mat(n_col(nativeObj, x)); + + return retVal; + } + + // + // C++: Mat Mat::colRange(int startcol, int endcol) + // + +/** + *

Creates a matrix header for the specified column span.
+ *
+ * The method makes a new header for the specified column span of the matrix. Similarly to "Mat.row" and "Mat.col", this is an O(1) operation.

+ * + * @param startcol An inclusive 0-based start index of the column span. + * @param endcol An exclusive 0-based ending index of the column span. + * + * @see org.opencv.core.Mat.colRange + */ + public Mat colRange(int startcol, int endcol) + { + + Mat retVal = new Mat(n_colRange(nativeObj, startcol, endcol)); + + return retVal; + } + + // + // C++: Mat Mat::colRange(Range r) + // + +/** + *

Creates a matrix header for the specified column span.
+ *
+ * The method makes a new header for the specified column span of the matrix. Similarly to "Mat.row" and "Mat.col", this is an O(1) operation.

+ * + * @param r "Range" structure containing both the start and the end indices. + * + * @see org.opencv.core.Mat.colRange + */ + public Mat colRange(Range r) + { + + Mat retVal = new Mat(n_colRange(nativeObj, r.start, r.end)); + + return retVal; + } + + // + // C++: int Mat::cols() + // + + public int cols() + { + + int retVal = n_cols(nativeObj); + + return retVal; + } + + // + // C++: void Mat::convertTo(Mat& m, int rtype, double alpha = 1, double beta + // = 0) + // + +/** + *

Converts an array to another data type with optional scaling.
+ *
+ * The method converts source pixel values to the target data type. saturate_cast<> is applied at the end to avoid possible overflows:
+ *
+ * m(x,y) = saturate_cast<rType>(alpha*(*this)(x,y) + beta)
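+ *
+ * For example, scaling an 8-bit image into a float matrix in [0, 1] (a sketch):
+ *
+ * // Java code:
+ * Mat bytes = new Mat(10, 10, CvType.CV_8UC1, new Scalar(128));
+ * Mat floats = new Mat();
+ * bytes.convertTo(floats, CvType.CV_32F, 1.0 / 255, 0); // m(x,y) = src(x,y)/255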

+ * + * @param m output matrix; if it does not have a proper size or type before the + * operation, it is reallocated. + * @param rtype desired output matrix type or, rather, the depth since the + * number of channels are the same as the input has; if rtype is + * negative, the output matrix will have the same type as the input. + * @param alpha optional scale factor. + * @param beta optional delta added to the scaled values. + * + * @see org.opencv.core.Mat.convertTo + */ + public void convertTo(Mat m, int rtype, double alpha, double beta) + { + + n_convertTo(nativeObj, m.nativeObj, rtype, alpha, beta); + + return; + } + +/** + *

Converts an array to another data type with optional scaling.
+ *
+ * The method converts source pixel values to the target data type. saturate_cast<> is applied at the end to avoid possible overflows:
+ *
+ * m(x,y) = saturate_cast<rType>(alpha*(*this)(x,y) + beta)

+ * + * @param m output matrix; if it does not have a proper size or type before the + * operation, it is reallocated. + * @param rtype desired output matrix type or, rather, the depth since the + * number of channels are the same as the input has; if rtype is + * negative, the output matrix will have the same type as the input. + * @param alpha optional scale factor. + * + * @see org.opencv.core.Mat.convertTo + */ + public void convertTo(Mat m, int rtype, double alpha) + { + + n_convertTo(nativeObj, m.nativeObj, rtype, alpha); + + return; + } + +/** + *

Converts an array to another data type with optional scaling.
+ *
+ * The method converts source pixel values to the target data type. saturate_cast<> is applied at the end to avoid possible overflows:
+ *
+ * m(x,y) = saturate_cast<rType>(alpha*(*this)(x,y) + beta)

+ * + * @param m output matrix; if it does not have a proper size or type before the + * operation, it is reallocated. + * @param rtype desired output matrix type or, rather, the depth since the + * number of channels are the same as the input has; if rtype is + * negative, the output matrix will have the same type as the input. + * + * @see org.opencv.core.Mat.convertTo + */ + public void convertTo(Mat m, int rtype) + { + + n_convertTo(nativeObj, m.nativeObj, rtype); + + return; + } + + // + // C++: void Mat::copyTo(Mat& m) + // + +/** + *

Copies the matrix to another one.
+ *
+ * The method copies the matrix data to another matrix. Before copying the data, the method invokes
+ *
+ * // C++ code:
+ * m.create(this->size(), this->type());
+ *
+ * so that the destination matrix is reallocated if needed. While m.copyTo(m); works flawlessly, the function does not handle the case of a partial overlap between the source and the destination matrices.
+ *
+ * When the operation mask is specified, and the Mat.create call shown above reallocated the matrix, the newly allocated matrix is initialized with all zeros before copying the data.

+ * + * @param m Destination matrix. If it does not have a proper size or type before + * the operation, it is reallocated. + * + * @see org.opencv.core.Mat.copyTo + */ + public void copyTo(Mat m) + { + + n_copyTo(nativeObj, m.nativeObj); + + return; + } + + // + // C++: void Mat::copyTo(Mat& m, Mat mask) + // + +/** + *

Copies the matrix to another one.
+ *
+ * The method copies the matrix data to another matrix. Before copying the data, the method invokes
+ *
+ * // C++ code:
+ * m.create(this->size(), this->type());
+ *
+ * so that the destination matrix is reallocated if needed. While m.copyTo(m); works flawlessly, the function does not handle the case of a partial overlap between the source and the destination matrices.
+ *
+ * When the operation mask is specified, and the Mat.create call shown above reallocated the matrix, the newly allocated matrix is initialized with all zeros before copying the data.
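+ *
+ * For example, copying only the pixels selected by a non-zero mask region (a sketch):
+ *
+ * // Java code:
+ * Mat src = new Mat(100, 100, CvType.CV_8UC1, new Scalar(255));
+ * Mat dst = Mat.zeros(100, 100, CvType.CV_8UC1);
+ * Mat mask = Mat.zeros(100, 100, CvType.CV_8UC1);
+ * new Mat(mask, new Rect(10, 10, 20, 20)).setTo(new Scalar(255)); // select a 20x20 region
+ * src.copyTo(dst, mask); // only the selected region is copied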

+ * + * @param m Destination matrix. If it does not have a proper size or type before + * the operation, it is reallocated. + * @param mask Operation mask. Its non-zero elements indicate which matrix + * elements need to be copied. + * + * @see org.opencv.core.Mat.copyTo + */ + public void copyTo(Mat m, Mat mask) + { + + n_copyTo(nativeObj, m.nativeObj, mask.nativeObj); + + return; + } + + // + // C++: void Mat::create(int rows, int cols, int type) + // + +/** + *
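To illustrate the masked overload, a small fragment (same setup assumptions as the convertTo sketch above; it would live inside a main method):

    Mat src  = Mat.ones(2, 2, CvType.CV_8UC1);   // [1 1; 1 1]
    Mat mask = Mat.zeros(2, 2, CvType.CV_8UC1);
    mask.put(0, 0, 255);                         // mark only the top-left element
    Mat dst  = Mat.zeros(2, 2, CvType.CV_8UC1);
    src.copyTo(dst, mask);                       // dst is now [1 0; 0 0]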

Allocates new array data if needed.

+ * + *

This is one of the key Mat methods. Most new-style OpenCV + * functions and methods that produce arrays call this method for each output + * array. The method uses the following algorithm:

+ *
    + *
  • If the current array shape and the type match the new ones, return + * immediately. Otherwise, de-reference the previous data by calling + * "Mat.release". + *
  • Initialize the new header. + *
  • Allocate the new data of total()*elemSize() bytes. + *
  • Allocate a new reference counter associated with the data and set + * it to 1. + *
+ *

Such a scheme makes the memory management robust and efficient at the same + * time and helps avoid extra typing for you. This means that usually there is + * no need to explicitly allocate output arrays. That is, instead of writing: + *

+ * + *

// C++ code:

+ * + *

Mat color;...

+ * + *

Mat gray(color.rows, color.cols, color.depth());

+ * + *

cvtColor(color, gray, CV_BGR2GRAY);

+ * + *

you can simply write:

+ * + *

Mat color;...

+ * + *

Mat gray;

+ * + *

cvtColor(color, gray, CV_BGR2GRAY);

+ * + *

because cvtColor, like most OpenCV functions, calls + * Mat.create() for the output array internally. + *

+ * + * @param rows New number of rows. + * @param cols New number of columns. + * @param type New matrix type. + * + * @see org.opencv.core.Mat.create + */ + public void create(int rows, int cols, int type) + { + + n_create(nativeObj, rows, cols, type); + + return; + } + + // + // C++: void Mat::create(Size size, int type) + // + +/** + *

Allocates new array data if needed.

+ * + *

This is one of the key Mat methods. Most new-style OpenCV + * functions and methods that produce arrays call this method for each output + * array. The method uses the following algorithm:

+ *
    + *
  • If the current array shape and the type match the new ones, return + * immediately. Otherwise, de-reference the previous data by calling + * "Mat.release". + *
  • Initialize the new header. + *
  • Allocate the new data of total()*elemSize() bytes. + *
  • Allocate a new reference counter associated with the data and set + * it to 1. + *
+ *

Such a scheme makes the memory management robust and efficient at the same + * time and helps avoid extra typing for you. This means that usually there is + * no need to explicitly allocate output arrays. That is, instead of writing: + *

+ * + *

// C++ code:

+ * + *

Mat color;...

+ * + *

Mat gray(color.rows, color.cols, color.depth());

+ * + *

cvtColor(color, gray, CV_BGR2GRAY);

+ * + *

you can simply write:

+ * + *

Mat color;...

+ * + *

Mat gray;

+ * + *

cvtColor(color, gray, CV_BGR2GRAY);

+ * + *

because cvtColor, like most OpenCV functions, calls + * Mat.create() for the output array internally. + *

+ * + * @param size Alternative new matrix size specification: Size(cols, + * rows) + * @param type New matrix type. + * + * @see org.opencv.core.Mat.create + */ + public void create(Size size, int type) + { + + n_create(nativeObj, size.width, size.height, type); + + return; + } + + // + // C++: Mat Mat::cross(Mat m) + // + +/** + *
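One way to observe the "reallocate only if needed" behaviour from Java is to compare data addresses across create calls (a fragment under the same assumptions as the earlier sketch; dataAddr is the accessor defined later in this file):

    Mat m = new Mat();
    m.create(4, 4, CvType.CV_32F);
    long first = m.dataAddr();
    m.create(4, 4, CvType.CV_32F);                 // same shape and type: no reallocation
    System.out.println(m.dataAddr() == first);     // true
    m.create(8, 8, CvType.CV_32F);                 // different shape: data is reallocated
    System.out.println(m.dataAddr() == first);     // false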

Computes a cross-product of two 3-element vectors.

+ * + *

The method computes a cross-product of two 3-element vectors. The vectors + * must be 3-element floating-point vectors of the same shape and size. The + * result is another 3-element vector of the same shape and type as operands.

+ * + * @param m Another cross-product operand. + * + * @see org.opencv.core.Mat.cross + */ + public Mat cross(Mat m) + { + + Mat retVal = new Mat(n_cross(nativeObj, m.nativeObj)); + + return retVal; + } + + // + // C++: long Mat::dataAddr() + // + + public long dataAddr() + { + + long retVal = n_dataAddr(nativeObj); + + return retVal; + } + + // + // C++: int Mat::depth() + // + +/** + *

Returns the depth of a matrix element.

+ * + *

The method returns the identifier of the matrix element depth (the type of + * each individual channel). For example, for a 16-bit signed 3-channel array, + * the method returns CV_16S. A complete list of matrix types + * contains the following values:

+ *
    + *
  • CV_8U - 8-bit unsigned integers (0..255) + *
  • CV_8S - 8-bit signed integers (-128..127) + *
  • CV_16U - 16-bit unsigned integers (0..65535) + *
  • CV_16S - 16-bit signed integers (-32768..32767) + *
  • CV_32S - 32-bit signed integers (-2147483648..2147483647) + *
  • CV_32F - 32-bit floating-point numbers (-FLT_MAX..FLT_MAX, + * INF, NAN) + *
  • CV_64F - 64-bit floating-point numbers (-DBL_MAX..DBL_MAX, + * INF, NAN) + *
+ * + * @see org.opencv.core.Mat.depth + */ + public int depth() + { + + int retVal = n_depth(nativeObj); + + return retVal; + } + + // + // C++: Mat Mat::diag(int d = 0) + // + +/** + *

Extracts a diagonal from a matrix, or creates a diagonal matrix.

+ * + *

The method makes a new header for the specified matrix diagonal. The new + * matrix is represented as a single-column matrix. Similarly to "Mat.row" and + * "Mat.col", this is an O(1) operation.

+ * + * @param d Single-column matrix that forms a diagonal matrix or index of the + * diagonal, with the following values: + *
    + *
  • d=0 is the main diagonal. + *
  • d>0 is a diagonal from the lower half. For example, d=1 + * means the diagonal is set immediately below the main one. + *
  • d<0 is a diagonal from the upper half. For example, d=-1 + * means the diagonal is set immediately above the main one. + *
+ * + * @see org.opencv.core.Mat.diag + */ + public Mat diag(int d) + { + + Mat retVal = new Mat(n_diag(nativeObj, d)); + + return retVal; + } + +/** + *

Extracts a diagonal from a matrix, or creates a diagonal matrix.

+ * + *

The method makes a new header for the specified matrix diagonal. The new + * matrix is represented as a single-column matrix. Similarly to "Mat.row" and + * "Mat.col", this is an O(1) operation.

+ * + * @see org.opencv.core.Mat.diag + */ + public Mat diag() + { + + Mat retVal = new Mat(n_diag(nativeObj, 0)); + + return retVal; + } + + // + // C++: static Mat Mat::diag(Mat d) + // + +/** + *

Extracts a diagonal from a matrix, or creates a diagonal matrix.

+ * + *

The method makes a new header for the specified matrix diagonal. The new + * matrix is represented as a single-column matrix. Similarly to "Mat.row" and + * "Mat.col", this is an O(1) operation.

+ * + * @param d Single-column matrix that forms a diagonal matrix or index of the + * diagonal, with the following values: + *
    + *
  • d=0 is the main diagonal. + *
  • d>0 is a diagonal from the lower half. For example, d=1 + * means the diagonal is set immediately below the main one. + *
  • d<0 is a diagonal from the upper half. For example, d=-1 + * means the diagonal is set immediately above the main one. + *
+ * + * @see org.opencv.core.Mat.diag + */ + public static Mat diag(Mat d) + { + + Mat retVal = new Mat(n_diag(d.nativeObj)); + + return retVal; + } + + // + // C++: double Mat::dot(Mat m) + // + +/** + *

Computes a dot-product of two vectors.

+ * + *

The method computes a dot-product of two matrices. If the matrices are not + * single-column or single-row vectors, the top-to-bottom left-to-right scan + * ordering is used to treat them as 1D vectors. The vectors must have the same + * size and type. If the matrices have more than one channel, the dot products + * from all the channels are summed together.

+ * + * @param m another dot-product operand. + * + * @see org.opencv.core.Mat.dot + */ + public double dot(Mat m) + { + + double retVal = n_dot(nativeObj, m.nativeObj); + + return retVal; + } + + // + // C++: size_t Mat::elemSize() + // + +/** + *
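The two vector products side by side, as a fragment with hypothetical values (same setup assumptions as above):

    Mat a = new Mat(3, 1, CvType.CV_64F);
    a.put(0, 0, 1, 0, 0);      // unit vector along x
    Mat b = new Mat(3, 1, CvType.CV_64F);
    b.put(0, 0, 0, 1, 0);      // unit vector along y
    Mat c = a.cross(b);        // (0, 0, 1), the z unit vector
    double p = a.dot(b);       // 0.0 for orthogonal vectors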

Returns the matrix element size in bytes.

+ * + *

The method returns the matrix element size in bytes. For example, if the + * matrix type is CV_16SC3, the method returns 3*sizeof(short) + * or 6.

+ * + * @see org.opencv.core.Mat.elemSize + */ + public long elemSize() + { + + long retVal = n_elemSize(nativeObj); + + return retVal; + } + + // + // C++: size_t Mat::elemSize1() + // + +/** + *

Returns the size of each matrix element channel in bytes.

+ * + *

The method returns the matrix element channel size in bytes, that is, it + * ignores the number of channels. For example, if the matrix type is + * CV_16SC3, the method returns sizeof(short) or 2.

+ * + * @see org.opencv.core.Mat.elemSize1 + */ + public long elemSize1() + { + + long retVal = n_elemSize1(nativeObj); + + return retVal; + } + + // + // C++: bool Mat::empty() + // + +/** + *

Returns true if the array has no elements.

+ * + *

The method returns true if Mat.total() is 0 or if + * Mat.data is NULL. Because of the pop_back() and + * resize() methods, M.total() == 0 does not imply that + * M.data == NULL.

+ * + * @see org.opencv.core.Mat.empty + */ + public boolean empty() + { + + boolean retVal = n_empty(nativeObj); + + return retVal; + } + + // + // C++: static Mat Mat::eye(int rows, int cols, int type) + // + +/** + *

Returns an identity matrix of the specified size and type.

+ * + *

The method returns a Matlab-style identity matrix initializer, similarly to + * "Mat.zeros". Similarly to "Mat.ones", you can use a scale operation to + * create a scaled identity matrix efficiently:

+ * + *

// C++ code:

+ * + *

// make a 4x4 diagonal matrix with 0.1's on the diagonal.

+ * + *

Mat A = Mat.eye(4, 4, CV_32F)*0.1;

+ * + * @param rows Number of rows. + * @param cols Number of columns. + * @param type Created matrix type. + * + * @see org.opencv.core.Mat.eye + */ + public static Mat eye(int rows, int cols, int type) + { + + Mat retVal = new Mat(n_eye(rows, cols, type)); + + return retVal; + } + + // + // C++: static Mat Mat::eye(Size size, int type) + // + +/** + *

Returns an identity matrix of the specified size and type.

+ * + *

The method returns a Matlab-style identity matrix initializer, similarly to + * "Mat.zeros". Similarly to "Mat.ones", you can use a scale operation to + * create a scaled identity matrix efficiently:

+ * + *

// C++ code:

+ * + *

// make a 4x4 diagonal matrix with 0.1's on the diagonal.

+ * + *

Mat A = Mat.eye(4, 4, CV_32F)*0.1;

+ * + * @param size Alternative matrix size specification as Size(cols, + * rows). + * @param type Created matrix type. + * + * @see org.opencv.core.Mat.eye + */ + public static Mat eye(Size size, int type) + { + + Mat retVal = new Mat(n_eye(size.width, size.height, type)); + + return retVal; + } + + // + // C++: Mat Mat::inv(int method = DECOMP_LU) + // + +/** + *
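Java has no operator overloading, so the C++ idiom Mat.eye(4, 4, CV_32F)*0.1 does not translate literally; one workable substitute is the convertTo scale factor documented earlier in this file (a sketch, same assumptions; rtype -1 keeps the input type):

    Mat A = Mat.eye(4, 4, CvType.CV_32F);
    A.convertTo(A, -1, 0.1, 0);    // scale in place: a 4x4 diagonal matrix of 0.1s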

Inverts a matrix.

+ * + *

The method performs a matrix inversion by means of matrix expressions. This + * means that a temporary matrix inversion object is returned by the method and + * can be used further as a part of more complex matrix expressions or can be + * assigned to a matrix.

+ * + * @param method Matrix inversion method. Possible values are the following: + *
    + *
  • DECOMP_LU is the LU decomposition. The matrix must be non-singular. + *
  • DECOMP_CHOLESKY is the Cholesky LL^T decomposition for + * symmetric positive-definite matrices only. This method is about twice as fast + * as LU on big matrices. + *
  • DECOMP_SVD is the SVD decomposition. If the matrix is singular or even + * non-square, the pseudo inversion is computed. + *
+ * + * @see org.opencv.core.Mat.inv + */ + public Mat inv(int method) + { + + Mat retVal = new Mat(n_inv(nativeObj, method)); + + return retVal; + } + +/** + *

Inverts a matrix.

+ * + *

The method performs a matrix inversion by means of matrix expressions. This + * means that a temporary matrix inversion object is returned by the method and + * can be used further as a part of more complex matrix expressions or can be + * assigned to a matrix.

+ * + * @see org.opencv.core.Mat.inv + */ + public Mat inv() + { + + Mat retVal = new Mat(n_inv(nativeObj)); + + return retVal; + } + + // + // C++: bool Mat::isContinuous() + // + +/** + *

Reports whether the matrix is continuous or not.

+ * + *

The method returns true if the matrix elements are stored + * continuously without gaps at the end of each row. Otherwise, it returns + * false. Obviously, 1x1 or 1xN matrices + * are always continuous. Matrices created with "Mat.create" are always + * continuous. But if you extract a part of the matrix using "Mat.col", + * "Mat.diag", and so on, or construct a matrix header for externally + * allocated data, such matrices may no longer have this property. + * The continuity flag is stored as a bit in the Mat.flags field + * and is computed automatically when you construct a matrix header. Thus, the + * continuity check is a very fast operation, though theoretically it could be + * done as follows:

+ * + *

// C++ code:

+ * + *

// alternative implementation of Mat.isContinuous()

+ * + *

bool myCheckMatContinuity(const Mat& m)

+ * + * + *

//return (m.flags & Mat.CONTINUOUS_FLAG) != 0;

+ * + *

return m.rows == 1 || m.step == m.cols*m.elemSize();

+ * + * + *

The method is used in quite a few of OpenCV functions. The point is that + * element-wise operations (such as arithmetic and logical operations, math + * functions, alpha blending, color space transformations, and others) do not + * depend on the image geometry. Thus, if all the input and output arrays are + * continuous, the functions can process them as very long single-row vectors. + * The example below illustrates how an alpha-blending function can be + * implemented.

+ * + *

template<typename T>

+ * + *

void alphaBlendRGBA(const Mat& src1, const Mat& src2, Mat& dst)

+ * + * + *

const float alpha_scale = (float)std.numeric_limits<T>.max(),

+ * + *

inv_scale = 1.f/alpha_scale;

+ * + *

CV_Assert(src1.type() == src2.type() &&

+ * + *

src1.type() == CV_MAKETYPE(DataType<T>.depth, 4) &&

+ * + *

src1.size() == src2.size());

+ * + *

Size size = src1.size();

+ * + *

dst.create(size, src1.type());

+ * + *

// here is the idiom: check the arrays for continuity and,

+ * + *

// if this is the case,

+ * + *

// treat the arrays as 1D vectors

+ * + *

if(src1.isContinuous() && src2.isContinuous() && dst.isContinuous())

+ * + * + *

size.width *= size.height;

+ * + *

size.height = 1;

+ * + * + *

size.width *= 4;

+ * + *

for(int i = 0; i < size.height; i++)

+ * + * + *

// when the arrays are continuous,

+ * + *

// the outer loop is executed only once

+ * + *

const T* ptr1 = src1.ptr<T>(i);

+ * + *

const T* ptr2 = src2.ptr<T>(i);

+ * + *

T* dptr = dst.ptr<T>(i);

+ * + *

for(int j = 0; j < size.width; j += 4)

+ * + * + *

float alpha = ptr1[j+3]*inv_scale, beta = ptr2[j+3]*inv_scale;

+ * + *

dptr[j] = saturate_cast<T>(ptr1[j]*alpha + ptr2[j]*beta);

+ * + *

dptr[j+1] = saturate_cast<T>(ptr1[j+1]*alpha + ptr2[j+1]*beta);

+ * + *

dptr[j+2] = saturate_cast<T>(ptr1[j+2]*alpha + ptr2[j+2]*beta);

+ * + *

dptr[j+3] = saturate_cast<T>((1 - (1-alpha)*(1-beta))*alpha_scale);

+ * + * + * + * + *

This approach, while being very simple, can boost the performance of a simple + * element-wise operation by 10-20 percent, especially if the image is rather small + * and the operation is quite simple. + *

+ * + *

Another OpenCV idiom in this function is the call to "Mat.create" for the + * destination array, which allocates the destination array unless it already has + * the proper size and type. And while newly allocated arrays are always + * continuous, you still need to check the destination array because + * "Mat.create" does not always allocate a new matrix.

+ * + * @see org.opencv.core.Mat.isContinuous + */ + public boolean isContinuous() + { + + boolean retVal = n_isContinuous(nativeObj); + + return retVal; + } + + // + // C++: bool Mat::isSubmatrix() + // + + public boolean isSubmatrix() + { + + boolean retVal = n_isSubmatrix(nativeObj); + + return retVal; + } + + // + // C++: void Mat::locateROI(Size wholeSize, Point ofs) + // + +/** + *
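A quick check of the continuity rule described above, as a fragment (same assumptions; submat is defined later in this file):

    Mat m = new Mat(4, 4, CvType.CV_8UC1, new Scalar(0));
    System.out.println(m.isContinuous());      // true: freshly created
    Mat part = m.submat(0, 4, 0, 2);           // left half of each row
    System.out.println(part.isContinuous());   // false: gaps remain at row ends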

Locates the matrix header within a parent matrix.

+ * + *

After you extract a submatrix from a matrix using "Mat.row", "Mat.col", + * "Mat.rowRange", "Mat.colRange", and others, the resultant submatrix points + * to just that part of the original big matrix. However, each submatrix contains + * information (represented by the datastart and dataend + * fields) that helps reconstruct the original matrix size and the position of + * the extracted submatrix within the original matrix. The method + * locateROI does exactly that.

+ * + * @param wholeSize Output parameter that contains the size of the whole matrix + * containing *this as a part. + * @param ofs Output parameter that contains an offset of *this + * inside the whole matrix. + * + * @see org.opencv.core.Mat.locateROI + */ + public void locateROI(Size wholeSize, Point ofs) + { + double[] wholeSize_out = new double[2]; + double[] ofs_out = new double[2]; + locateROI_0(nativeObj, wholeSize_out, ofs_out); + if(wholeSize!=null){ wholeSize.width = wholeSize_out[0]; wholeSize.height = wholeSize_out[1]; } + if(ofs!=null){ ofs.x = ofs_out[0]; ofs.y = ofs_out[1]; } + return; + } + + // + // C++: Mat Mat::mul(Mat m, double scale = 1) + // + +/** + *

Performs an element-wise multiplication or division of the two matrices.

+ * + *

The method returns a temporary object encoding per-element array + * multiplication, with an optional scale. Note that this is not the matrix + * multiplication that corresponds to the simpler "*" operator. + * Example:

+ * + *

// C++ code:

+ * + *

Mat C = A.mul(5/B); // equivalent to divide(A, B, C, 5)

+ * + * @param m Another array of the same type and the same size as + * *this, or a matrix expression. + * @param scale Optional scale factor. + * + * @see org.opencv.core.Mat.mul + */ + public Mat mul(Mat m, double scale) + { + + Mat retVal = new Mat(n_mul(nativeObj, m.nativeObj, scale)); + + return retVal; + } + +/** + *
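A fragment showing per-element multiplication with a scale (same assumptions as above); contrast this with the C++ "*" operator, which performs true matrix multiplication:

    Mat a = new Mat(2, 2, CvType.CV_32F, new Scalar(2));
    Mat b = new Mat(2, 2, CvType.CV_32F, new Scalar(3));
    Mat c = a.mul(b, 0.5);         // per element: 2*3*0.5 = 3 everywhere
    System.out.println(c.dump());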

Performs an element-wise multiplication or division of the two matrices.

+ * + *

The method returns a temporary object encoding per-element array + * multiplication, with an optional scale. Note that this is not the matrix + * multiplication that corresponds to the simpler "*" operator. + * Example:

+ * + *

// C++ code:

+ * + *

Mat C = A.mul(5/B); // equivalent to divide(A, B, C, 5)

+ * + * @param m Another array of the same type and the same size as + * *this, or a matrix expression. + * + * @see org.opencv.core.Mat.mul + */ + public Mat mul(Mat m) + { + + Mat retVal = new Mat(n_mul(nativeObj, m.nativeObj)); + + return retVal; + } + + // + // C++: static Mat Mat::ones(int rows, int cols, int type) + // + +/** + *

Returns an array of all 1's of the specified size and type.

+ * + *

The method returns a Matlab-style 1's array initializer, similarly + * to "Mat.zeros". Note that using this method you can initialize an array with + * an arbitrary value, using the following Matlab idiom:

+ * + *

// C++ code:

+ * + *

Mat A = Mat.ones(100, 100, CV_8U)*3; // make 100x100 matrix filled with 3.

+ * + *

The above operation does not form a 100x100 matrix of 1's and then multiply + * it by 3. Instead, it just remembers the scale factor (3 in this case) and uses + * it when actually invoking the matrix initializer. + *

+ * + * @param rows Number of rows. + * @param cols Number of columns. + * @param type Created matrix type. + * + * @see org.opencv.core.Mat.ones + */ + public static Mat ones(int rows, int cols, int type) + { + + Mat retVal = new Mat(n_ones(rows, cols, type)); + + return retVal; + } + + // + // C++: static Mat Mat::ones(Size size, int type) + // + +/** + *

Returns an array of all 1's of the specified size and type.

+ * + *

The method returns a Matlab-style 1's array initializer, similarly + * to "Mat.zeros". Note that using this method you can initialize an array with + * an arbitrary value, using the following Matlab idiom:

+ * + *

// C++ code:

+ * + *

Mat A = Mat.ones(100, 100, CV_8U)*3; // make 100x100 matrix filled with 3.

+ * + *

The above operation does not form a 100x100 matrix of 1's and then multiply + * it by 3. Instead, it just remembers the scale factor (3 in this case) and uses + * it when actually invoking the matrix initializer. + *

+ * + * @param size Alternative to the matrix size specification Size(cols, + * rows). + * @param type Created matrix type. + * + * @see org.opencv.core.Mat.ones + */ + public static Mat ones(Size size, int type) + { + + Mat retVal = new Mat(n_ones(size.width, size.height, type)); + + return retVal; + } + + // + // C++: void Mat::push_back(Mat m) + // + +/** + *
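As with Mat.eye, the Matlab scaling idiom needs a Java substitute, since there is no "*" operator on Mat; two workable forms under the same assumptions as the earlier sketches:

    // the C++ idiom Mat.ones(100, 100, CV_8U)*3, written in Java:
    Mat A = Mat.ones(100, 100, CvType.CV_8U);
    A.convertTo(A, -1, 3, 0);      // scale every element by 3 in place
    // or fill directly, skipping the intermediate 1s:
    Mat B = new Mat(100, 100, CvType.CV_8U, new Scalar(3));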

Adds elements to the bottom of the matrix.

+ * + *

The methods add one or more elements to the bottom of the matrix. They + * emulate the corresponding method of the STL vector class. When + * elem is Mat, its type and the number of columns + * must be the same as in the container matrix.

+ * + * @param m Added line(s); the type and the number of columns must match those + * of the container matrix. + * + * @see org.opencv.core.Mat.push_back + */ + public void push_back(Mat m) + { + + n_push_back(nativeObj, m.nativeObj); + + return; + } + + // + // C++: void Mat::release() + // + +/** + *

Decrements the reference counter and deallocates the matrix if needed.

+ * + *

The method decrements the reference counter associated with the matrix data. + * When the reference counter reaches 0, the matrix data is deallocated and the + * data and the reference counter pointers are set to NULL's. If the matrix + * header points to an external data set (see "Mat.Mat"), the reference counter + * is NULL, and the method has no effect in this case.

+ * + *

This method can be called manually to force the matrix data deallocation. But + * since this method is automatically called in the destructor, or by any other + * method that changes the data pointer, it is usually not needed. The reference + * counter decrement and check for 0 is an atomic operation on the platforms + * that support it. Thus, it is safe to operate on the same matrices + * asynchronously in different threads.

+ * + * @see org.opencv.core.Mat.release + */ + public void release() + { + + n_release(nativeObj); + + return; + } + + // + // C++: Mat Mat::reshape(int cn, int rows = 0) + // + +/** + *

Changes the shape and/or the number of channels of a 2D matrix without + * copying the data.

+ * + *

The method makes a new matrix header for *this elements. The new + * matrix may have a different size and/or different number of channels. Any + * combination is possible if:

+ *
    + *
  • No extra elements are included into the new matrix and no elements are + * excluded. Consequently, the product rows*cols*channels() must + * stay the same after the transformation. + *
  • No data is copied. That is, this is an O(1) operation. Consequently, + * if you change the number of rows, or the operation changes the indices of + * elements row in some other way, the matrix must be continuous. See + * "Mat.isContinuous". + *
+ *

For example, if there is a set of 3D points stored as an STL vector, and you + * want to represent the points as a 3xN matrix, do the following: + *

+ * + *

// C++ code:

+ * + *

std.vector<Point3f> vec;...

+ * + *

Mat pointMat = Mat(vec). // convert vector to Mat, O(1) operation

+ * + *

reshape(1). // make Nx3 1-channel matrix out of Nx1 3-channel.

+ * + *

// Also, an O(1) operation

+ * + *

t(); // finally, transpose the Nx3 matrix.

+ * + *

// This involves copying all the elements

+ * + * @param cn New number of channels. If the parameter is 0, the number of + * channels remains the same. + * @param rows New number of rows. If the parameter is 0, the number of rows + * remains the same. + * + * @see org.opencv.core.Mat.reshape + */ + public Mat reshape(int cn, int rows) + { + + Mat retVal = new Mat(n_reshape(nativeObj, cn, rows)); + + return retVal; + } + +/** + *
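A fragment exercising both overloads (same assumptions as the earlier sketches); note that rows*cols*channels() stays constant in each call:

    Mat rgb = new Mat(4, 1, CvType.CV_8UC3, new Scalar(1, 2, 3));
    Mat flat = rgb.reshape(1);       // 4x3, 1 channel: same 12 values, O(1)
    Mat wide = rgb.reshape(1, 2);    // 2x6, 1 channel
    System.out.println(flat.size() + " " + wide.size());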

Changes the shape and/or the number of channels of a 2D matrix without + * copying the data.

+ * + *

The method makes a new matrix header for *this elements. The new + * matrix may have a different size and/or different number of channels. Any + * combination is possible if:

+ *
    + *
  • No extra elements are included into the new matrix and no elements are + * excluded. Consequently, the product rows*cols*channels() must + * stay the same after the transformation. + *
  • No data is copied. That is, this is an O(1) operation. Consequently, + * if you change the number of rows, or the operation changes the indices of + * elements row in some other way, the matrix must be continuous. See + * "Mat.isContinuous". + *
+ *

For example, if there is a set of 3D points stored as an STL vector, and you + * want to represent the points as a 3xN matrix, do the following: + *

+ * + *

// C++ code:

+ * + *

std.vector<Point3f> vec;...

+ * + *

Mat pointMat = Mat(vec). // convert vector to Mat, O(1) operation

+ * + *

reshape(1). // make Nx3 1-channel matrix out of Nx1 3-channel.

+ * + *

// Also, an O(1) operation

+ * + *

t(); // finally, transpose the Nx3 matrix.

+ * + *

// This involves copying all the elements

+ * + * @param cn New number of channels. If the parameter is 0, the number of + * channels remains the same. + * + * @see org.opencv.core.Mat.reshape + */ + public Mat reshape(int cn) + { + + Mat retVal = new Mat(n_reshape(nativeObj, cn)); + + return retVal; + } + + // + // C++: Mat Mat::row(int y) + // + +/** + *

Creates a matrix header for the specified matrix row.

+ * + *

The method makes a new header for the specified matrix row and returns it. + * This is an O(1) operation, regardless of the matrix size. The underlying data + * of the new matrix is shared with the original matrix. Here is an example of + * one of the classical basic matrix processing operations, axpy, + * used by LU and many other algorithms:

+ * + *

// C++ code:

+ * + *

inline void matrix_axpy(Mat& A, int i, int j, double alpha)

+ * + * + *

A.row(i) += A.row(j)*alpha;

+ * + * + *

Note:

+ * + *

In the current implementation, the following code does not work as expected: + *

+ * + *

// C++ code:

+ * + *

Mat A;...

+ * + *

A.row(i) = A.row(j); // will not work

+ * + *

This happens because A.row(i) forms a temporary header that is + * further assigned to another header. Remember that each of these operations is + * O(1), that is, no data is copied. Thus, the above assignment does not copy the + * j-th row to the i-th row as you may have expected. To achieve + * that, you should either turn this simple assignment into an expression or use + * the "Mat.copyTo" method:

+ * + *

Mat A;...

+ * + *

// works, but looks a bit obscure.

+ * + *

A.row(i) = A.row(j) + 0;

+ * + *

// this is a bit longer, but the recommended method.

+ * + *

A.row(j).copyTo(A.row(i));

+ * + * @param y A 0-based row index. + * + * @see org.opencv.core.Mat.row + */ + public Mat row(int y) + { + + Mat retVal = new Mat(n_row(nativeObj, y)); + + return retVal; + } + + // + // C++: Mat Mat::rowRange(int startrow, int endrow) + // + +/** + *
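The recommended copy idiom from the note above, spelled out in Java (a fragment under the same assumptions; setTo is defined later in this file):

    Mat A = Mat.zeros(3, 3, CvType.CV_32F);
    A.row(0).setTo(new Scalar(7));   // row() returns a header sharing A's data
    A.row(0).copyTo(A.row(2));       // genuinely copies row 0 into row 2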

Creates a matrix header for the specified row span.

+ * + *

The method makes a new header for the specified row span of the matrix. + * Similarly to "Mat.row" and "Mat.col", this is an O(1) operation.

+ * + * @param startrow An inclusive 0-based start index of the row span. + * @param endrow An exclusive 0-based ending index of the row span. + * + * @see org.opencv.core.Mat.rowRange + */ + public Mat rowRange(int startrow, int endrow) + { + + Mat retVal = new Mat(n_rowRange(nativeObj, startrow, endrow)); + + return retVal; + } + + // + // C++: Mat Mat::rowRange(Range r) + // + +/** + *

Creates a matrix header for the specified row span.

+ * + *

The method makes a new header for the specified row span of the matrix. + * Similarly to "Mat.row" and "Mat.col", this is an O(1) operation.

+ * + * @param r "Range" structure containing both the start and the end indices. + * + * @see org.opencv.core.Mat.rowRange + */ + public Mat rowRange(Range r) + { + + Mat retVal = new Mat(n_rowRange(nativeObj, r.start, r.end)); + + return retVal; + } + + // + // C++: int Mat::rows() + // + + public int rows() + { + + int retVal = n_rows(nativeObj); + + return retVal; + } + + // + // C++: Mat Mat::operator =(Scalar s) + // + + public Mat setTo(Scalar s) + { + + Mat retVal = new Mat(n_setTo(nativeObj, s.val[0], s.val[1], s.val[2], s.val[3])); + + return retVal; + } + + // + // C++: Mat Mat::setTo(Scalar value, Mat mask = Mat()) + // + +/** + *

Sets all or some of the array elements to the specified value.

+ * + * @param value Assigned scalar converted to the actual array type. + * @param mask Operation mask of the same size as *this. This is an + * advanced variant of the Mat.operator=(const Scalar& s) + * operator. + * + * @see org.opencv.core.Mat.setTo + */ + public Mat setTo(Scalar value, Mat mask) + { + + Mat retVal = new Mat(n_setTo(nativeObj, value.val[0], value.val[1], value.val[2], value.val[3], mask.nativeObj)); + + return retVal; + } + + // + // C++: Mat Mat::setTo(Mat value, Mat mask = Mat()) + // + +/** + *

Sets all or some of the array elements to the specified value.

+ * + * @param value Assigned matrix of values, converted to the actual array type. + * @param mask Operation mask of the same size as *this. This is an + * advanced variant of the Mat.operator=(const Scalar& s) + * operator. + * + * @see org.opencv.core.Mat.setTo + */ + public Mat setTo(Mat value, Mat mask) + { + + Mat retVal = new Mat(n_setTo(nativeObj, value.nativeObj, mask.nativeObj)); + + return retVal; + } + +/** + *

Sets all or some of the array elements to the specified value.

+ * + * @param value Assigned matrix of values, converted to the actual array type. + * + * @see org.opencv.core.Mat.setTo + */ + public Mat setTo(Mat value) + { + + Mat retVal = new Mat(n_setTo(nativeObj, value.nativeObj)); + + return retVal; + } + + // + // C++: Size Mat::size() + // + +/** + *

Returns a matrix size.

+ * + *

The method returns a matrix size: Size(cols, rows). When the + * matrix is more than 2-dimensional, the returned size is (-1, -1).

+ * + * @see org.opencv.core.Mat.size + */ + public Size size() + { + + Size retVal = new Size(n_size(nativeObj)); + + return retVal; + } + + // + // C++: size_t Mat::step1(int i = 0) + // + +/** + *

Returns a normalized step.

+ * + *

The method returns a matrix step divided by "Mat.elemSize1()". It can be + * useful to quickly access an arbitrary matrix element.

+ * + * @param i 0-based dimension index. + * + * @see org.opencv.core.Mat.step1 + */ + public long step1(int i) + { + + long retVal = n_step1(nativeObj, i); + + return retVal; + } + +/** + *

Returns a normalized step.

+ * + *

The method returns a matrix step divided by "Mat.elemSize1()". It can be + * useful to quickly access an arbitrary matrix element.

+ * + * @see org.opencv.core.Mat.step1 + */ + public long step1() + { + + long retVal = n_step1(nativeObj); + + return retVal; + } + + // + // C++: Mat Mat::operator()(int rowStart, int rowEnd, int colStart, int + // colEnd) + // + +/** + *

Extracts a rectangular submatrix.

+ * + *

The operators make a new header for the specified sub-array of + * *this. They are the most generalized forms of "Mat.row", + * "Mat.col", "Mat.rowRange", and "Mat.colRange". For example, + * A(Range(0, 10), Range.all()) is equivalent to A.rowRange(0, + * 10). Similarly to all of the above, the operators are O(1) operations, + * that is, no matrix data is copied.

+ * + * @param rowStart a rowStart + * @param rowEnd a rowEnd + * @param colStart a colStart + * @param colEnd a colEnd + * + * @see org.opencv.core.Mat.operator() + */ + public Mat submat(int rowStart, int rowEnd, int colStart, int colEnd) + { + + Mat retVal = new Mat(n_submat_rr(nativeObj, rowStart, rowEnd, colStart, colEnd)); + + return retVal; + } + + // + // C++: Mat Mat::operator()(Range rowRange, Range colRange) + // + +/** + *

Extracts a rectangular submatrix.

+ * + *

The operators make a new header for the specified sub-array of + * *this. They are the most generalized forms of "Mat.row", + * "Mat.col", "Mat.rowRange", and "Mat.colRange". For example, + * A(Range(0, 10), Range.all()) is equivalent to A.rowRange(0, + * 10). Similarly to all of the above, the operators are O(1) operations, + * that is, no matrix data is copied.

+ * + * @param rowRange Start and end row of the extracted submatrix. The upper + * boundary is not included. To select all the rows, use Range.all(). + * @param colRange Start and end column of the extracted submatrix. The upper + * boundary is not included. To select all the columns, use Range.all(). + * + * @see org.opencv.core.Mat.operator() + */ + public Mat submat(Range rowRange, Range colRange) + { + + Mat retVal = new Mat(n_submat_rr(nativeObj, rowRange.start, rowRange.end, colRange.start, colRange.end)); + + return retVal; + } + + // + // C++: Mat Mat::operator()(Rect roi) + // + +/** + *

Extracts a rectangular submatrix.

+ * + *

The operators make a new header for the specified sub-array of + * *this. They are the most generalized forms of "Mat.row", + * "Mat.col", "Mat.rowRange", and "Mat.colRange". For example, + * A(Range(0, 10), Range.all()) is equivalent to A.rowRange(0, + * 10). Similarly to all of the above, the operators are O(1) operations, + * that is, no matrix data is copied.

+ * + * @param roi Extracted submatrix specified as a rectangle. + * + * @see org.opencv.core.Mat.operator() + */ + public Mat submat(Rect roi) + { + + Mat retVal = new Mat(n_submat(nativeObj, roi.x, roi.y, roi.width, roi.height)); + + return retVal; + } + + // + // C++: Mat Mat::t() + // + +/** + *
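The three submat overloads all describe the same kind of view; for instance (same assumptions; Range and Rect are the org.opencv.core helper classes):

    Mat m = new Mat(10, 10, CvType.CV_8UC1, new Scalar(0));
    Mat a = m.submat(0, 5, 0, 5);                       // by index pairs
    Mat b = m.submat(new Range(0, 5), new Range(0, 5)); // by Range
    Mat c = m.submat(new Rect(0, 0, 5, 5));             // by Rect(x, y, width, height)
    c.setTo(new Scalar(255));      // writes through to m: these are views, not copies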

Transposes a matrix.

+ * + *

The method performs matrix transposition by means of matrix expressions. It + * does not perform the actual transposition but returns a temporary matrix + * transposition object that can be further used as a part of more complex + * matrix expressions or can be assigned to a matrix:

+ * + *

// C++ code:

+ * + *

Mat A1 = A + Mat.eye(A.size(), A.type())*lambda;

+ * + *

Mat C = A1.t()*A1; // compute (A + lambda*I)^t * (A + lambda*I)

+ * + * @see org.opencv.core.Mat.t + */ + public Mat t() + { + + Mat retVal = new Mat(n_t(nativeObj)); + + return retVal; + } + + // + // C++: size_t Mat::total() + // + +/** + *

Returns the total number of array elements.

+ * + *

The method returns the number of array elements (a number of pixels if the + * array represents an image).

+ * + * @see org.opencv.core.Mat.total + */ + public long total() + { + + long retVal = n_total(nativeObj); + + return retVal; + } + + // + // C++: int Mat::type() + // + +/** + *

Returns the type of a matrix element.

+ * + *

The method returns a matrix element type. This is an identifier compatible + * with the CvMat type system, like CV_16SC3, a 16-bit + * signed 3-channel array, and so on.

+ * + * @see org.opencv.core.Mat.type + */ + public int type() + { + + int retVal = n_type(nativeObj); + + return retVal; + } + + // + // C++: static Mat Mat::zeros(int rows, int cols, int type) + // + +/** + *

Returns a zero array of the specified size and type.

+ * + *

The method returns a Matlab-style zero array initializer. It can be used to + * quickly form a constant array as a function parameter, part of a matrix + * expression, or as a matrix initializer. + *

+ * + *

// C++ code:

+ * + *

Mat A;

+ * + *

A = Mat.zeros(3, 3, CV_32F);

+ * + *

In the example above, a new matrix is allocated only if A is not + * a 3x3 floating-point matrix. Otherwise, the existing matrix A is + * filled with zeros. + *

+ * + * @param rows Number of rows. + * @param cols Number of columns. + * @param type Created matrix type. + * + * @see org.opencv.core.Mat.zeros + */ + public static Mat zeros(int rows, int cols, int type) + { + + Mat retVal = new Mat(n_zeros(rows, cols, type)); + + return retVal; + } + + // + // C++: static Mat Mat::zeros(Size size, int type) + // + +/** + *

Returns a zero array of the specified size and type.

+ * + *

The method returns a Matlab-style zero array initializer. It can be used to + * quickly form a constant array as a function parameter, part of a matrix + * expression, or as a matrix initializer. + *

+ * + *

// C++ code:

+ * + *

Mat A;

+ * + *

A = Mat.zeros(3, 3, CV_32F);

+ * + *

In the example above, a new matrix is allocated only if A is not + * a 3x3 floating-point matrix. Otherwise, the existing matrix A is + * filled with zeros. + *

+ * + * @param size Alternative to the matrix size specification Size(cols, + * rows). + * @param type Created matrix type. + * + * @see org.opencv.core.Mat.zeros + */ + public static Mat zeros(Size size, int type) + { + + Mat retVal = new Mat(n_zeros(size.width, size.height, type)); + + return retVal; + } + + @Override + protected void finalize() throws Throwable { + n_delete(nativeObj); + super.finalize(); + } + + @Override + public String toString() { + return "Mat [ " + + rows() + "*" + cols() + "*" + CvType.typeToString(type()) + + ", isCont=" + isContinuous() + ", isSubmat=" + isSubmatrix() + + ", nativeObj=0x" + Long.toHexString(nativeObj) + + ", dataAddr=0x" + Long.toHexString(dataAddr()) + + " ]"; + } + + public String dump() { + return nDump(nativeObj); + } + + public int put(int row, int col, double... data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + return nPutD(nativeObj, row, col, data.length, data); + } + + public int put(int row, int col, float[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_32F) { + return nPutF(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int put(int row, int col, int[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_32S) { + return nPutI(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int put(int row, int col, short[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_16U || CvType.depth(t) == CvType.CV_16S) { + return nPutS(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int put(int row, int col, byte[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 
0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_8U || CvType.depth(t) == CvType.CV_8S) { + return nPutB(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int get(int row, int col, byte[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_8U || CvType.depth(t) == CvType.CV_8S) { + return nGetB(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int get(int row, int col, short[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_16U || CvType.depth(t) == CvType.CV_16S) { + return nGetS(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int get(int row, int col, int[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_32S) { + return nGetI(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int get(int row, int col, float[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_32F) { + return nGetF(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public int get(int row, int col, double[] data) { + int t = type(); + if (data == null || data.length % CvType.channels(t) != 0) + throw new java.lang.UnsupportedOperationException( + "Provided data element number (" + + (data == null ? 
0 : data.length) + + ") should be multiple of the Mat channels count (" + + CvType.channels(t) + ")"); + if (CvType.depth(t) == CvType.CV_64F) { + return nGetD(nativeObj, row, col, data.length, data); + } + throw new java.lang.UnsupportedOperationException("Mat data type is not compatible: " + t); + } + + public double[] get(int row, int col) { + return nGet(nativeObj, row, col); + } + + public int height() { + return rows(); + } + + public int width() { + return cols(); + } + + public long getNativeObjAddr() { + return nativeObj; + } + + // C++: Mat::Mat() + private static native long n_Mat(); + + // C++: Mat::Mat(int rows, int cols, int type) + private static native long n_Mat(int rows, int cols, int type); + + // C++: Mat::Mat(Size size, int type) + private static native long n_Mat(double size_width, double size_height, int type); + + // C++: Mat::Mat(int rows, int cols, int type, Scalar s) + private static native long n_Mat(int rows, int cols, int type, double s_val0, double s_val1, double s_val2, double s_val3); + + // C++: Mat::Mat(Size size, int type, Scalar s) + private static native long n_Mat(double size_width, double size_height, int type, double s_val0, double s_val1, double s_val2, double s_val3); + + // C++: Mat::Mat(Mat m, Range rowRange, Range colRange = Range::all()) + private static native long n_Mat(long m_nativeObj, int rowRange_start, int rowRange_end, int colRange_start, int colRange_end); + + private static native long n_Mat(long m_nativeObj, int rowRange_start, int rowRange_end); + + // C++: Mat Mat::adjustROI(int dtop, int dbottom, int dleft, int dright) + private static native long n_adjustROI(long nativeObj, int dtop, int dbottom, int dleft, int dright); + + // C++: void Mat::assignTo(Mat m, int type = -1) + private static native void n_assignTo(long nativeObj, long m_nativeObj, int type); + + private static native void n_assignTo(long nativeObj, long m_nativeObj); + + // C++: int Mat::channels() + private static native int n_channels(long nativeObj); + + // C++: int Mat::checkVector(int elemChannels, int depth = -1, bool + // requireContinuous = true) + private static native int n_checkVector(long nativeObj, int elemChannels, int depth, boolean requireContinuous); + + private static native int n_checkVector(long nativeObj, int elemChannels, int depth); + + private static native int n_checkVector(long nativeObj, int elemChannels); + + // C++: Mat Mat::clone() + private static native long n_clone(long nativeObj); + + // C++: Mat Mat::col(int x) + private static native long n_col(long nativeObj, int x); + + // C++: Mat Mat::colRange(int startcol, int endcol) + private static native long n_colRange(long nativeObj, int startcol, int endcol); + + // C++: int Mat::cols() + private static native int n_cols(long nativeObj); + + // C++: void Mat::convertTo(Mat& m, int rtype, double alpha = 1, double beta + // = 0) + private static native void n_convertTo(long nativeObj, long m_nativeObj, int rtype, double alpha, double beta); + + private static native void n_convertTo(long nativeObj, long m_nativeObj, int rtype, double alpha); + + private static native void n_convertTo(long nativeObj, long m_nativeObj, int rtype); + + // C++: void Mat::copyTo(Mat& m) + private static native void n_copyTo(long nativeObj, long m_nativeObj); + + // C++: void Mat::copyTo(Mat& m, Mat mask) + private static native void n_copyTo(long nativeObj, long m_nativeObj, long mask_nativeObj); + + // C++: void Mat::create(int rows, int cols, int type) + private static native void n_create(long nativeObj, 
int rows, int cols, int type); + + // C++: void Mat::create(Size size, int type) + private static native void n_create(long nativeObj, double size_width, double size_height, int type); + + // C++: Mat Mat::cross(Mat m) + private static native long n_cross(long nativeObj, long m_nativeObj); + + // C++: long Mat::dataAddr() + private static native long n_dataAddr(long nativeObj); + + // C++: int Mat::depth() + private static native int n_depth(long nativeObj); + + // C++: Mat Mat::diag(int d = 0) + private static native long n_diag(long nativeObj, int d); + + // C++: static Mat Mat::diag(Mat d) + private static native long n_diag(long d_nativeObj); + + // C++: double Mat::dot(Mat m) + private static native double n_dot(long nativeObj, long m_nativeObj); + + // C++: size_t Mat::elemSize() + private static native long n_elemSize(long nativeObj); + + // C++: size_t Mat::elemSize1() + private static native long n_elemSize1(long nativeObj); + + // C++: bool Mat::empty() + private static native boolean n_empty(long nativeObj); + + // C++: static Mat Mat::eye(int rows, int cols, int type) + private static native long n_eye(int rows, int cols, int type); + + // C++: static Mat Mat::eye(Size size, int type) + private static native long n_eye(double size_width, double size_height, int type); + + // C++: Mat Mat::inv(int method = DECOMP_LU) + private static native long n_inv(long nativeObj, int method); + + private static native long n_inv(long nativeObj); + + // C++: bool Mat::isContinuous() + private static native boolean n_isContinuous(long nativeObj); + + // C++: bool Mat::isSubmatrix() + private static native boolean n_isSubmatrix(long nativeObj); + + // C++: void Mat::locateROI(Size wholeSize, Point ofs) + private static native void locateROI_0(long nativeObj, double[] wholeSize_out, double[] ofs_out); + + // C++: Mat Mat::mul(Mat m, double scale = 1) + private static native long n_mul(long nativeObj, long m_nativeObj, double scale); + + private static native long n_mul(long nativeObj, long m_nativeObj); + + // C++: static Mat Mat::ones(int rows, int cols, int type) + private static native long n_ones(int rows, int cols, int type); + + // C++: static Mat Mat::ones(Size size, int type) + private static native long n_ones(double size_width, double size_height, int type); + + // C++: void Mat::push_back(Mat m) + private static native void n_push_back(long nativeObj, long m_nativeObj); + + // C++: void Mat::release() + private static native void n_release(long nativeObj); + + // C++: Mat Mat::reshape(int cn, int rows = 0) + private static native long n_reshape(long nativeObj, int cn, int rows); + + private static native long n_reshape(long nativeObj, int cn); + + // C++: Mat Mat::row(int y) + private static native long n_row(long nativeObj, int y); + + // C++: Mat Mat::rowRange(int startrow, int endrow) + private static native long n_rowRange(long nativeObj, int startrow, int endrow); + + // C++: int Mat::rows() + private static native int n_rows(long nativeObj); + + // C++: Mat Mat::operator =(Scalar s) + private static native long n_setTo(long nativeObj, double s_val0, double s_val1, double s_val2, double s_val3); + + // C++: Mat Mat::setTo(Scalar value, Mat mask = Mat()) + private static native long n_setTo(long nativeObj, double s_val0, double s_val1, double s_val2, double s_val3, long mask_nativeObj); + + // C++: Mat Mat::setTo(Mat value, Mat mask = Mat()) + private static native long n_setTo(long nativeObj, long value_nativeObj, long mask_nativeObj); + + private static native long n_setTo(long 
nativeObj, long value_nativeObj); + + // C++: Size Mat::size() + private static native double[] n_size(long nativeObj); + + // C++: size_t Mat::step1(int i = 0) + private static native long n_step1(long nativeObj, int i); + + private static native long n_step1(long nativeObj); + + // C++: Mat Mat::operator()(Range rowRange, Range colRange) + private static native long n_submat_rr(long nativeObj, int rowRange_start, int rowRange_end, int colRange_start, int colRange_end); + + // C++: Mat Mat::operator()(Rect roi) + private static native long n_submat(long nativeObj, int roi_x, int roi_y, int roi_width, int roi_height); + + // C++: Mat Mat::t() + private static native long n_t(long nativeObj); + + // C++: size_t Mat::total() + private static native long n_total(long nativeObj); + + // C++: int Mat::type() + private static native int n_type(long nativeObj); + + // C++: static Mat Mat::zeros(int rows, int cols, int type) + private static native long n_zeros(int rows, int cols, int type); + + // C++: static Mat Mat::zeros(Size size, int type) + private static native long n_zeros(double size_width, double size_height, int type); + + // native support for java finalize() + private static native void n_delete(long nativeObj); + + private static native int nPutD(long self, int row, int col, int count, double[] data); + + private static native int nPutF(long self, int row, int col, int count, float[] data); + + private static native int nPutI(long self, int row, int col, int count, int[] data); + + private static native int nPutS(long self, int row, int col, int count, short[] data); + + private static native int nPutB(long self, int row, int col, int count, byte[] data); + + private static native int nGetB(long self, int row, int col, int count, byte[] vals); + + private static native int nGetS(long self, int row, int col, int count, short[] vals); + + private static native int nGetI(long self, int row, int col, int count, int[] vals); + + private static native int nGetF(long self, int row, int col, int count, float[] vals); + + private static native int nGetD(long self, int row, int col, int count, double[] vals); + + private static native double[] nGet(long self, int row, int col); + + private static native String nDump(long self); +} diff --git a/src/org/opencv/core/MatOfByte.java b/src/org/opencv/core/MatOfByte.java new file mode 100644 index 0000000..0ebdb66 --- /dev/null +++ b/src/org/opencv/core/MatOfByte.java @@ -0,0 +1,79 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + +public class MatOfByte extends Mat { + // 8UC(x) + private static final int _depth = CvType.CV_8U; + private static final int _channels = 1; + + public MatOfByte() { + super(); + } + + protected MatOfByte(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfByte fromNativeAddr(long addr) { + return new MatOfByte(addr); + } + + public MatOfByte(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? 
+ } + + public MatOfByte(byte...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(byte...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! + } + + public byte[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + byte[] a = new byte[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! + return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Byte ab[] = lb.toArray(new Byte[0]); + byte a[] = new byte[ab.length]; + for(int i=0; i toList() { + byte[] a = toArray(); + Byte ab[] = new Byte[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + + public void fromArray(DMatch...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + float buff[] = new float[num * _channels]; + for(int i=0; i ldm) { + DMatch adm[] = ldm.toArray(new DMatch[0]); + fromArray(adm); + } + + public List toList() { + DMatch[] adm = toArray(); + return Arrays.asList(adm); + } +} diff --git a/src/org/opencv/core/MatOfDouble.java b/src/org/opencv/core/MatOfDouble.java new file mode 100644 index 0000000..cca5251 --- /dev/null +++ b/src/org/opencv/core/MatOfDouble.java @@ -0,0 +1,79 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + +public class MatOfDouble extends Mat { + // 64FC(x) + private static final int _depth = CvType.CV_64F; + private static final int _channels = 1; + + public MatOfDouble() { + super(); + } + + protected MatOfDouble(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfDouble fromNativeAddr(long addr) { + return new MatOfDouble(addr); + } + + public MatOfDouble(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public MatOfDouble(double...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(double...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! + } + + public double[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + double[] a = new double[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! + return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Double ab[] = lb.toArray(new Double[0]); + double a[] = new double[ab.length]; + for(int i=0; i toList() { + double[] a = toArray(); + Double ab[] = new Double[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(float...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! 
+ } + + public float[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + float[] a = new float[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! + return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Float ab[] = lb.toArray(new Float[0]); + float a[] = new float[ab.length]; + for(int i=0; i toList() { + float[] a = toArray(); + Float ab[] = new Float[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(float...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! + } + + public float[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + float[] a = new float[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! + return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Float ab[] = lb.toArray(new Float[0]); + float a[] = new float[ab.length]; + for(int i=0; i toList() { + float[] a = toArray(); + Float ab[] = new Float[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(float...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! + } + + public float[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + float[] a = new float[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! + return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Float ab[] = lb.toArray(new Float[0]); + float a[] = new float[ab.length]; + for(int i=0; i toList() { + float[] a = toArray(); + Float ab[] = new Float[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(int...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! + } + + public int[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + int[] a = new int[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! + return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Integer ab[] = lb.toArray(new Integer[0]); + int a[] = new int[ab.length]; + for(int i=0; i toList() { + int[] a = toArray(); + Integer ab[] = new Integer[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(int...a) { + if(a==null || a.length==0) + return; + int num = a.length / _channels; + alloc(num); + put(0, 0, a); //TODO: check ret val! + } + + public int[] toArray() { + int num = checkVector(_channels, _depth); + if(num < 0) + throw new RuntimeException("Native Mat has unexpected type or size: " + toString()); + int[] a = new int[num * _channels]; + if(num == 0) + return a; + get(0, 0, a); //TODO: check ret val! 
+ return a; + } + + public void fromList(List lb) { + if(lb==null || lb.size()==0) + return; + Integer ab[] = lb.toArray(new Integer[0]); + int a[] = new int[ab.length]; + for(int i=0; i toList() { + int[] a = toArray(); + Integer ab[] = new Integer[a.length]; + for(int i=0; i0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(KeyPoint...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + float buff[] = new float[num * _channels]; + for(int i=0; i lkp) { + KeyPoint akp[] = lkp.toArray(new KeyPoint[0]); + fromArray(akp); + } + + public List toList() { + KeyPoint[] akp = toArray(); + return Arrays.asList(akp); + } +} diff --git a/src/org/opencv/core/MatOfPoint.java b/src/org/opencv/core/MatOfPoint.java new file mode 100644 index 0000000..23eeed0 --- /dev/null +++ b/src/org/opencv/core/MatOfPoint.java @@ -0,0 +1,78 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + +public class MatOfPoint extends Mat { + // 32SC2 + private static final int _depth = CvType.CV_32S; + private static final int _channels = 2; + + public MatOfPoint() { + super(); + } + + protected MatOfPoint(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfPoint fromNativeAddr(long addr) { + return new MatOfPoint(addr); + } + + public MatOfPoint(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public MatOfPoint(Point...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(Point...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + int buff[] = new int[num * _channels]; + for(int i=0; i lp) { + Point ap[] = lp.toArray(new Point[0]); + fromArray(ap); + } + + public List toList() { + Point[] ap = toArray(); + return Arrays.asList(ap); + } +} diff --git a/src/org/opencv/core/MatOfPoint2f.java b/src/org/opencv/core/MatOfPoint2f.java new file mode 100644 index 0000000..ba4be4a --- /dev/null +++ b/src/org/opencv/core/MatOfPoint2f.java @@ -0,0 +1,78 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + +public class MatOfPoint2f extends Mat { + // 32FC2 + private static final int _depth = CvType.CV_32F; + private static final int _channels = 2; + + public MatOfPoint2f() { + super(); + } + + protected MatOfPoint2f(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfPoint2f fromNativeAddr(long addr) { + return new MatOfPoint2f(addr); + } + + public MatOfPoint2f(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? 
+ } + + public MatOfPoint2f(Point...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(Point...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + float buff[] = new float[num * _channels]; + for(int i=0; i lp) { + Point ap[] = lp.toArray(new Point[0]); + fromArray(ap); + } + + public List toList() { + Point[] ap = toArray(); + return Arrays.asList(ap); + } +} diff --git a/src/org/opencv/core/MatOfPoint3.java b/src/org/opencv/core/MatOfPoint3.java new file mode 100644 index 0000000..16e2130 --- /dev/null +++ b/src/org/opencv/core/MatOfPoint3.java @@ -0,0 +1,79 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + +public class MatOfPoint3 extends Mat { + // 32SC3 + private static final int _depth = CvType.CV_32S; + private static final int _channels = 3; + + public MatOfPoint3() { + super(); + } + + protected MatOfPoint3(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfPoint3 fromNativeAddr(long addr) { + return new MatOfPoint3(addr); + } + + public MatOfPoint3(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public MatOfPoint3(Point3...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(Point3...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + int buff[] = new int[num * _channels]; + for(int i=0; i lp) { + Point3 ap[] = lp.toArray(new Point3[0]); + fromArray(ap); + } + + public List toList() { + Point3[] ap = toArray(); + return Arrays.asList(ap); + } +} diff --git a/src/org/opencv/core/MatOfPoint3f.java b/src/org/opencv/core/MatOfPoint3f.java new file mode 100644 index 0000000..97e2a95 --- /dev/null +++ b/src/org/opencv/core/MatOfPoint3f.java @@ -0,0 +1,79 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + +public class MatOfPoint3f extends Mat { + // 32FC3 + private static final int _depth = CvType.CV_32F; + private static final int _channels = 3; + + public MatOfPoint3f() { + super(); + } + + protected MatOfPoint3f(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfPoint3f fromNativeAddr(long addr) { + return new MatOfPoint3f(addr); + } + + public MatOfPoint3f(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? 
+ } + + public MatOfPoint3f(Point3...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(Point3...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + float buff[] = new float[num * _channels]; + for(int i=0; i lp) { + Point3 ap[] = lp.toArray(new Point3[0]); + fromArray(ap); + } + + public List toList() { + Point3[] ap = toArray(); + return Arrays.asList(ap); + } +} diff --git a/src/org/opencv/core/MatOfRect.java b/src/org/opencv/core/MatOfRect.java new file mode 100644 index 0000000..2e58bfe --- /dev/null +++ b/src/org/opencv/core/MatOfRect.java @@ -0,0 +1,81 @@ +package org.opencv.core; + +import java.util.Arrays; +import java.util.List; + + +public class MatOfRect extends Mat { + // 32SC4 + private static final int _depth = CvType.CV_32S; + private static final int _channels = 4; + + public MatOfRect() { + super(); + } + + protected MatOfRect(long addr) { + super(addr); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public static MatOfRect fromNativeAddr(long addr) { + return new MatOfRect(addr); + } + + public MatOfRect(Mat m) { + super(m, Range.all()); + if(checkVector(_channels, _depth) < 0 ) + throw new IllegalArgumentException("Incomatible Mat"); + //FIXME: do we need release() here? + } + + public MatOfRect(Rect...a) { + super(); + fromArray(a); + } + + public void alloc(int elemNumber) { + if(elemNumber>0) + super.create(elemNumber, 1, CvType.makeType(_depth, _channels)); + } + + public void fromArray(Rect...a) { + if(a==null || a.length==0) + return; + int num = a.length; + alloc(num); + int buff[] = new int[num * _channels]; + for(int i=0; i lr) { + Rect ap[] = lr.toArray(new Rect[0]); + fromArray(ap); + } + + public List toList() { + Rect[] ar = toArray(); + return Arrays.asList(ar); + } +} diff --git a/src/org/opencv/core/Point.java b/src/org/opencv/core/Point.java new file mode 100644 index 0000000..cb19a1d --- /dev/null +++ b/src/org/opencv/core/Point.java @@ -0,0 +1,120 @@ +package org.opencv.core; + +/** + *
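All of the MatOf* wrappers in this patch share one shape: a fixed depth/channel layout verified through checkVector(), an alloc()/fromArray() pair for bulk upload into a single-column native Mat, and toArray()/toList() for readback. A minimal round-trip sketch (assuming the OpenCV native library has already been loaded by the host sketch):

    // Pack three vertices into a 32SC2 single-column Mat and read them back.
    MatOfPoint contour = new MatOfPoint(new Point(0, 0), new Point(10, 0), new Point(10, 10));
    Point[] pts = contour.toArray();   // copies the native data back out
    System.out.println(pts.length + " points, first = " + pts[0]);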

Template class for 2D points specified by its coordinates x and + * y. + * An instance of the class is interchangeable with C structures, + * CvPoint and CvPoint2D32f. There is also a cast + * operator to convert point coordinates to the specified type. The conversion + * from floating-point coordinates to integer coordinates is done by rounding. + * Commonly, the conversion uses this operation for each of the coordinates. + * Besides the class members listed in the declaration above, the following + * operations on points are implemented:

+ * + *

pt1 = pt2 + pt3;

+ * + *

// C++ code:

+ * + *

pt1 = pt2 - pt3;

+ * + *

pt1 = pt2 * a;

+ * + *

pt1 = a * pt2;

+ * + *

pt1 += pt2;

+ * + *

pt1 -= pt2;

+ * + *

pt1 *= a;

+ * + *

double value = norm(pt); // L2 norm

+ * + *

pt1 == pt2;

+ * + *

pt1 != pt2;

+ * + *

For your convenience, the following type aliases are defined:

+ * + *

typedef Point_<int> Point2i;

+ * + *

typedef Point2i Point;

+ * + *

typedef Point_<float> Point2f;

+ * + *

typedef Point_<double> Point2d;

+ * + *

Example:

+ * + *

Point2f a(0.3f, 0.f), b(0.f, 0.4f);

+ * + *

Point pt = (a + b)*10.f;

+ * + *

cout << pt.x << ", " << pt.y << endl;

+ * + * @see org.opencv.core.Point_ + */ +public class Point { + + public double x, y; + + public Point(double x, double y) { + this.x = x; + this.y = y; + } + + public Point() { + this(0, 0); + } + + public Point(double[] vals) { + this(); + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + x = vals.length > 0 ? vals[0] : 0; + y = vals.length > 1 ? vals[1] : 0; + } else { + x = 0; + y = 0; + } + } + + public Point clone() { + return new Point(x, y); + } + + public double dot(Point p) { + return x * p.x + y * p.y; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(x); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(y); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Point)) return false; + Point it = (Point) obj; + return x == it.x && y == it.y; + } + + public boolean inside(Rect r) { + return r.contains(this); + } + + @Override + public String toString() { + return "{" + x + ", " + y + "}"; + } +} diff --git a/src/org/opencv/core/Point3.java b/src/org/opencv/core/Point3.java new file mode 100644 index 0000000..711e073 --- /dev/null +++ b/src/org/opencv/core/Point3.java @@ -0,0 +1,98 @@ +package org.opencv.core; + +/** + *
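The operator examples above are C++ only; on the Java side a Point offers just the methods declared in this class. A small sketch of the Java API:

    Point p = new Point(3, 4);
    Point q = new Point(1, 2);
    double d = p.dot(q);                            // 3*1 + 4*2 = 11
    boolean hit = p.inside(new Rect(0, 0, 10, 10)); // delegates to Rect.contains
    System.out.println(p + " dot " + q + " = " + d + ", inside: " + hit);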

Template class for 3D points specified by its coordinates x, + * y and z. + * An instance of the class is interchangeable with the C structure + * CvPoint3D32f. Similarly to Point_, the coordinates + * of 3D points can be converted to another type. The vector arithmetic and + * comparison operations are also supported.

+ * + *

The following Point3_<> aliases are available:

+ * + *

typedef Point3_<int> Point3i;

+ * + *

// C++ code:

+ * + *

typedef Point3_<float> Point3f;

+ * + *

typedef Point3_<double> Point3d;

+ * + * @see org.opencv.core.Point3_ + */ +public class Point3 { + + public double x, y, z; + + public Point3(double x, double y, double z) { + this.x = x; + this.y = y; + this.z = z; + } + + public Point3() { + this(0, 0, 0); + } + + public Point3(Point p) { + x = p.x; + y = p.y; + z = 0; + } + + public Point3(double[] vals) { + this(); + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + x = vals.length > 0 ? vals[0] : 0; + y = vals.length > 1 ? vals[1] : 0; + z = vals.length > 2 ? vals[2] : 0; + } else { + x = 0; + y = 0; + z = 0; + } + } + + public Point3 clone() { + return new Point3(x, y, z); + } + + public double dot(Point3 p) { + return x * p.x + y * p.y + z * p.z; + } + + public Point3 cross(Point3 p) { + return new Point3(y * p.z - z * p.y, z * p.x - x * p.z, x * p.y - y * p.x); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(x); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(y); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(z); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Point3)) return false; + Point3 it = (Point3) obj; + return x == it.x && y == it.y && z == it.z; + } + + @Override + public String toString() { + return "{" + x + ", " + y + ", " + z + "}"; + } +} diff --git a/src/org/opencv/core/Range.java b/src/org/opencv/core/Range.java new file mode 100644 index 0000000..e904510 --- /dev/null +++ b/src/org/opencv/core/Range.java @@ -0,0 +1,129 @@ +package org.opencv.core; + +/** + *
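Point3 layers a cross product on top of the Point-style API; a short sketch:

    Point3 a = new Point3(1, 0, 0);
    Point3 b = new Point3(0, 1, 0);
    System.out.println(a.cross(b)); // {0.0, 0.0, 1.0}: right-handed cross product
    System.out.println(a.dot(b));   // 0.0: the vectors are orthogonal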

Template class specifying a continuous subsequence (slice) of a sequence.

+ * + *

class Range

+ * + *

// C++ code:

+ * + * + *

public:...

+ * + *

int start, end;

+ * + *

};

+ * + *

The class is used to specify a row or a column span in a matrix (

+ * + *

"Mat") and for many other purposes. Range(a,b) is basically the + * same as a:b in Matlab or a..b in Python. As in + * Python, start is an inclusive left boundary of the range and + * end is an exclusive right boundary of the range. Such a + * half-opened interval is usually denoted as [start,end). + * The static method Range.all() returns a special variable that + * means "the whole sequence" or "the whole range", just like " : " + * in Matlab or " ... " in Python. All the methods and functions in + * OpenCV that take Range support this special Range.all() + * value. But, of course, in case of your own custom processing, you will + * probably have to check and handle it explicitly:

+ * + *

// C++ code:

+ * + *

void my_function(..., const Range& r,....)

+ * + * + *

if(r == Range.all()) {

+ * + *

// process all the data

+ * + * + *

else {

+ * + *

// process [r.start, r.end)

+ * + * + * + *

+ * + * @see org.opencv.core.Range + */ +public class Range { + + public int start, end; + + public Range(int s, int e) { + this.start = s; + this.end = e; + } + + public Range() { + this(0, 0); + } + + public Range(double[] vals) { + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + start = vals.length > 0 ? (int) vals[0] : 0; + end = vals.length > 1 ? (int) vals[1] : 0; + } else { + start = 0; + end = 0; + } + + } + + public int size() { + return empty() ? 0 : end - start; + } + + public boolean empty() { + return end <= start; + } + + public static Range all() { + return new Range(Integer.MIN_VALUE, Integer.MAX_VALUE); + } + + public Range intersection(Range r1) { + Range r = new Range(Math.max(r1.start, this.start), Math.min(r1.end, this.end)); + r.end = Math.max(r.end, r.start); + return r; + } + + public Range shift(int delta) { + return new Range(start + delta, end + delta); + } + + public Range clone() { + return new Range(start, end); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(start); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(end); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Range)) return false; + Range it = (Range) obj; + return start == it.start && end == it.end; + } + + @Override + public String toString() { + return "[" + start + ", " + end + ")"; + } +} diff --git a/src/org/opencv/core/Rect.java b/src/org/opencv/core/Rect.java new file mode 100644 index 0000000..dd6677a --- /dev/null +++ b/src/org/opencv/core/Rect.java @@ -0,0 +1,164 @@ +package org.opencv.core; + +/** + *
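The half-open [start, end) semantics are easy to confirm from the Java side; note how intersection() clamps a negative overlap to an empty range. A sketch:

    Range r1 = new Range(2, 7);          // rows 2..6, i.e. [2, 7)
    Range r2 = new Range(5, 10);
    Range both = r1.intersection(r2);    // [5, 7), so size() == 2
    System.out.println(both + " size=" + both.size() + " empty=" + both.empty());
    // Range.all() is the "whole sequence" sentinel accepted by Mat operations.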

Template class for 2D rectangles, described by the following parameters:

+ *
    + *
  • Coordinates of the top-left corner. This is a default interpretation + * of Rect_.x and Rect_.y in OpenCV. Though, in your + * algorithms you may count x and y from the + * bottom-left corner. + *
  • Rectangle width and height. + *
+ * + *

OpenCV typically assumes that the top and left boundaries of the rectangle are + * inclusive, while the right and bottom boundaries are not. For example, the + * method Rect_.contains returns true if

+ * + *

x <= pt.x < x+width, y <= pt.y < y+height

+ * + *

Virtually every loop over an image ROI in OpenCV (where ROI is specified by + * Rect_) is implemented as:

+ * + *

// C++ code:

+ * + *

for(int y = roi.y; y < roi.y + rect.height; y++)

+ * + *

for(int x = roi.x; x < roi.x + rect.width; x++)

+ * + * + *

//...

+ * + * + *

In addition to the class members, the following operations on rectangles are + * implemented:

+ *
    + *
  • rect = rect +- point (shifting a rectangle by a certain + * offset) + *
  • rect = rect +- size (expanding or shrinking a rectangle by a + * certain amount) + *
  • rect += point, rect -= point, rect += size, rect -= size + * (augmenting operations) + *
  • rect = rect1 & rect2 (rectangle intersection) + *
  • rect = rect1 | rect2 (minimum area rectangle containing + * rect1 and rect2)
  • rect &= rect1, rect |= rect1 (and the corresponding + * augmenting operations) + *
  • rect == rect1, rect != rect1 (rectangle comparison) + *
+ * + *

This is an example of how the partial ordering on rectangles can be established + * (rect1 ⊆ rect2):

+ * + *

// C++ code:

+ * + *

template<typename _Tp> inline bool

+ * + *

operator <= (const Rect_<_Tp>& r1, const Rect_<_Tp>& r2)

+ * + * + *

return (r1 & r2) == r1;

+ * + * + *

For your convenience, the Rect_<> alias is available:

+ * + *

typedef Rect_<int> Rect;

+ * + * @see org.opencv.core.Rect_ + */ +public class Rect { + + public int x, y, width, height; + + public Rect(int x, int y, int width, int height) { + this.x = x; + this.y = y; + this.width = width; + this.height = height; + } + + public Rect() { + this(0, 0, 0, 0); + } + + public Rect(Point p1, Point p2) { + x = (int) (p1.x < p2.x ? p1.x : p2.x); + y = (int) (p1.y < p2.y ? p1.y : p2.y); + width = (int) (p1.x > p2.x ? p1.x : p2.x) - x; + height = (int) (p1.y > p2.y ? p1.y : p2.y) - y; + } + + public Rect(Point p, Size s) { + this((int) p.x, (int) p.y, (int) s.width, (int) s.height); + } + + public Rect(double[] vals) { + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + x = vals.length > 0 ? (int) vals[0] : 0; + y = vals.length > 1 ? (int) vals[1] : 0; + width = vals.length > 2 ? (int) vals[2] : 0; + height = vals.length > 3 ? (int) vals[3] : 0; + } else { + x = 0; + y = 0; + width = 0; + height = 0; + } + } + + public Rect clone() { + return new Rect(x, y, width, height); + } + + public Point tl() { + return new Point(x, y); + } + + public Point br() { + return new Point(x + width, y + height); + } + + public Size size() { + return new Size(width, height); + } + + public double area() { + return width * height; + } + + public boolean contains(Point p) { + return x <= p.x && p.x < x + width && y <= p.y && p.y < y + height; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(height); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(width); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(x); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(y); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Rect)) return false; + Rect it = (Rect) obj; + return x == it.x && y == it.y && width == it.width && height == it.height; + } + + @Override + public String toString() { + return "{" + x + ", " + y + ", " + width + "x" + height + "}"; + } +} diff --git a/src/org/opencv/core/RotatedRect.java b/src/org/opencv/core/RotatedRect.java new file mode 100644 index 0000000..f905361 --- /dev/null +++ b/src/org/opencv/core/RotatedRect.java @@ -0,0 +1,112 @@ +package org.opencv.core; + +public class RotatedRect { + + public Point center; + public Size size; + public double angle; + + public RotatedRect() { + this.center = new Point(); + this.size = new Size(); + this.angle = 0; + } + + public RotatedRect(Point c, Size s, double a) { + this.center = c.clone(); + this.size = s.clone(); + this.angle = a; + } + + public RotatedRect(double[] vals) { + this(); + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + center.x = vals.length > 0 ? (double) vals[0] : 0; + center.y = vals.length > 1 ? (double) vals[1] : 0; + size.width = vals.length > 2 ? (double) vals[2] : 0; + size.height = vals.length > 3 ? (double) vals[3] : 0; + angle = vals.length > 4 ? 
(double) vals[4] : 0; + } else { + center.x = 0; + center.x = 0; + size.width = 0; + size.height = 0; + angle = 0; + } + } + + public void points(Point pt[]) + { + double _angle = angle * Math.PI / 180.0; + double b = (double) Math.cos(_angle) * 0.5f; + double a = (double) Math.sin(_angle) * 0.5f; + + pt[0] = new Point( + center.x - a * size.height - b * size.width, + center.y + b * size.height - a * size.width); + + pt[1] = new Point( + center.x + a * size.height - b * size.width, + center.y - b * size.height - a * size.width); + + pt[2] = new Point( + 2 * center.x - pt[0].x, + 2 * center.y - pt[0].y); + + pt[3] = new Point( + 2 * center.x - pt[1].x, + 2 * center.y - pt[1].y); + } + + public Rect boundingRect() + { + Point pt[] = new Point[4]; + points(pt); + Rect r = new Rect((int) Math.floor(Math.min(Math.min(Math.min(pt[0].x, pt[1].x), pt[2].x), pt[3].x)), + (int) Math.floor(Math.min(Math.min(Math.min(pt[0].y, pt[1].y), pt[2].y), pt[3].y)), + (int) Math.ceil(Math.max(Math.max(Math.max(pt[0].x, pt[1].x), pt[2].x), pt[3].x)), + (int) Math.ceil(Math.max(Math.max(Math.max(pt[0].y, pt[1].y), pt[2].y), pt[3].y))); + r.width -= r.x - 1; + r.height -= r.y - 1; + return r; + } + + public RotatedRect clone() { + return new RotatedRect(center, size, angle); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(center.x); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(center.y); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(size.width); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(size.height); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(angle); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof RotatedRect)) return false; + RotatedRect it = (RotatedRect) obj; + return center.equals(it.center) && size.equals(it.size) && angle == it.angle; + } + + @Override + public String toString() { + return "{ " + center + " " + size + " * " + angle + " }"; + } +} diff --git a/src/org/opencv/core/Scalar.java b/src/org/opencv/core/Scalar.java new file mode 100644 index 0000000..ce87e7f --- /dev/null +++ b/src/org/opencv/core/Scalar.java @@ -0,0 +1,106 @@ +package org.opencv.core; + +/** + *
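The inclusive/exclusive boundary rule for Rect and the corner geometry of RotatedRect can be exercised directly; a sketch:

    Rect roi = new Rect(new Point(10, 10), new Size(30, 20));
    System.out.println(roi.contains(new Point(39, 29))); // true: right/bottom are exclusive
    System.out.println(roi.contains(new Point(40, 30))); // false

    RotatedRect rr = new RotatedRect(new Point(50, 50), new Size(40, 20), 30.0);
    Point[] corners = new Point[4];
    rr.points(corners);              // the four rotated corner points
    Rect bounds = rr.boundingRect(); // axis-aligned box enclosing them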

Template class for a 4-element vector derived from Vec.

+ * + *

template<typename _Tp> class Scalar_ : public Vec<_Tp, 4> {... };

+ * + *

// C++ code:

+ * + *

typedef Scalar_<double> Scalar;

+ * + *

Being derived from Vec<_Tp, 4>, Scalar_ and + * Scalar can be used just as typical 4-element vectors. In + * addition, they can be converted to/from CvScalar. The type + * Scalar is widely used in OpenCV to pass pixel values. + *

+ * + * @see org.opencv.core.Scalar_ + */ +public class Scalar { + + public double val[]; + + public Scalar(double v0, double v1, double v2, double v3) { + val = new double[] { v0, v1, v2, v3 }; + } + + public Scalar(double v0, double v1, double v2) { + val = new double[] { v0, v1, v2, 0 }; + } + + public Scalar(double v0, double v1) { + val = new double[] { v0, v1, 0, 0 }; + } + + public Scalar(double v0) { + val = new double[] { v0, 0, 0, 0 }; + } + + public Scalar(double[] vals) { + if (vals != null && vals.length == 4) + val = vals.clone(); + else { + val = new double[4]; + set(vals); + } + } + + public void set(double[] vals) { + if (vals != null) { + val[0] = vals.length > 0 ? vals[0] : 0; + val[1] = vals.length > 1 ? vals[1] : 0; + val[2] = vals.length > 2 ? vals[2] : 0; + val[3] = vals.length > 3 ? vals[3] : 0; + } else + val[0] = val[1] = val[2] = val[3] = 0; + } + + public static Scalar all(double v) { + return new Scalar(v, v, v, v); + } + + public Scalar clone() { + return new Scalar(val); + } + + public Scalar mul(Scalar it, double scale) { + return new Scalar(val[0] * it.val[0] * scale, val[1] * it.val[1] * scale, + val[2] * it.val[2] * scale, val[3] * it.val[3] * scale); + } + + public Scalar mul(Scalar it) { + return mul(it, 1); + } + + public Scalar conj() { + return new Scalar(val[0], -val[1], -val[2], -val[3]); + } + + public boolean isReal() { + return val[1] == 0 && val[2] == 0 && val[3] == 0; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + java.util.Arrays.hashCode(val); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Scalar)) return false; + Scalar it = (Scalar) obj; + if (!java.util.Arrays.equals(val, it.val)) return false; + return true; + } + + @Override + public String toString() { + return "[" + val[0] + ", " + val[1] + ", " + val[2] + ", " + val[3] + "]"; + } + +} diff --git a/src/org/opencv/core/Size.java b/src/org/opencv/core/Size.java new file mode 100644 index 0000000..cf84ff5 --- /dev/null +++ b/src/org/opencv/core/Size.java @@ -0,0 +1,87 @@ +package org.opencv.core; + +/** + *
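Scalar is the 4-element value OpenCV passes around for colors and per-channel constants; the channel order follows the Mat it is applied to (BGR for the usual color images). A sketch:

    Scalar blue = new Scalar(255, 0, 0);     // B, G, R; the fourth channel defaults to 0
    Scalar half = blue.mul(Scalar.all(0.5)); // element-wise product: [127.5, 0, 0, 0]
    System.out.println(half + " isReal=" + half.isReal()); // true: channels 1..3 are zero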

Template class for specifying the size of an image or rectangle. The class + * includes two members called width and height. The + * structure can be converted to and from the old OpenCV structures + * CvSize and CvSize2D32f. The same set of arithmetic + * and comparison operations as for Point_ is available.

+ * + *

OpenCV defines the following Size_<> aliases:

+ * + *

typedef Size_<int> Size2i;

+ * + *

// C++ code:

+ * + *

typedef Size2i Size;

+ * + *

typedef Size_<float> Size2f;

+ * + * @see org.opencv.core.Size_ + */ +public class Size { + + public double width, height; + + public Size(double width, double height) { + this.width = width; + this.height = height; + } + + public Size() { + this(0, 0); + } + + public Size(Point p) { + width = p.x; + height = p.y; + } + + public Size(double[] vals) { + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + width = vals.length > 0 ? vals[0] : 0; + height = vals.length > 1 ? vals[1] : 0; + } else { + width = 0; + height = 0; + } + } + + public double area() { + return width * height; + } + + public Size clone() { + return new Size(width, height); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(height); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(width); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof Size)) return false; + Size it = (Size) obj; + return width == it.width && height == it.height; + } + + @Override + public String toString() { + return (int)width + "x" + (int)height; + } + +} diff --git a/src/org/opencv/core/TermCriteria.java b/src/org/opencv/core/TermCriteria.java new file mode 100644 index 0000000..f601556 --- /dev/null +++ b/src/org/opencv/core/TermCriteria.java @@ -0,0 +1,100 @@ +package org.opencv.core; + +/** + *

The class defining termination criteria for iterative algorithms. You can + * initialize it with the default constructor and then override any parameters, or the + * structure may be fully initialized using the advanced variant of the + * constructor.

+ * + * @see org.opencv.core.TermCriteria + */ +public class TermCriteria { + + /** + * The maximum number of iterations or elements to compute + */ + public static final int COUNT = 1; + /** + * The maximum number of iterations or elements to compute + */ + public static final int MAX_ITER = COUNT; + /** + * The desired accuracy threshold or change in parameters at which the iterative algorithm is terminated. + */ + public static final int EPS = 2; + + public int type; + public int maxCount; + public double epsilon; + + /** + * Termination criteria for iterative algorithms. + * + * @param type + * the type of termination criteria: COUNT, EPS or COUNT + EPS. + * @param maxCount + * the maximum number of iterations/elements. + * @param epsilon + * the desired accuracy. + */ + public TermCriteria(int type, int maxCount, double epsilon) { + this.type = type; + this.maxCount = maxCount; + this.epsilon = epsilon; + } + + /** + * Termination criteria for iterative algorithms. + */ + public TermCriteria() { + this(0, 0, 0.0); + } + + public TermCriteria(double[] vals) { + set(vals); + } + + public void set(double[] vals) { + if (vals != null) { + type = vals.length > 0 ? (int) vals[0] : 0; + maxCount = vals.length > 1 ? (int) vals[1] : 0; + epsilon = vals.length > 2 ? (double) vals[2] : 0; + } else { + type = 0; + maxCount = 0; + epsilon = 0; + } + } + + public TermCriteria clone() { + return new TermCriteria(type, maxCount, epsilon); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + long temp; + temp = Double.doubleToLongBits(type); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(maxCount); + result = prime * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(epsilon); + result = prime * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof TermCriteria)) return false; + TermCriteria it = (TermCriteria) obj; + return type == it.type && maxCount == it.maxCount && epsilon == it.epsilon; + } + + @Override + public String toString() { + if (this == null) return "null"; + return "{ type: " + type + ", maxCount: " + maxCount + ", epsilon: " + epsilon + "}"; + } +} diff --git a/src/org/opencv/core/package.bluej b/src/org/opencv/core/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/features2d/DMatch.java b/src/org/opencv/features2d/DMatch.java new file mode 100644 index 0000000..d520a2e --- /dev/null +++ b/src/org/opencv/features2d/DMatch.java @@ -0,0 +1,57 @@ +package org.opencv.features2d; + +//C++: class DMatch + +/** + * Structure for matching: query descriptor index, train descriptor index, train + * image index and distance between descriptors. + */ +public class DMatch { + + /** + * Query descriptor index. + */ + public int queryIdx; + /** + * Train descriptor index. + */ + public int trainIdx; + /** + * Train image index. + */ + public int imgIdx; + + public float distance; + + public DMatch() { + this(-1, -1, Float.MAX_VALUE); + } + + public DMatch(int _queryIdx, int _trainIdx, float _distance) { + queryIdx = _queryIdx; + trainIdx = _trainIdx; + imgIdx = -1; + distance = _distance; + } + + public DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance) { + queryIdx = _queryIdx; + trainIdx = _trainIdx; + imgIdx = _imgIdx; + distance = _distance; + } + + /** + * Less is better. 
+ */ + public boolean lessThan(DMatch it) { + return distance < it.distance; + } + + @Override + public String toString() { + return "DMatch [queryIdx=" + queryIdx + ", trainIdx=" + trainIdx + + ", imgIdx=" + imgIdx + ", distance=" + distance + "]"; + } + +} diff --git a/src/org/opencv/features2d/DescriptorExtractor.java b/src/org/opencv/features2d/DescriptorExtractor.java new file mode 100644 index 0000000..dff80f4 --- /dev/null +++ b/src/org/opencv/features2d/DescriptorExtractor.java @@ -0,0 +1,278 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.features2d; + +import java.lang.String; +import java.util.ArrayList; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfKeyPoint; +import org.opencv.utils.Converters; + +// C++: class javaDescriptorExtractor +/** + *

Abstract base class for computing descriptors for image keypoints.

+ * + *

class CV_EXPORTS DescriptorExtractor

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

virtual ~DescriptorExtractor();

+ * + *

void compute(const Mat& image, vector<KeyPoint>& keypoints,

+ * + *

Mat& descriptors) const;

+ * + *

void compute(const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints,

+ * + *

vector<Mat>& descriptors) const;

+ * + *

virtual void read(const FileNode&);

+ * + *

virtual void write(FileStorage&) const;

+ * + *

virtual int descriptorSize() const = 0;

+ * + *

virtual int descriptorType() const = 0;

+ * + *

static Ptr<DescriptorExtractor> create(const string& descriptorExtractorType);

+ * + *

protected:...

+ * + *

};

+ * + *

In this interface, a keypoint descriptor can be represented as a

+ * + *

dense, fixed-dimension vector of a basic type. Most descriptors follow this + * pattern as it simplifies computing distances between descriptors. Therefore, + * a collection of descriptors is represented as "Mat", where each row is a + * keypoint descriptor.

+ * + * @see org.opencv.features2d.DescriptorExtractor : public Algorithm + */ +public class DescriptorExtractor { + + protected final long nativeObj; + protected DescriptorExtractor(long addr) { nativeObj = addr; } + + + private static final int + OPPONENTEXTRACTOR = 1000; + + + public static final int + SIFT = 1, + SURF = 2, + ORB = 3, + BRIEF = 4, + BRISK = 5, + FREAK = 6, + OPPONENT_SIFT = OPPONENTEXTRACTOR + SIFT, + OPPONENT_SURF = OPPONENTEXTRACTOR + SURF, + OPPONENT_ORB = OPPONENTEXTRACTOR + ORB, + OPPONENT_BRIEF = OPPONENTEXTRACTOR + BRIEF, + OPPONENT_BRISK = OPPONENTEXTRACTOR + BRISK, + OPPONENT_FREAK = OPPONENTEXTRACTOR + FREAK; + + + // + // C++: void javaDescriptorExtractor::compute(Mat image, vector_KeyPoint& keypoints, Mat descriptors) + // + +/** + *

Computes the descriptors for a set of keypoints detected in an image (first + * variant) or image set (second variant).

+ * + * @param image Image. + * @param keypoints Input collection of keypoints. Keypoints for which a + * descriptor cannot be computed are removed. Sometimes new keypoints can be + * added, for example: SIFT duplicates keypoint with several + * dominant orientations (for each orientation). + * @param descriptors Computed descriptors. In the second variant of the method + * descriptors[i] are descriptors computed for a keypoints[i]". + * Row j is the keypoints (or keypoints[i]) + * is the descriptor for keypoint j"-th keypoint. + * + * @see org.opencv.features2d.DescriptorExtractor.compute + */ + public void compute(Mat image, MatOfKeyPoint keypoints, Mat descriptors) + { + Mat keypoints_mat = keypoints; + compute_0(nativeObj, image.nativeObj, keypoints_mat.nativeObj, descriptors.nativeObj); + + return; + } + + + // + // C++: void javaDescriptorExtractor::compute(vector_Mat images, vector_vector_KeyPoint& keypoints, vector_Mat& descriptors) + // + +/** + *

Computes the descriptors for a set of keypoints detected in an image (first + * variant) or image set (second variant).

+ * + * @param images Image set. + * @param keypoints Input collection of keypoints. Keypoints for which a + * descriptor cannot be computed are removed. Sometimes new keypoints can be + * added, for example: SIFT duplicates keypoint with several + * dominant orientations (for each orientation). + * @param descriptors Computed descriptors. In the second variant of the method + * descriptors[i] are descriptors computed for a keypoints[i]". + * Row j is the keypoints (or keypoints[i]) + * is the descriptor for keypoint j"-th keypoint. + * + * @see org.opencv.features2d.DescriptorExtractor.compute + */ + public void compute(List images, List keypoints, List descriptors) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + List keypoints_tmplm = new ArrayList((keypoints != null) ? keypoints.size() : 0); + Mat keypoints_mat = Converters.vector_vector_KeyPoint_to_Mat(keypoints, keypoints_tmplm); + Mat descriptors_mat = new Mat(); + compute_1(nativeObj, images_mat.nativeObj, keypoints_mat.nativeObj, descriptors_mat.nativeObj); + Converters.Mat_to_vector_vector_KeyPoint(keypoints_mat, keypoints); + Converters.Mat_to_vector_Mat(descriptors_mat, descriptors); + return; + } + + + // + // C++: static javaDescriptorExtractor* javaDescriptorExtractor::create(int extractorType) + // + +/** + *

Creates a descriptor extractor by name.

+ * + *

The current implementation supports the following types of a descriptor + * extractor:

+ *
    + *
  • "SIFT" -- "SIFT" + *
  • "SURF" -- "SURF" + *
  • "ORB" -- "ORB" + *
  • "BRISK" -- "BRISK" + *
  • "BRIEF" -- "BriefDescriptorExtractor" + *
+ * + *

A combined format is also supported: descriptor extractor adapter name + * ("Opponent" -- "OpponentColorDescriptorExtractor") + descriptor + * extractor name (see above), for example: "OpponentSIFT".

+ * + * @param extractorType a extractorType + * + * @see org.opencv.features2d.DescriptorExtractor.create + */ + public static DescriptorExtractor create(int extractorType) + { + + DescriptorExtractor retVal = new DescriptorExtractor(create_0(extractorType)); + + return retVal; + } + + + // + // C++: int javaDescriptorExtractor::descriptorSize() + // + + public int descriptorSize() + { + + int retVal = descriptorSize_0(nativeObj); + + return retVal; + } + + + // + // C++: int javaDescriptorExtractor::descriptorType() + // + + public int descriptorType() + { + + int retVal = descriptorType_0(nativeObj); + + return retVal; + } + + + // + // C++: bool javaDescriptorExtractor::empty() + // + + public boolean empty() + { + + boolean retVal = empty_0(nativeObj); + + return retVal; + } + + + // + // C++: void javaDescriptorExtractor::read(string fileName) + // + + public void read(String fileName) + { + + read_0(nativeObj, fileName); + + return; + } + + + // + // C++: void javaDescriptorExtractor::write(string fileName) + // + + public void write(String fileName) + { + + write_0(nativeObj, fileName); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: void javaDescriptorExtractor::compute(Mat image, vector_KeyPoint& keypoints, Mat descriptors) + private static native void compute_0(long nativeObj, long image_nativeObj, long keypoints_mat_nativeObj, long descriptors_nativeObj); + + // C++: void javaDescriptorExtractor::compute(vector_Mat images, vector_vector_KeyPoint& keypoints, vector_Mat& descriptors) + private static native void compute_1(long nativeObj, long images_mat_nativeObj, long keypoints_mat_nativeObj, long descriptors_mat_nativeObj); + + // C++: static javaDescriptorExtractor* javaDescriptorExtractor::create(int extractorType) + private static native long create_0(int extractorType); + + // C++: int javaDescriptorExtractor::descriptorSize() + private static native int descriptorSize_0(long nativeObj); + + // C++: int javaDescriptorExtractor::descriptorType() + private static native int descriptorType_0(long nativeObj); + + // C++: bool javaDescriptorExtractor::empty() + private static native boolean empty_0(long nativeObj); + + // C++: void javaDescriptorExtractor::read(string fileName) + private static native void read_0(long nativeObj, String fileName); + + // C++: void javaDescriptorExtractor::write(string fileName) + private static native void write_0(long nativeObj, String fileName); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/features2d/DescriptorMatcher.java b/src/org/opencv/features2d/DescriptorMatcher.java new file mode 100644 index 0000000..40a7613 --- /dev/null +++ b/src/org/opencv/features2d/DescriptorMatcher.java @@ -0,0 +1,742 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.features2d; + +import java.lang.String; +import java.util.ArrayList; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfDMatch; +import org.opencv.utils.Converters; + +// C++: class javaDescriptorMatcher +/** + *
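Typical use of DescriptorExtractor: create one by type id and compute descriptors for previously detected keypoints. The sketch below assumes the companion FeatureDetector class from this package, a loaded native library, and a valid input Mat named image:

    FeatureDetector detector = FeatureDetector.create(FeatureDetector.ORB);
    DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);

    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    detector.detect(image, keypoints);                // image is assumed to exist

    Mat descriptors = new Mat();
    extractor.compute(image, keypoints, descriptors); // one row per surviving keypoint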

Abstract base class for matching keypoint descriptors. It has two groups of + * match methods: for matching descriptors of an image with another image or + * with an image set.

+ * + *

class DescriptorMatcher

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

virtual ~DescriptorMatcher();

+ * + *

virtual void add(const vector<Mat>& descriptors);

+ * + *

const vector<Mat>& getTrainDescriptors() const;

+ * + *

virtual void clear();

+ * + *

bool empty() const;

+ * + *

virtual bool isMaskSupported() const = 0;

+ * + *

virtual void train();

+ * + *

/ *

+ *
    + *
  • Group of methods to match descriptors from an image pair. + *
  • / + *
+ * + *

void match(const Mat& queryDescriptors, const Mat& trainDescriptors,

+ * + *

vector<DMatch>& matches, const Mat& mask=Mat()) const;

+ * + *

void knnMatch(const Mat& queryDescriptors, const Mat& trainDescriptors,

+ * + *

vector<vector<DMatch> >& matches, int k,

+ * + *

const Mat& mask=Mat(), bool compactResult=false) const;

+ * + *

void radiusMatch(const Mat& queryDescriptors, const Mat& trainDescriptors,

+ * + *

vector<vector<DMatch> >& matches, float maxDistance,

+ * + *

const Mat& mask=Mat(), bool compactResult=false) const;

+ * + *

/ *

+ *
    + *
  • Group of methods to match descriptors from one image to an image set. + *
  • / + *
+ * + *

void match(const Mat& queryDescriptors, vector<DMatch>& matches,

+ * + *

const vector<Mat>& masks=vector<Mat>());

+ * + *

void knnMatch(const Mat& queryDescriptors, vector<vector<DMatch> >& matches,

+ * + *

int k, const vector<Mat>& masks=vector<Mat>(),

+ * + *

bool compactResult=false);

+ * + *

void radiusMatch(const Mat& queryDescriptors, vector<vector<DMatch> >& + * matches,

+ * + *

float maxDistance, const vector<Mat>& masks=vector<Mat>(),

+ * + *

bool compactResult=false);

+ * + *

virtual void read(const FileNode&);

+ * + *

virtual void write(FileStorage&) const;

+ * + *

virtual Ptr<DescriptorMatcher> clone(bool emptyTrainData=false) const = 0;

+ * + *

static Ptr<DescriptorMatcher> create(const string& descriptorMatcherType);

+ * + *

protected:

+ * + *

vector<Mat> trainDescCollection;...

+ * + *

};

+ * + * @see org.opencv.features2d.DescriptorMatcher : public Algorithm + */ +public class DescriptorMatcher { + + protected final long nativeObj; + protected DescriptorMatcher(long addr) { nativeObj = addr; } + + + public static final int + FLANNBASED = 1, + BRUTEFORCE = 2, + BRUTEFORCE_L1 = 3, + BRUTEFORCE_HAMMING = 4, + BRUTEFORCE_HAMMINGLUT = 5, + BRUTEFORCE_SL2 = 6; + + + // + // C++: void javaDescriptorMatcher::add(vector_Mat descriptors) + // + +/** + *

Adds descriptors to train a descriptor collection. If the collection + * trainDescCollection is not empty, the new descriptors are + * added to the existing train descriptors.

+ * + * @param descriptors Descriptors to add. Each descriptors[i] is a + * set of descriptors from the same train image. + * + * @see org.opencv.features2d.DescriptorMatcher.add + */ + public void add(List descriptors) + { + Mat descriptors_mat = Converters.vector_Mat_to_Mat(descriptors); + add_0(nativeObj, descriptors_mat.nativeObj); + + return; + } + + + // + // C++: void javaDescriptorMatcher::clear() + // + +/** + *

Clears the train descriptor collection.

+ * + * @see org.opencv.features2d.DescriptorMatcher.clear + */ + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: javaDescriptorMatcher* javaDescriptorMatcher::jclone(bool emptyTrainData = false) + // + + public DescriptorMatcher clone(boolean emptyTrainData) + { + + DescriptorMatcher retVal = new DescriptorMatcher(clone_0(nativeObj, emptyTrainData)); + + return retVal; + } + + public DescriptorMatcher clone() + { + + DescriptorMatcher retVal = new DescriptorMatcher(clone_1(nativeObj)); + + return retVal; + } + + + // + // C++: static javaDescriptorMatcher* javaDescriptorMatcher::create(int matcherType) + // + +/** + *

Creates a descriptor matcher of a given type with the default parameters + * (using default constructor).

+ * + * @param matcherType a matcherType + * + * @see org.opencv.features2d.DescriptorMatcher.create + */ + public static DescriptorMatcher create(int matcherType) + { + + DescriptorMatcher retVal = new DescriptorMatcher(create_0(matcherType)); + + return retVal; + } + + + // + // C++: bool javaDescriptorMatcher::empty() + // + +/** + *

Returns true if there are no train descriptors in the collection.

+ * + * @see org.opencv.features2d.DescriptorMatcher.empty + */ + public boolean empty() + { + + boolean retVal = empty_0(nativeObj); + + return retVal; + } + + + // + // C++: vector_Mat javaDescriptorMatcher::getTrainDescriptors() + // + +/** + *

Returns a constant link to the train descriptor collection trainDescCollection.

+ * + * @see org.opencv.features2d.DescriptorMatcher.getTrainDescriptors + */ + public List getTrainDescriptors() + { + List retVal = new ArrayList(); + Mat retValMat = new Mat(getTrainDescriptors_0(nativeObj)); + Converters.Mat_to_vector_Mat(retValMat, retVal); + return retVal; + } + + + // + // C++: bool javaDescriptorMatcher::isMaskSupported() + // + +/** + *

Returns true if the descriptor matcher supports masking permissible matches.

+ * + * @see org.opencv.features2d.DescriptorMatcher.isMaskSupported + */ + public boolean isMaskSupported() + { + + boolean retVal = isMaskSupported_0(nativeObj); + + return retVal; + } + + + // + // C++: void javaDescriptorMatcher::knnMatch(Mat queryDescriptors, Mat trainDescriptors, vector_vector_DMatch& matches, int k, Mat mask = Mat(), bool compactResult = false) + // + +/** + *

Finds the k best matches for each descriptor from a query set.

+ * + *

These extended variants of "DescriptorMatcher.match" methods find several + * best matches for each query descriptor. The matches are returned in the + * distance increasing order. See "DescriptorMatcher.match" for the details + * about query and train descriptors.

+ * + * @param queryDescriptors Query set of descriptors. + * @param trainDescriptors Train set of descriptors. This set is not added to + * the train descriptors collection stored in the class object. + * @param matches Matches. Each matches[i] is k or less matches for + * the same query descriptor. + * @param k Count of best matches found per each query descriptor or less if a + * query descriptor has less than k possible matches in total. + * @param mask Mask specifying permissible matches between an input query and + * train matrices of descriptors. + * @param compactResult Parameter used when the mask (or masks) is not empty. If + * compactResult is false, the matches vector has the + * same size as queryDescriptors rows. If compactResult + * is true, the matches vector does not contain matches for fully + * masked-out query descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryDescriptors, Mat trainDescriptors, List matches, int k, Mat mask, boolean compactResult) + { + Mat matches_mat = new Mat(); + knnMatch_0(nativeObj, queryDescriptors.nativeObj, trainDescriptors.nativeObj, matches_mat.nativeObj, k, mask.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *
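A common use of knnMatch with k = 2 is Lowe's ratio test: keep a match only when it is clearly better than the runner-up. The 0.75 threshold is a conventional choice, not something this API prescribes; matcher and the two descriptor Mats are assumed to already exist:

    List<MatOfDMatch> knn = new ArrayList<MatOfDMatch>();
    matcher.knnMatch(queryDescriptors, trainDescriptors, knn, 2, new Mat(), false);

    List<DMatch> good = new ArrayList<DMatch>();
    for (MatOfDMatch pair : knn) {
        DMatch[] m = pair.toArray();
        if (m.length >= 2 && m[0].distance < 0.75f * m[1].distance)
            good.add(m[0]); // unambiguous best match
    }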

Finds the k best matches for each descriptor from a query set.

+ * + *

These extended variants of "DescriptorMatcher.match" methods find several + * best matches for each query descriptor. The matches are returned in the + * distance increasing order. See "DescriptorMatcher.match" for the details + * about query and train descriptors.

+ * + * @param queryDescriptors Query set of descriptors. + * @param trainDescriptors Train set of descriptors. This set is not added to + * the train descriptors collection stored in the class object. + * @param matches Matches. Each matches[i] is k or less matches for + * the same query descriptor. + * @param k Count of best matches found per each query descriptor or less if a + * query descriptor has less than k possible matches in total. + * + * @see org.opencv.features2d.DescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryDescriptors, Mat trainDescriptors, List matches, int k) + { + Mat matches_mat = new Mat(); + knnMatch_1(nativeObj, queryDescriptors.nativeObj, trainDescriptors.nativeObj, matches_mat.nativeObj, k); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaDescriptorMatcher::knnMatch(Mat queryDescriptors, vector_vector_DMatch& matches, int k, vector_Mat masks = vector(), bool compactResult = false) + // + +/** + *

Finds the k best matches for each descriptor from a query set.

+ * + *

These extended variants of "DescriptorMatcher.match" methods find several + * best matches for each query descriptor. The matches are returned in the + * distance increasing order. See "DescriptorMatcher.match" for the details + * about query and train descriptors.

+ * + * @param queryDescriptors Query set of descriptors. + * @param matches Matches. Each matches[i] is k or less matches for + * the same query descriptor. + * @param k Count of best matches found per each query descriptor or less if a + * query descriptor has less than k possible matches in total. + * @param masks Set of masks. Each masks[i] specifies permissible + * matches between the input query descriptors and stored train descriptors from + * the i-th image trainDescCollection[i]. + * @param compactResult Parameter used when the mask (or masks) is not empty. If + * compactResult is false, the matches vector has the + * same size as queryDescriptors rows. If compactResult + * is true, the matches vector does not contain matches for fully + * masked-out query descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryDescriptors, List matches, int k, List masks, boolean compactResult) + { + Mat matches_mat = new Mat(); + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + knnMatch_2(nativeObj, queryDescriptors.nativeObj, matches_mat.nativeObj, k, masks_mat.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

Finds the k best matches for each descriptor from a query set.

+ * + *

These extended variants of "DescriptorMatcher.match" methods find several + * best matches for each query descriptor. The matches are returned in the + * distance increasing order. See "DescriptorMatcher.match" for the details + * about query and train descriptors.

+ * + * @param queryDescriptors Query set of descriptors. + * @param matches Matches. Each matches[i] is k or less matches for + * the same query descriptor. + * @param k Count of best matches found per each query descriptor or less if a + * query descriptor has less than k possible matches in total. + * + * @see org.opencv.features2d.DescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryDescriptors, List matches, int k) + { + Mat matches_mat = new Mat(); + knnMatch_3(nativeObj, queryDescriptors.nativeObj, matches_mat.nativeObj, k); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaDescriptorMatcher::match(Mat queryDescriptors, Mat trainDescriptors, vector_DMatch& matches, Mat mask = Mat()) + // + +/** + *

Finds the best match for each descriptor from a query set.

+ * + *

In the first variant of this method, the train descriptors are passed as an + * input argument. In the second variant of the method, train descriptors + * collection that was set by DescriptorMatcher.add is used. + * Optional mask (or masks) can be passed to specify which query and training + * descriptors can be matched. Namely, queryDescriptors[i] can be + * matched with trainDescriptors[j] only if mask.at(i,j) + * is non-zero.

+ * + * @param queryDescriptors Query set of descriptors. + * @param trainDescriptors Train set of descriptors. This set is not added to + * the train descriptors collection stored in the class object. + * @param matches Matches. If a query descriptor is masked out in + * mask, no match is added for this descriptor. So, + * matches size may be smaller than the query descriptors count. + * @param mask Mask specifying permissible matches between an input query and + * train matrices of descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.match + */ + public void match(Mat queryDescriptors, Mat trainDescriptors, MatOfDMatch matches, Mat mask) + { + Mat matches_mat = matches; + match_0(nativeObj, queryDescriptors.nativeObj, trainDescriptors.nativeObj, matches_mat.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Finds the best match for each descriptor from a query set.

+ * + *

In the first variant of this method, the train descriptors are passed as an + * input argument. In the second variant of the method, train descriptors + * collection that was set by DescriptorMatcher.add is used. + * Optional mask (or masks) can be passed to specify which query and training + * descriptors can be matched. Namely, queryDescriptors[i] can be + * matched with trainDescriptors[j] only if mask.at(i,j) + * is non-zero.

+ * + * @param queryDescriptors Query set of descriptors. + * @param trainDescriptors Train set of descriptors. This set is not added to + * the train descriptors collection stored in the class object. + * @param matches Matches. If a query descriptor is masked out in + * mask, no match is added for this descriptor. So, + * matches size may be smaller than the query descriptors count. + * + * @see org.opencv.features2d.DescriptorMatcher.match + */ + public void match(Mat queryDescriptors, Mat trainDescriptors, MatOfDMatch matches) + { + Mat matches_mat = matches; + match_1(nativeObj, queryDescriptors.nativeObj, trainDescriptors.nativeObj, matches_mat.nativeObj); + + return; + } + + + // + // C++: void javaDescriptorMatcher::match(Mat queryDescriptors, vector_DMatch& matches, vector_Mat masks = vector()) + // + +/** + *
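For plain one-to-one matching the mask-free overload above is usually enough; results come back in a MatOfDMatch that unpacks via toList(). A sketch assuming binary (e.g. ORB) descriptors:

    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
    MatOfDMatch matches = new MatOfDMatch();
    matcher.match(queryDescriptors, trainDescriptors, matches);
    for (DMatch m : matches.toList()) {
        // m.queryIdx / m.trainIdx index rows of the two descriptor Mats
        System.out.println(m);
    }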

Finds the best match for each descriptor from a query set.

+ * + *

In the first variant of this method, the train descriptors are passed as an + * input argument. In the second variant of the method, train descriptors + * collection that was set by DescriptorMatcher.add is used. + * Optional mask (or masks) can be passed to specify which query and training + * descriptors can be matched. Namely, queryDescriptors[i] can be + * matched with trainDescriptors[j] only if mask.at(i,j) + * is non-zero.

+ * + * @param queryDescriptors Query set of descriptors. + * @param matches Matches. If a query descriptor is masked out in + * mask, no match is added for this descriptor. So, + * matches size may be smaller than the query descriptors count. + * @param masks Set of masks. Each masks[i] specifies permissible + * matches between the input query descriptors and stored train descriptors from + * the i-th image trainDescCollection[i]. + * + * @see org.opencv.features2d.DescriptorMatcher.match + */ + public void match(Mat queryDescriptors, MatOfDMatch matches, List masks) + { + Mat matches_mat = matches; + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + match_2(nativeObj, queryDescriptors.nativeObj, matches_mat.nativeObj, masks_mat.nativeObj); + + return; + } + +/** + *

Finds the best match for each descriptor from a query set.

+ * + *

In the first variant of this method, the train descriptors are passed as an + * input argument. In the second variant of the method, train descriptors + * collection that was set by DescriptorMatcher.add is used. + * Optional mask (or masks) can be passed to specify which query and training + * descriptors can be matched. Namely, queryDescriptors[i] can be + * matched with trainDescriptors[j] only if mask.at(i,j) + * is non-zero.

+ * + * @param queryDescriptors Query set of descriptors. + * @param matches Matches. If a query descriptor is masked out in + * mask, no match is added for this descriptor. So, + * matches size may be smaller than the query descriptors count. + * + * @see org.opencv.features2d.DescriptorMatcher.match + */ + public void match(Mat queryDescriptors, MatOfDMatch matches) + { + Mat matches_mat = matches; + match_3(nativeObj, queryDescriptors.nativeObj, matches_mat.nativeObj); + + return; + } + + + // + // C++: void javaDescriptorMatcher::radiusMatch(Mat queryDescriptors, Mat trainDescriptors, vector_vector_DMatch& matches, float maxDistance, Mat mask = Mat(), bool compactResult = false) + // + +/** + *

For each query descriptor, finds the training descriptors not farther than + * the specified distance.

+ * + *

For each query descriptor, the methods find all training descriptors such that + * the distance between the query descriptor and the training descriptor is + * equal to or smaller than maxDistance. Found matches are returned in + * increasing order of distance.
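A minimal sketch, assuming queryDesc and trainDesc were computed as in the match() sketch above; note that maxDistance is a distance in descriptor space (here, Hamming distance in bits for ORB), not in pixels, and the threshold below is only a placeholder:

import java.util.ArrayList;
import java.util.List;
import org.opencv.core.MatOfDMatch;
import org.opencv.features2d.DescriptorMatcher;

DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
List<MatOfDMatch> matches = new ArrayList<MatOfDMatch>();
matcher.radiusMatch(queryDesc, trainDesc, matches, 40f); // placeholder radius
// matches.get(i) holds every train descriptor within the radius of query
// descriptor i, sorted by increasing distance; it may be empty for some i.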

+ * + * @param queryDescriptors Query set of descriptors. + * @param trainDescriptors Train set of descriptors. This set is not added to + * the train descriptors collection stored in the class object. + * @param matches Found matches. + * @param maxDistance Threshold for the distance between matched descriptors. + * @param mask Mask specifying permissible matches between an input query and + * train matrices of descriptors. + * @param compactResult Parameter used when the mask (or masks) is not empty. If + * compactResult is false, the matches vector has the + * same size as queryDescriptors rows. If compactResult + * is true, the matches vector does not contain matches for fully + * masked-out query descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryDescriptors, Mat trainDescriptors, List matches, float maxDistance, Mat mask, boolean compactResult) + { + Mat matches_mat = new Mat(); + radiusMatch_0(nativeObj, queryDescriptors.nativeObj, trainDescriptors.nativeObj, matches_mat.nativeObj, maxDistance, mask.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

For each query descriptor, finds the training descriptors not farther than + * the specified distance.

+ * + *

For each query descriptor, the methods find all training descriptors such that + * the distance between the query descriptor and the training descriptor is + * equal to or smaller than maxDistance. Found matches are returned in + * increasing order of distance.

+ * + * @param queryDescriptors Query set of descriptors. + * @param trainDescriptors Train set of descriptors. This set is not added to + * the train descriptors collection stored in the class object. + * @param matches Found matches. + * @param maxDistance Threshold for the distance between matched descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryDescriptors, Mat trainDescriptors, List matches, float maxDistance) + { + Mat matches_mat = new Mat(); + radiusMatch_1(nativeObj, queryDescriptors.nativeObj, trainDescriptors.nativeObj, matches_mat.nativeObj, maxDistance); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaDescriptorMatcher::radiusMatch(Mat queryDescriptors, vector_vector_DMatch& matches, float maxDistance, vector_Mat masks = vector(), bool compactResult = false) + // + +/** + *

For each query descriptor, finds the training descriptors not farther than + * the specified distance.

+ * + *

For each query descriptor, the methods find all training descriptors such that + * the distance between the query descriptor and the training descriptor is + * equal to or smaller than maxDistance. Found matches are returned in + * increasing order of distance.

+ * + * @param queryDescriptors Query set of descriptors. + * @param matches Found matches. + * @param maxDistance Threshold for the distance between matched descriptors. + * @param masks Set of masks. Each masks[i] specifies permissible + * matches between the input query descriptors and stored train descriptors from + * the i-th image trainDescCollection[i]. + * @param compactResult Parameter used when the mask (or masks) is not empty. If + * compactResult is false, the matches vector has the + * same size as queryDescriptors rows. If compactResult + * is true, the matches vector does not contain matches for fully + * masked-out query descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryDescriptors, List matches, float maxDistance, List masks, boolean compactResult) + { + Mat matches_mat = new Mat(); + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + radiusMatch_2(nativeObj, queryDescriptors.nativeObj, matches_mat.nativeObj, maxDistance, masks_mat.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

For each query descriptor, finds the training descriptors not farther than + * the specified distance.

+ * + *

For each query descriptor, the methods find all training descriptors such that + * the distance between the query descriptor and the training descriptor is + * equal to or smaller than maxDistance. Found matches are returned in + * increasing order of distance.

+ * + * @param queryDescriptors Query set of descriptors. + * @param matches Found matches. + * @param maxDistance Threshold for the distance between matched descriptors. + * + * @see org.opencv.features2d.DescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryDescriptors, List matches, float maxDistance) + { + Mat matches_mat = new Mat(); + radiusMatch_3(nativeObj, queryDescriptors.nativeObj, matches_mat.nativeObj, maxDistance); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaDescriptorMatcher::read(string fileName) + // + + public void read(String fileName) + { + + read_0(nativeObj, fileName); + + return; + } + + + // + // C++: void javaDescriptorMatcher::train() + // + +/** + *

Trains a descriptor matcher

+ * + *

Trains a descriptor matcher (for example, the flann index). Every + * match method runs train() before matching. + * Some descriptor matchers (for example, BruteForceMatcher) have + * an empty implementation of this method. Other matchers actually train their + * inner structures (for example, FlannBasedMatcher trains + * flann.Index).
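A minimal sketch, assuming trainDesc1, trainDesc2 and queryDesc are precomputed CV_32F descriptor Mats (e.g. SIFT/SURF output, which is what FLANN expects); calling train() explicitly builds the index once, ahead of the first match:

import java.util.Arrays;
import org.opencv.core.MatOfDMatch;
import org.opencv.features2d.DescriptorMatcher;

DescriptorMatcher flann = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
flann.add(Arrays.asList(trainDesc1, trainDesc2)); // fills trainDescCollection
flann.train();                                    // builds the flann index now rather than lazily
MatOfDMatch matches = new MatOfDMatch();
flann.match(queryDesc, matches);                  // second variant: uses the stored collection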

+ * + * @see org.opencv.features2d.DescriptorMatcher.train + */ + public void train() + { + + train_0(nativeObj); + + return; + } + + + // + // C++: void javaDescriptorMatcher::write(string fileName) + // + + public void write(String fileName) + { + + write_0(nativeObj, fileName); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: void javaDescriptorMatcher::add(vector_Mat descriptors) + private static native void add_0(long nativeObj, long descriptors_mat_nativeObj); + + // C++: void javaDescriptorMatcher::clear() + private static native void clear_0(long nativeObj); + + // C++: javaDescriptorMatcher* javaDescriptorMatcher::jclone(bool emptyTrainData = false) + private static native long clone_0(long nativeObj, boolean emptyTrainData); + private static native long clone_1(long nativeObj); + + // C++: static javaDescriptorMatcher* javaDescriptorMatcher::create(int matcherType) + private static native long create_0(int matcherType); + + // C++: bool javaDescriptorMatcher::empty() + private static native boolean empty_0(long nativeObj); + + // C++: vector_Mat javaDescriptorMatcher::getTrainDescriptors() + private static native long getTrainDescriptors_0(long nativeObj); + + // C++: bool javaDescriptorMatcher::isMaskSupported() + private static native boolean isMaskSupported_0(long nativeObj); + + // C++: void javaDescriptorMatcher::knnMatch(Mat queryDescriptors, Mat trainDescriptors, vector_vector_DMatch& matches, int k, Mat mask = Mat(), bool compactResult = false) + private static native void knnMatch_0(long nativeObj, long queryDescriptors_nativeObj, long trainDescriptors_nativeObj, long matches_mat_nativeObj, int k, long mask_nativeObj, boolean compactResult); + private static native void knnMatch_1(long nativeObj, long queryDescriptors_nativeObj, long trainDescriptors_nativeObj, long matches_mat_nativeObj, int k); + + // C++: void javaDescriptorMatcher::knnMatch(Mat queryDescriptors, vector_vector_DMatch& matches, int k, vector_Mat masks = vector(), bool compactResult = false) + private static native void knnMatch_2(long nativeObj, long queryDescriptors_nativeObj, long matches_mat_nativeObj, int k, long masks_mat_nativeObj, boolean compactResult); + private static native void knnMatch_3(long nativeObj, long queryDescriptors_nativeObj, long matches_mat_nativeObj, int k); + + // C++: void javaDescriptorMatcher::match(Mat queryDescriptors, Mat trainDescriptors, vector_DMatch& matches, Mat mask = Mat()) + private static native void match_0(long nativeObj, long queryDescriptors_nativeObj, long trainDescriptors_nativeObj, long matches_mat_nativeObj, long mask_nativeObj); + private static native void match_1(long nativeObj, long queryDescriptors_nativeObj, long trainDescriptors_nativeObj, long matches_mat_nativeObj); + + // C++: void javaDescriptorMatcher::match(Mat queryDescriptors, vector_DMatch& matches, vector_Mat masks = vector()) + private static native void match_2(long nativeObj, long queryDescriptors_nativeObj, long matches_mat_nativeObj, long masks_mat_nativeObj); + private static native void match_3(long nativeObj, long queryDescriptors_nativeObj, long matches_mat_nativeObj); + + // C++: void javaDescriptorMatcher::radiusMatch(Mat queryDescriptors, Mat trainDescriptors, vector_vector_DMatch& matches, float maxDistance, Mat mask = Mat(), bool compactResult = false) + private static native void radiusMatch_0(long nativeObj, long queryDescriptors_nativeObj, long trainDescriptors_nativeObj, long matches_mat_nativeObj, 
float maxDistance, long mask_nativeObj, boolean compactResult); + private static native void radiusMatch_1(long nativeObj, long queryDescriptors_nativeObj, long trainDescriptors_nativeObj, long matches_mat_nativeObj, float maxDistance); + + // C++: void javaDescriptorMatcher::radiusMatch(Mat queryDescriptors, vector_vector_DMatch& matches, float maxDistance, vector_Mat masks = vector(), bool compactResult = false) + private static native void radiusMatch_2(long nativeObj, long queryDescriptors_nativeObj, long matches_mat_nativeObj, float maxDistance, long masks_mat_nativeObj, boolean compactResult); + private static native void radiusMatch_3(long nativeObj, long queryDescriptors_nativeObj, long matches_mat_nativeObj, float maxDistance); + + // C++: void javaDescriptorMatcher::read(string fileName) + private static native void read_0(long nativeObj, String fileName); + + // C++: void javaDescriptorMatcher::train() + private static native void train_0(long nativeObj); + + // C++: void javaDescriptorMatcher::write(string fileName) + private static native void write_0(long nativeObj, String fileName); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/features2d/FeatureDetector.java b/src/org/opencv/features2d/FeatureDetector.java new file mode 100644 index 0000000..7f67e32 --- /dev/null +++ b/src/org/opencv/features2d/FeatureDetector.java @@ -0,0 +1,303 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.features2d; + +import java.lang.String; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfKeyPoint; +import org.opencv.utils.Converters; + +// C++: class javaFeatureDetector +/** + *

Abstract base class for 2D image feature detectors.

+ * + *

class CV_EXPORTS FeatureDetector

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

virtual ~FeatureDetector();

+ * + *

void detect(const Mat& image, vector<KeyPoint>& keypoints,

+ * + *

const Mat& mask=Mat()) const;

+ * + *

void detect(const vector<Mat>& images,

+ * + *

vector<vector<KeyPoint> >& keypoints,

+ * + *

const vector<Mat>& masks=vector<Mat>()) const;

+ * + *

virtual void read(const FileNode&);

+ * + *

virtual void write(FileStorage&) const;

+ * + *

static Ptr<FeatureDetector> create(const string& detectorType);

+ * + *

protected:...

+ * + *

};

+ * + * @see org.opencv.features2d.FeatureDetector : public Algorithm + */ +public class FeatureDetector { + + protected final long nativeObj; + protected FeatureDetector(long addr) { nativeObj = addr; } + + + private static final int + GRIDDETECTOR = 1000, + PYRAMIDDETECTOR = 2000, + DYNAMICDETECTOR = 3000; + + + public static final int + FAST = 1, + STAR = 2, + SIFT = 3, + SURF = 4, + ORB = 5, + MSER = 6, + GFTT = 7, + HARRIS = 8, + SIMPLEBLOB = 9, + DENSE = 10, + BRISK = 11, + GRIDRETECTOR = 1000, + GRID_FAST = GRIDDETECTOR + FAST, + GRID_STAR = GRIDDETECTOR + STAR, + GRID_SIFT = GRIDDETECTOR + SIFT, + GRID_SURF = GRIDDETECTOR + SURF, + GRID_ORB = GRIDDETECTOR + ORB, + GRID_MSER = GRIDDETECTOR + MSER, + GRID_GFTT = GRIDDETECTOR + GFTT, + GRID_HARRIS = GRIDDETECTOR + HARRIS, + GRID_SIMPLEBLOB = GRIDDETECTOR + SIMPLEBLOB, + GRID_DENSE = GRIDDETECTOR + DENSE, + GRID_BRISK = GRIDDETECTOR + BRISK, + PYRAMID_FAST = PYRAMIDDETECTOR + FAST, + PYRAMID_STAR = PYRAMIDDETECTOR + STAR, + PYRAMID_SIFT = PYRAMIDDETECTOR + SIFT, + PYRAMID_SURF = PYRAMIDDETECTOR + SURF, + PYRAMID_ORB = PYRAMIDDETECTOR + ORB, + PYRAMID_MSER = PYRAMIDDETECTOR + MSER, + PYRAMID_GFTT = PYRAMIDDETECTOR + GFTT, + PYRAMID_HARRIS = PYRAMIDDETECTOR + HARRIS, + PYRAMID_SIMPLEBLOB = PYRAMIDDETECTOR + SIMPLEBLOB, + PYRAMID_DENSE = PYRAMIDDETECTOR + DENSE, + PYRAMID_BRISK = PYRAMIDDETECTOR + BRISK, + DYNAMIC_FAST = DYNAMICDETECTOR + FAST, + DYNAMIC_STAR = DYNAMICDETECTOR + STAR, + DYNAMIC_SIFT = DYNAMICDETECTOR + SIFT, + DYNAMIC_SURF = DYNAMICDETECTOR + SURF, + DYNAMIC_ORB = DYNAMICDETECTOR + ORB, + DYNAMIC_MSER = DYNAMICDETECTOR + MSER, + DYNAMIC_GFTT = DYNAMICDETECTOR + GFTT, + DYNAMIC_HARRIS = DYNAMICDETECTOR + HARRIS, + DYNAMIC_SIMPLEBLOB = DYNAMICDETECTOR + SIMPLEBLOB, + DYNAMIC_DENSE = DYNAMICDETECTOR + DENSE, + DYNAMIC_BRISK = DYNAMICDETECTOR + BRISK; + + + // + // C++: static javaFeatureDetector* javaFeatureDetector::create(int detectorType) + // + +/** + *

Creates a feature detector by its name.

+ * + *

The following detector types are supported:

+ *
    + *
  • "FAST" -- "FastFeatureDetector" + *
  • "STAR" -- "StarFeatureDetector" + *
  • "SIFT" -- "SIFT" (nonfree module) + *
  • "SURF" -- "SURF" (nonfree module) + *
  • "ORB" -- "ORB" + *
  • "BRISK" -- "BRISK" + *
  • "MSER" -- "MSER" + *
  • "GFTT" -- "GoodFeaturesToTrackDetector" + *
  • "HARRIS" -- "GoodFeaturesToTrackDetector" with Harris + * detector enabled + *
  • "Dense" -- "DenseFeatureDetector" + *
  • "SimpleBlob" -- "SimpleBlobDetector" + *
+ * + *

A combined format is also supported: feature detector adapter name + * ("Grid" -- "GridAdaptedFeatureDetector", "Pyramid" + * -- "PyramidAdaptedFeatureDetector") + feature detector name (see above), for + * example: "GridFAST", "PyramidSTAR".
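In this Java wrapper the detector is selected by an int constant rather than the C++ string name; the adapter prefixes map onto the GRID_* and PYRAMID_* constants above. A minimal sketch:

import org.opencv.features2d.FeatureDetector;

FeatureDetector fast     = FeatureDetector.create(FeatureDetector.FAST);         // "FAST"
FeatureDetector gridFast = FeatureDetector.create(FeatureDetector.GRID_FAST);    // "GridFAST"
FeatureDetector pyrStar  = FeatureDetector.create(FeatureDetector.PYRAMID_STAR); // "PyramidSTAR"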

+ * + * @param detectorType Feature detector type. + * + * @see org.opencv.features2d.FeatureDetector.create + */ + public static FeatureDetector create(int detectorType) + { + + FeatureDetector retVal = new FeatureDetector(create_0(detectorType)); + + return retVal; + } + + + // + // C++: void javaFeatureDetector::detect(Mat image, vector_KeyPoint& keypoints, Mat mask = Mat()) + // + +/** + *

Detects keypoints in an image (first variant) or image set (second variant).
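A minimal sketch, assuming img is a Mat loaded with Highgui.imread:

import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Scalar;
import org.opencv.features2d.FeatureDetector;

FeatureDetector detector = FeatureDetector.create(FeatureDetector.ORB);
MatOfKeyPoint keypoints = new MatOfKeyPoint();
detector.detect(img, keypoints); // whole image

// Mask variant: only search the left half of the image.
Mat mask = Mat.zeros(img.size(), CvType.CV_8UC1);
mask.submat(0, img.rows(), 0, img.cols() / 2).setTo(new Scalar(255));
detector.detect(img, keypoints, mask);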

+ * + * @param image Image. + * @param keypoints The detected keypoints. In the second variant of the method + * keypoints[i] is a set of keypoints detected in images[i]. + * @param mask Mask specifying where to look for keypoints (optional). It must + * be an 8-bit integer matrix with non-zero values in the region of interest. + * + * @see org.opencv.features2d.FeatureDetector.detect + */ + public void detect(Mat image, MatOfKeyPoint keypoints, Mat mask) + { + Mat keypoints_mat = keypoints; + detect_0(nativeObj, image.nativeObj, keypoints_mat.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Detects keypoints in an image (first variant) or image set (second variant).

+ * + * @param image Image. + * @param keypoints The detected keypoints. In the second variant of the method + * keypoints[i] is a set of keypoints detected in images[i]. + * + * @see org.opencv.features2d.FeatureDetector.detect + */ + public void detect(Mat image, MatOfKeyPoint keypoints) + { + Mat keypoints_mat = keypoints; + detect_1(nativeObj, image.nativeObj, keypoints_mat.nativeObj); + + return; + } + + + // + // C++: void javaFeatureDetector::detect(vector_Mat images, vector_vector_KeyPoint& keypoints, vector_Mat masks = vector()) + // + +/** + *

Detects keypoints in an image (first variant) or image set (second variant).

+ * + * @param images Image set. + * @param keypoints The detected keypoints. In the second variant of the method + * keypoints[i] is a set of keypoints detected in images[i]. + * @param masks Masks for each input image specifying where to look for + * keypoints (optional). masks[i] is a mask for images[i]. + * + * @see org.opencv.features2d.FeatureDetector.detect + */ + public void detect(List images, List keypoints, List masks) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + Mat keypoints_mat = new Mat(); + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + detect_2(nativeObj, images_mat.nativeObj, keypoints_mat.nativeObj, masks_mat.nativeObj); + Converters.Mat_to_vector_vector_KeyPoint(keypoints_mat, keypoints); + return; + } + +/** + *

Detects keypoints in an image (first variant) or image set (second variant).

+ * + * @param images Image set. + * @param keypoints The detected keypoints. In the second variant of the method + * keypoints[i] is a set of keypoints detected in images[i]. + * + * @see org.opencv.features2d.FeatureDetector.detect + */ + public void detect(List images, List keypoints) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + Mat keypoints_mat = new Mat(); + detect_3(nativeObj, images_mat.nativeObj, keypoints_mat.nativeObj); + Converters.Mat_to_vector_vector_KeyPoint(keypoints_mat, keypoints); + return; + } + + + // + // C++: bool javaFeatureDetector::empty() + // + + public boolean empty() + { + + boolean retVal = empty_0(nativeObj); + + return retVal; + } + + + // + // C++: void javaFeatureDetector::read(string fileName) + // + + public void read(String fileName) + { + + read_0(nativeObj, fileName); + + return; + } + + + // + // C++: void javaFeatureDetector::write(string fileName) + // + + public void write(String fileName) + { + + write_0(nativeObj, fileName); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: static javaFeatureDetector* javaFeatureDetector::create(int detectorType) + private static native long create_0(int detectorType); + + // C++: void javaFeatureDetector::detect(Mat image, vector_KeyPoint& keypoints, Mat mask = Mat()) + private static native void detect_0(long nativeObj, long image_nativeObj, long keypoints_mat_nativeObj, long mask_nativeObj); + private static native void detect_1(long nativeObj, long image_nativeObj, long keypoints_mat_nativeObj); + + // C++: void javaFeatureDetector::detect(vector_Mat images, vector_vector_KeyPoint& keypoints, vector_Mat masks = vector()) + private static native void detect_2(long nativeObj, long images_mat_nativeObj, long keypoints_mat_nativeObj, long masks_mat_nativeObj); + private static native void detect_3(long nativeObj, long images_mat_nativeObj, long keypoints_mat_nativeObj); + + // C++: bool javaFeatureDetector::empty() + private static native boolean empty_0(long nativeObj); + + // C++: void javaFeatureDetector::read(string fileName) + private static native void read_0(long nativeObj, String fileName); + + // C++: void javaFeatureDetector::write(string fileName) + private static native void write_0(long nativeObj, String fileName); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/features2d/Features2d.java b/src/org/opencv/features2d/Features2d.java new file mode 100644 index 0000000..0b17924 --- /dev/null +++ b/src/org/opencv/features2d/Features2d.java @@ -0,0 +1,402 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.features2d; + +import java.util.ArrayList; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfByte; +import org.opencv.core.MatOfDMatch; +import org.opencv.core.MatOfKeyPoint; +import org.opencv.core.Scalar; +import org.opencv.utils.Converters; + +public class Features2d { + + public static final int + DRAW_OVER_OUTIMG = 1, + NOT_DRAW_SINGLE_POINTS = 2, + DRAW_RICH_KEYPOINTS = 4; + + + // + // C++: void drawKeypoints(Mat image, vector_KeyPoint keypoints, Mat outImage, Scalar color = Scalar::all(-1), int flags = 0) + // + +/** + *

Draws keypoints.
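A minimal sketch, assuming img and keypoints from the FeatureDetector sketch above:

import org.opencv.core.Mat;
import org.opencv.core.Scalar;
import org.opencv.features2d.Features2d;
import org.opencv.highgui.Highgui;

Mat out = new Mat();
Features2d.drawKeypoints(img, keypoints, out); // defaults: random colors, centers only
// Explicit color plus circles showing keypoint size and orientation:
Features2d.drawKeypoints(img, keypoints, out, new Scalar(0, 255, 0), Features2d.DRAW_RICH_KEYPOINTS);
Highgui.imwrite("keypoints.png", out);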

+ * + * @param image Source image. + * @param keypoints Keypoints from the source image. + * @param outImage Output image. Its content depends on the flags + * value defining what is drawn in the output image. See possible + * flags bit values below. + * @param color Color of keypoints. + * @param flags Flags setting drawing features. Possible flags bit + * values are defined by DrawMatchesFlags. See details above in + * "drawMatches". + * + * @see org.opencv.features2d.Features2d.drawKeypoints + */ + public static void drawKeypoints(Mat image, MatOfKeyPoint keypoints, Mat outImage, Scalar color, int flags) + { + Mat keypoints_mat = keypoints; + drawKeypoints_0(image.nativeObj, keypoints_mat.nativeObj, outImage.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], flags); + + return; + } + +/** + *

Draws keypoints.

+ * + * @param image Source image. + * @param keypoints Keypoints from the source image. + * @param outImage Output image. Its content depends on the flags + * value defining what is drawn in the output image. See possible + * flags bit values below. + * + * @see org.opencv.features2d.Features2d.drawKeypoints + */ + public static void drawKeypoints(Mat image, MatOfKeyPoint keypoints, Mat outImage) + { + Mat keypoints_mat = keypoints; + drawKeypoints_1(image.nativeObj, keypoints_mat.nativeObj, outImage.nativeObj); + + return; + } + + + // + // C++: void drawMatches(Mat img1, vector_KeyPoint keypoints1, Mat img2, vector_KeyPoint keypoints2, vector_DMatch matches1to2, Mat outImg, Scalar matchColor = Scalar::all(-1), Scalar singlePointColor = Scalar::all(-1), vector_char matchesMask = vector(), int flags = 0) + // + +/** + *

Draws the found matches of keypoints from two images.

+ * + *

This function draws matches of keypoints from two images in the output image. + * Each match is drawn as a line connecting two keypoints (circles). The structure + * DrawMatchesFlags is defined as follows: struct DrawMatchesFlags + *

+ * + *

// C++ code:

+ * + * + *

enum

+ * + * + *

DEFAULT = 0, // Output image matrix will be created (Mat.create),

+ * + *

// i.e. existing memory of output image may be reused.

+ * + *

// Two source images, matches, and single keypoints

+ * + *

// will be drawn.

+ * + *

// For each keypoint, only the center point will be

+ * + *

// drawn (without a circle around the keypoint with the

+ * + *

// keypoint size and orientation).

+ * + *

DRAW_OVER_OUTIMG = 1, // Output image matrix will not be

+ * + *

// created (using Mat.create). Matches will be drawn

+ * + *

// on existing content of output image.

+ * + *

NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn.

+ * + *

DRAW_RICH_KEYPOINTS = 4 // For each keypoint, the circle around

+ * + *

// keypoint with keypoint size and orientation will

+ * + *

// be drawn.

+ * + *

};

+ * + *

};
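A minimal sketch, assuming two images with their keypoints and a MatOfDMatch from DescriptorMatcher.match; flag values combine with bitwise OR:

import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.Scalar;
import org.opencv.features2d.Features2d;

Mat out = new Mat();
Features2d.drawMatches(img1, keypoints1, img2, keypoints2, matches, out);
// With explicit colors, an empty per-match mask (draw everything) and combined flags:
Features2d.drawMatches(img1, keypoints1, img2, keypoints2, matches, out,
        new Scalar(0, 255, 0), new Scalar(255, 0, 0), new MatOfByte(),
        Features2d.NOT_DRAW_SINGLE_POINTS | Features2d.DRAW_RICH_KEYPOINTS);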

+ * + *

+ * + * @param img1 First source image. + * @param keypoints1 Keypoints from the first source image. + * @param img2 Second source image. + * @param keypoints2 Keypoints from the second source image. + * @param matches1to2 Matches from the first image to the second one, which + * means that keypoints1[i] has a corresponding point in + * keypoints2[matches[i]]. + * @param outImg Output image. Its content depends on the flags + * value defining what is drawn in the output image. See possible + * flags bit values below. + * @param matchColor Color of matches (lines and connected keypoints). If + * matchColor==Scalar.all(-1), the color is generated randomly. + * @param singlePointColor Color of single keypoints (circles), which means that + * keypoints do not have the matches. If singlePointColor==Scalar.all(-1), + * the color is generated randomly. + * @param matchesMask Mask determining which matches are drawn. If the mask is + * empty, all matches are drawn. + * @param flags Flags setting drawing features. Possible flags bit + * values are defined by DrawMatchesFlags. + * + * @see org.opencv.features2d.Features2d.drawMatches + */ + public static void drawMatches(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, MatOfDMatch matches1to2, Mat outImg, Scalar matchColor, Scalar singlePointColor, MatOfByte matchesMask, int flags) + { + Mat keypoints1_mat = keypoints1; + Mat keypoints2_mat = keypoints2; + Mat matches1to2_mat = matches1to2; + Mat matchesMask_mat = matchesMask; + drawMatches_0(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj, matchColor.val[0], matchColor.val[1], matchColor.val[2], matchColor.val[3], singlePointColor.val[0], singlePointColor.val[1], singlePointColor.val[2], singlePointColor.val[3], matchesMask_mat.nativeObj, flags); + + return; + } + +/** + *

Draws the found matches of keypoints from two images.

+ * + *

This function draws matches of keypoints from two images in the output image. + * Each match is drawn as a line connecting two keypoints (circles). The structure + * DrawMatchesFlags is defined as follows: struct DrawMatchesFlags + *

+ * + *

// C++ code:

+ * + * + *

enum

+ * + * + *

DEFAULT = 0, // Output image matrix will be created (Mat.create),

+ * + *

// i.e. existing memory of output image may be reused.

+ * + *

// Two source images, matches, and single keypoints

+ * + *

// will be drawn.

+ * + *

// For each keypoint, only the center point will be

+ * + *

// drawn (without a circle around the keypoint with the

+ * + *

// keypoint size and orientation).

+ * + *

DRAW_OVER_OUTIMG = 1, // Output image matrix will not be

+ * + *

// created (using Mat.create). Matches will be drawn

+ * + *

// on existing content of output image.

+ * + *

NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn.

+ * + *

DRAW_RICH_KEYPOINTS = 4 // For each keypoint, the circle around

+ * + *

// keypoint with keypoint size and orientation will

+ * + *

// be drawn.

+ * + *

};

+ * + *

};

+ * + *

+ * + * @param img1 First source image. + * @param keypoints1 Keypoints from the first source image. + * @param img2 Second source image. + * @param keypoints2 Keypoints from the second source image. + * @param matches1to2 Matches from the first image to the second one, which + * means that keypoints1[i] has a corresponding point in + * keypoints2[matches[i]]. + * @param outImg Output image. Its content depends on the flags + * value defining what is drawn in the output image. See possible + * flags bit values below. + * + * @see org.opencv.features2d.Features2d.drawMatches + */ + public static void drawMatches(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, MatOfDMatch matches1to2, Mat outImg) + { + Mat keypoints1_mat = keypoints1; + Mat keypoints2_mat = keypoints2; + Mat matches1to2_mat = matches1to2; + drawMatches_1(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj); + + return; + } + + + // + // C++: void drawMatches(Mat img1, vector_KeyPoint keypoints1, Mat img2, vector_KeyPoint keypoints2, vector_vector_DMatch matches1to2, Mat outImg, Scalar matchColor = Scalar::all(-1), Scalar singlePointColor = Scalar::all(-1), vector_vector_char matchesMask = vector >(), int flags = 0) + // + +/** + *

Draws the found matches of keypoints from two images.

+ * + *

This function draws matches of keypoints from two images in the output image. + * Each match is drawn as a line connecting two keypoints (circles). The structure + * DrawMatchesFlags is defined as follows: struct DrawMatchesFlags + *

+ * + *

// C++ code:

+ * + * + *

enum

+ * + * + *

DEFAULT = 0, // Output image matrix will be created (Mat.create),

+ * + *

// i.e. existing memory of output image may be reused.

+ * + *

// Two source images, matches, and single keypoints

+ * + *

// will be drawn.

+ * + *

// For each keypoint, only the center point will be

+ * + *

// drawn (without a circle around the keypoint with the

+ * + *

// keypoint size and orientation).

+ * + *

DRAW_OVER_OUTIMG = 1, // Output image matrix will not be

+ * + *

// created (using Mat.create). Matches will be drawn

+ * + *

// on existing content of output image.

+ * + *

NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn.

+ * + *

DRAW_RICH_KEYPOINTS = 4 // For each keypoint, the circle around

+ * + *

// keypoint with keypoint size and orientation will

+ * + *

// be drawn.

+ * + *

};

+ * + *

};

+ * + *

+ * + * @param img1 First source image. + * @param keypoints1 Keypoints from the first source image. + * @param img2 Second source image. + * @param keypoints2 Keypoints from the second source image. + * @param matches1to2 Matches from the first image to the second one, which + * means that keypoints1[i] has a corresponding point in + * keypoints2[matches[i]]. + * @param outImg Output image. Its content depends on the flags + * value defining what is drawn in the output image. See possible + * flags bit values below. + * @param matchColor Color of matches (lines and connected keypoints). If + * matchColor==Scalar.all(-1), the color is generated randomly. + * @param singlePointColor Color of single keypoints (circles), which means that + * keypoints do not have the matches. If singlePointColor==Scalar.all(-1), + * the color is generated randomly. + * @param matchesMask Mask determining which matches are drawn. If the mask is + * empty, all matches are drawn. + * @param flags Flags setting drawing features. Possible flags bit + * values are defined by DrawMatchesFlags. + * + * @see org.opencv.features2d.Features2d.drawMatches + */ + public static void drawMatches2(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, List matches1to2, Mat outImg, Scalar matchColor, Scalar singlePointColor, List matchesMask, int flags) + { + Mat keypoints1_mat = keypoints1; + Mat keypoints2_mat = keypoints2; + List matches1to2_tmplm = new ArrayList((matches1to2 != null) ? matches1to2.size() : 0); + Mat matches1to2_mat = Converters.vector_vector_DMatch_to_Mat(matches1to2, matches1to2_tmplm); + List matchesMask_tmplm = new ArrayList((matchesMask != null) ? matchesMask.size() : 0); + Mat matchesMask_mat = Converters.vector_vector_char_to_Mat(matchesMask, matchesMask_tmplm); + drawMatches2_0(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj, matchColor.val[0], matchColor.val[1], matchColor.val[2], matchColor.val[3], singlePointColor.val[0], singlePointColor.val[1], singlePointColor.val[2], singlePointColor.val[3], matchesMask_mat.nativeObj, flags); + + return; + } + +/** + *

Draws the found matches of keypoints from two images.

+ * + *

This function draws matches of keypoints from two images in the output image. + * Each match is drawn as a line connecting two keypoints (circles). The structure + * DrawMatchesFlags is defined as follows: struct DrawMatchesFlags + *

+ * + *

// C++ code:

+ * + * + *

enum

+ * + * + *

DEFAULT = 0, // Output image matrix will be created (Mat.create),

+ * + *

// i.e. existing memory of output image may be reused.

+ * + *

// Two source images, matches, and single keypoints

+ * + *

// will be drawn.

+ * + *

// For each keypoint, only the center point will be

+ * + *

// drawn (without a circle around the keypoint with the

+ * + *

// keypoint size and orientation).

+ * + *

DRAW_OVER_OUTIMG = 1, // Output image matrix will not be

+ * + *

// created (using Mat.create). Matches will be drawn

+ * + *

// on existing content of output image.

+ * + *

NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn.

+ * + *

DRAW_RICH_KEYPOINTS = 4 // For each keypoint, the circle around

+ * + *

// keypoint with keypoint size and orientation will

+ * + *

// be drawn.

+ * + *

};

+ * + *

};

+ * + *

+ * + * @param img1 First source image. + * @param keypoints1 Keypoints from the first source image. + * @param img2 Second source image. + * @param keypoints2 Keypoints from the second source image. + * @param matches1to2 Matches from the first image to the second one, which + * means that keypoints1[i] has a corresponding point in + * keypoints2[matches[i]]. + * @param outImg Output image. Its content depends on the flags + * value defining what is drawn in the output image. See possible + * flags bit values below. + * + * @see org.opencv.features2d.Features2d.drawMatches + */ + public static void drawMatches2(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, List matches1to2, Mat outImg) + { + Mat keypoints1_mat = keypoints1; + Mat keypoints2_mat = keypoints2; + List matches1to2_tmplm = new ArrayList((matches1to2 != null) ? matches1to2.size() : 0); + Mat matches1to2_mat = Converters.vector_vector_DMatch_to_Mat(matches1to2, matches1to2_tmplm); + drawMatches2_1(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj); + + return; + } + + + + + // C++: void drawKeypoints(Mat image, vector_KeyPoint keypoints, Mat outImage, Scalar color = Scalar::all(-1), int flags = 0) + private static native void drawKeypoints_0(long image_nativeObj, long keypoints_mat_nativeObj, long outImage_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int flags); + private static native void drawKeypoints_1(long image_nativeObj, long keypoints_mat_nativeObj, long outImage_nativeObj); + + // C++: void drawMatches(Mat img1, vector_KeyPoint keypoints1, Mat img2, vector_KeyPoint keypoints2, vector_DMatch matches1to2, Mat outImg, Scalar matchColor = Scalar::all(-1), Scalar singlePointColor = Scalar::all(-1), vector_char matchesMask = vector(), int flags = 0) + private static native void drawMatches_0(long img1_nativeObj, long keypoints1_mat_nativeObj, long img2_nativeObj, long keypoints2_mat_nativeObj, long matches1to2_mat_nativeObj, long outImg_nativeObj, double matchColor_val0, double matchColor_val1, double matchColor_val2, double matchColor_val3, double singlePointColor_val0, double singlePointColor_val1, double singlePointColor_val2, double singlePointColor_val3, long matchesMask_mat_nativeObj, int flags); + private static native void drawMatches_1(long img1_nativeObj, long keypoints1_mat_nativeObj, long img2_nativeObj, long keypoints2_mat_nativeObj, long matches1to2_mat_nativeObj, long outImg_nativeObj); + + // C++: void drawMatches(Mat img1, vector_KeyPoint keypoints1, Mat img2, vector_KeyPoint keypoints2, vector_vector_DMatch matches1to2, Mat outImg, Scalar matchColor = Scalar::all(-1), Scalar singlePointColor = Scalar::all(-1), vector_vector_char matchesMask = vector >(), int flags = 0) + private static native void drawMatches2_0(long img1_nativeObj, long keypoints1_mat_nativeObj, long img2_nativeObj, long keypoints2_mat_nativeObj, long matches1to2_mat_nativeObj, long outImg_nativeObj, double matchColor_val0, double matchColor_val1, double matchColor_val2, double matchColor_val3, double singlePointColor_val0, double singlePointColor_val1, double singlePointColor_val2, double singlePointColor_val3, long matchesMask_mat_nativeObj, int flags); + private static native void drawMatches2_1(long img1_nativeObj, long keypoints1_mat_nativeObj, long img2_nativeObj, long keypoints2_mat_nativeObj, long matches1to2_mat_nativeObj, long outImg_nativeObj); + +} diff --git 
a/src/org/opencv/features2d/GenericDescriptorMatcher.java b/src/org/opencv/features2d/GenericDescriptorMatcher.java new file mode 100644 index 0000000..a5cfc53 --- /dev/null +++ b/src/org/opencv/features2d/GenericDescriptorMatcher.java @@ -0,0 +1,861 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.features2d; + +import java.lang.String; +import java.util.ArrayList; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfDMatch; +import org.opencv.core.MatOfKeyPoint; +import org.opencv.utils.Converters; + +// C++: class javaGenericDescriptorMatcher +/** + *

Abstract interface for extracting and matching a keypoint descriptor. There + * are also "DescriptorExtractor" and "DescriptorMatcher" for these purposes but + * their interfaces are intended for descriptors represented as vectors in a + * multidimensional space. GenericDescriptorMatcher is a more + * generic interface for descriptors. DescriptorMatcher and + * GenericDescriptorMatcher have two groups of match methods: for + * matching keypoints of an image with another image or with an image set.

+ * + *

class GenericDescriptorMatcher

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

GenericDescriptorMatcher();

+ * + *

virtual ~GenericDescriptorMatcher();

+ * + *

virtual void add(const vector<Mat>& images,

+ * + *

vector<vector<KeyPoint> >& keypoints);

+ * + *

const vector<Mat>& getTrainImages() const;

+ * + *

const vector<vector<KeyPoint> >& getTrainKeypoints() const;

+ * + *

virtual void clear();

+ * + *

virtual void train() = 0;

+ * + *

virtual bool isMaskSupported() = 0;

+ * + *

void classify(const Mat& queryImage,

+ * + *

vector<KeyPoint>& queryKeypoints,

+ * + *

const Mat& trainImage,

+ * + *

vector<KeyPoint>& trainKeypoints) const;

+ * + *

void classify(const Mat& queryImage,

+ * + *

vector<KeyPoint>& queryKeypoints);

+ * + *

/ *

+ *
    + *
  • Group of methods to match keypoints from an image pair. + *
  • / + *
+ * + *

void match(const Mat& queryImage, vector<KeyPoint>& queryKeypoints,

+ * + *

const Mat& trainImage, vector<KeyPoint>& trainKeypoints,

+ * + *

vector<DMatch>& matches, const Mat& mask=Mat()) const;

+ * + *

void knnMatch(const Mat& queryImage, vector<KeyPoint>& queryKeypoints,

+ * + *

const Mat& trainImage, vector<KeyPoint>& trainKeypoints,

+ * + *

vector<vector<DMatch> >& matches, int k,

+ * + *

const Mat& mask=Mat(), bool compactResult=false) const;

+ * + *

void radiusMatch(const Mat& queryImage, vector<KeyPoint>& queryKeypoints,

+ * + *

const Mat& trainImage, vector<KeyPoint>& trainKeypoints,

+ * + *

vector<vector<DMatch> >& matches, float maxDistance,

+ * + *

const Mat& mask=Mat(), bool compactResult=false) const;

+ * + *

/ *

+ *
    + *
  • Group of methods to match keypoints from one image to an image set. + *
  • / + *
+ * + *

void match(const Mat& queryImage, vector<KeyPoint>& queryKeypoints,

+ * + *

vector<DMatch>& matches, const vector<Mat>& masks=vector<Mat>());

+ * + *

void knnMatch(const Mat& queryImage, vector<KeyPoint>& queryKeypoints,

+ * + *

vector<vector<DMatch> >& matches, int k,

+ * + *

const vector<Mat>& masks=vector<Mat>(), bool compactResult=false);

+ * + *

void radiusMatch(const Mat& queryImage, vector<KeyPoint>& queryKeypoints,

+ * + *

vector<vector<DMatch> >& matches, float maxDistance,

+ * + *

const vector<Mat>& masks=vector<Mat>(), bool compactResult=false);

+ * + *

virtual void read(const FileNode&);

+ * + *

virtual void write(FileStorage&) const;

+ * + *

virtual Ptr<GenericDescriptorMatcher> clone(bool emptyTrainData=false) const + * = 0;

+ * + *

protected:...

+ * + *

};

+ * + * @see org.opencv.features2d.GenericDescriptorMatcher + */ +public class GenericDescriptorMatcher { + + protected final long nativeObj; + protected GenericDescriptorMatcher(long addr) { nativeObj = addr; } + + + public static final int + ONEWAY = 1, + FERN = 2; + + + // + // C++: void javaGenericDescriptorMatcher::add(vector_Mat images, vector_vector_KeyPoint keypoints) + // + +/** + *

Adds images and their keypoints to the training collection stored in the + * class instance.
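A minimal sketch, assuming trainImg1/trainImg2 are Mats with keypoints kp1/kp2 (MatOfKeyPoint) already detected; FERN is chosen only for illustration:

import java.util.Arrays;
import org.opencv.core.MatOfDMatch;
import org.opencv.features2d.GenericDescriptorMatcher;

GenericDescriptorMatcher gdm = GenericDescriptorMatcher.create(GenericDescriptorMatcher.FERN);
gdm.add(Arrays.asList(trainImg1, trainImg2), Arrays.asList(kp1, kp2));
// The stored collection now backs the single-image match()/knnMatch()/classify() variants:
MatOfDMatch matches = new MatOfDMatch();
gdm.match(queryImg, queryKp, matches);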

+ * + * @param images Image collection. + * @param keypoints Point collection. It is assumed that keypoints[i] + * are keypoints detected in the image images[i]. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.add + */ + public void add(List images, List keypoints) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + List keypoints_tmplm = new ArrayList((keypoints != null) ? keypoints.size() : 0); + Mat keypoints_mat = Converters.vector_vector_KeyPoint_to_Mat(keypoints, keypoints_tmplm); + add_0(nativeObj, images_mat.nativeObj, keypoints_mat.nativeObj); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::classify(Mat queryImage, vector_KeyPoint& queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints) + // + +/** + *

Classifies keypoints from a query set.

+ * + *

The method classifies each keypoint from a query set. The first variant of + * the method takes a train image and its keypoints as an input argument. The + * second variant uses the internally stored training collection that can be + * built using the GenericDescriptorMatcher.add method.

+ * + *

The methods do the following:

+ *
    + *
  • Call the GenericDescriptorMatcher.match method to find + * correspondence between the query set and the training set. + *
  • Set the class_id field of each keypoint from the query + * set to class_id of the corresponding keypoint from the training + * set. + *
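A minimal sketch of the second variant, assuming gdm holds a training collection (see the add() sketch above) and queryKp was detected in queryImg:

import org.opencv.features2d.KeyPoint;

gdm.classify(queryImg, queryKp); // rewrites class_id of each query keypoint in place
for (KeyPoint kp : queryKp.toArray()) {
    System.out.println("keypoint at " + kp.pt + " -> class " + kp.class_id);
}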
+ * + * @param queryImage Query image. + * @param queryKeypoints Keypoints from a query image. + * @param trainImage Train image. + * @param trainKeypoints Keypoints from a train image. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.classify + */ + public void classify(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + classify_0(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::classify(Mat queryImage, vector_KeyPoint& queryKeypoints) + // + +/** + *

Classifies keypoints from a query set.

+ * + *

The method classifies each keypoint from a query set. The first variant of + * the method takes a train image and its keypoints as an input argument. The + * second variant uses the internally stored training collection that can be + * built using the GenericDescriptorMatcher.add method.

+ * + *

The methods do the following:

+ *
    + *
  • Call the GenericDescriptorMatcher.match method to find + * correspondence between the query set and the training set. + *
  • Set the class_id field of each keypoint from the query + * set to class_id of the corresponding keypoint from the training + * set. + *
+ * + * @param queryImage Query image. + * @param queryKeypoints Keypoints from a query image. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.classify + */ + public void classify(Mat queryImage, MatOfKeyPoint queryKeypoints) + { + Mat queryKeypoints_mat = queryKeypoints; + classify_1(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::clear() + // + +/** + *

Clears a train collection (images and keypoints).

+ * + * @see org.opencv.features2d.GenericDescriptorMatcher.clear + */ + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: javaGenericDescriptorMatcher* javaGenericDescriptorMatcher::jclone(bool emptyTrainData = false) + // + + public GenericDescriptorMatcher clone(boolean emptyTrainData) + { + + GenericDescriptorMatcher retVal = new GenericDescriptorMatcher(clone_0(nativeObj, emptyTrainData)); + + return retVal; + } + + public GenericDescriptorMatcher clone() + { + + GenericDescriptorMatcher retVal = new GenericDescriptorMatcher(clone_1(nativeObj)); + + return retVal; + } + + + // + // C++: static javaGenericDescriptorMatcher* javaGenericDescriptorMatcher::create(int matcherType) + // + + public static GenericDescriptorMatcher create(int matcherType) + { + + GenericDescriptorMatcher retVal = new GenericDescriptorMatcher(create_0(matcherType)); + + return retVal; + } + + + // + // C++: bool javaGenericDescriptorMatcher::empty() + // + + public boolean empty() + { + + boolean retVal = empty_0(nativeObj); + + return retVal; + } + + + // + // C++: vector_Mat javaGenericDescriptorMatcher::getTrainImages() + // + +/** + *

Returns a train image collection.

+ * + * @see org.opencv.features2d.GenericDescriptorMatcher.getTrainImages + */ + public List getTrainImages() + { + List retVal = new ArrayList(); + Mat retValMat = new Mat(getTrainImages_0(nativeObj)); + Converters.Mat_to_vector_Mat(retValMat, retVal); + return retVal; + } + + + // + // C++: vector_vector_KeyPoint javaGenericDescriptorMatcher::getTrainKeypoints() + // + +/** + *

Returns a train keypoints collection.

+ * + * @see org.opencv.features2d.GenericDescriptorMatcher.getTrainKeypoints + */ + public List getTrainKeypoints() + { + List retVal = new ArrayList(); + Mat retValMat = new Mat(getTrainKeypoints_0(nativeObj)); + Converters.Mat_to_vector_vector_KeyPoint(retValMat, retVal); + return retVal; + } + + + // + // C++: bool javaGenericDescriptorMatcher::isMaskSupported() + // + +/** + *

Returns true if a generic descriptor matcher supports masking + * permissible matches.

+ * + * @see org.opencv.features2d.GenericDescriptorMatcher.isMaskSupported + */ + public boolean isMaskSupported() + { + + boolean retVal = isMaskSupported_0(nativeObj); + + return retVal; + } + + + // + // C++: void javaGenericDescriptorMatcher::knnMatch(Mat queryImage, vector_KeyPoint queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints, vector_vector_DMatch& matches, int k, Mat mask = Mat(), bool compactResult = false) + // + +/** + *

Finds the k best matches for each query keypoint.

+ * + *

The methods are extended variants of GenericDescriptorMatcher.match. + * The parameters and semantics are similar to those of DescriptorMatcher.knnMatch, + * but this class does not require explicitly computed keypoint descriptors.
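A minimal sketch, assuming a query and a train image with detected keypoints as in the earlier sketches; k = 2 is used so the usual ratio test can prune ambiguous matches (the 0.8 factor is a placeholder):

import java.util.ArrayList;
import java.util.List;
import org.opencv.core.MatOfDMatch;
import org.opencv.features2d.DMatch;

List<MatOfDMatch> knn = new ArrayList<MatOfDMatch>();
gdm.knnMatch(queryImg, queryKp, trainImg, trainKp, knn, 2);
for (MatOfDMatch perQuery : knn) {
    DMatch[] m = perQuery.toArray();
    if (m.length == 2 && m[0].distance < 0.8f * m[1].distance) {
        // m[0] is a clearly-best match for this query keypoint
    }
}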

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param trainImage a trainImage + * @param trainKeypoints a trainKeypoints + * @param matches a matches + * @param k a k + * @param mask a mask + * @param compactResult a compactResult + * + * @see org.opencv.features2d.GenericDescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints, List matches, int k, Mat mask, boolean compactResult) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + Mat matches_mat = new Mat(); + knnMatch_0(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj, matches_mat.nativeObj, k, mask.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

Finds the k best matches for each query keypoint.

+ * + *

The methods are extended variants of GenericDescriptorMatcher.match. + * The parameters and semantics are similar to those of DescriptorMatcher.knnMatch, + * but this class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param trainImage a trainImage + * @param trainKeypoints a trainKeypoints + * @param matches a matches + * @param k a k + * + * @see org.opencv.features2d.GenericDescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints, List matches, int k) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + Mat matches_mat = new Mat(); + knnMatch_1(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj, matches_mat.nativeObj, k); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::knnMatch(Mat queryImage, vector_KeyPoint queryKeypoints, vector_vector_DMatch& matches, int k, vector_Mat masks = vector(), bool compactResult = false) + // + +/** + *

Finds the k best matches for each query keypoint.

+ * + *

The methods are extended variants of GenericDescriptorMatcher.match. + * The parameters and semantics are similar to those of DescriptorMatcher.knnMatch, + * but this class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param matches a matches + * @param k a k + * @param masks a masks + * @param compactResult a compactResult + * + * @see org.opencv.features2d.GenericDescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, List matches, int k, List masks, boolean compactResult) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat matches_mat = new Mat(); + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + knnMatch_2(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, matches_mat.nativeObj, k, masks_mat.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

Finds the k best matches for each query keypoint.

+ * + *

The methods are extended variants of GenericDescriptorMatcher.match. + * The parameters and semantics are similar to those of DescriptorMatcher.knnMatch, + * but this class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param matches a matches + * @param k a k + * + * @see org.opencv.features2d.GenericDescriptorMatcher.knnMatch + */ + public void knnMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, List matches, int k) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat matches_mat = new Mat(); + knnMatch_3(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, matches_mat.nativeObj, k); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::match(Mat queryImage, vector_KeyPoint queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints, vector_DMatch& matches, Mat mask = Mat()) + // + +/** + *

Finds the best match in the training set for each keypoint from the query + * set.

+ * + *

The methods find the best match for each query keypoint. In the first variant + * of the method, a train image and its keypoints are the input arguments. In + * the second variant, query keypoints are matched to the internally stored + * training collection that can be built using the GenericDescriptorMatcher.add + * method. Optional mask (or masks) can be passed to specify which query and + * training descriptors can be matched. Namely, queryKeypoints[i] + * can be matched with trainKeypoints[j] only if mask.at<uchar>(i,j) + * is non-zero.
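A minimal sketch of the mask convention (row i of the mask corresponds to query keypoint i, column j to train keypoint j); masks only take effect when the concrete matcher supports them, see isMaskSupported():

import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.Scalar;

Mat mask = Mat.ones((int) queryKp.total(), (int) trainKp.total(), CvType.CV_8UC1);
mask.row(0).setTo(new Scalar(0)); // forbid every match for query keypoint 0
MatOfDMatch matches = new MatOfDMatch();
gdm.match(queryImg, queryKp, trainImg, trainKp, matches, mask);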

+ * + * @param queryImage Query image. + * @param queryKeypoints Keypoints detected in queryImage. + * @param trainImage Train image. It is not added to a train image collection + * stored in the class object. + * @param trainKeypoints Keypoints detected in trainImage. They are + * not added to a train points collection stored in the class object. + * @param matches Matches. If a query descriptor (keypoint) is masked out in + * mask, no match is added for this descriptor. So, matches + * size may be smaller than the query keypoints count. + * @param mask Mask specifying permissible matches between an input query and + * train keypoints. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.match + */ + public void match(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints, MatOfDMatch matches, Mat mask) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + Mat matches_mat = matches; + match_0(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj, matches_mat.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Finds the best match in the training set for each keypoint from the query + * set.

+ * + *

The methods find the best match for each query keypoint. In the first variant + * of the method, a train image and its keypoints are the input arguments. In + * the second variant, query keypoints are matched to the internally stored + * training collection that can be built using the GenericDescriptorMatcher.add + * method. Optional mask (or masks) can be passed to specify which query and + * training descriptors can be matched. Namely, queryKeypoints[i] + * can be matched with trainKeypoints[j] only if mask.at<uchar>(i,j) + * is non-zero.

+ * + * @param queryImage Query image. + * @param queryKeypoints Keypoints detected in queryImage. + * @param trainImage Train image. It is not added to a train image collection + * stored in the class object. + * @param trainKeypoints Keypoints detected in trainImage. They are + * not added to a train points collection stored in the class object. + * @param matches Matches. If a query descriptor (keypoint) is masked out in + * mask, no match is added for this descriptor. So, matches + * size may be smaller than the query keypoints count. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.match + */ + public void match(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints, MatOfDMatch matches) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + Mat matches_mat = matches; + match_1(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj, matches_mat.nativeObj); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::match(Mat queryImage, vector_KeyPoint queryKeypoints, vector_DMatch& matches, vector_Mat masks = vector()) + // + +/** + *

Finds the best match in the training set for each keypoint from the query + * set.

+ * + *

The methods find the best match for each query keypoint. In the first variant + * of the method, a train image and its keypoints are the input arguments. In + * the second variant, query keypoints are matched to the internally stored + * training collection that can be built using the GenericDescriptorMatcher.add + * method. Optional mask (or masks) can be passed to specify which query and + * training descriptors can be matched. Namely, queryKeypoints[i] + * can be matched with trainKeypoints[j] only if mask.at<uchar>(i,j) + * is non-zero.

+ * + * @param queryImage Query image. + * @param queryKeypoints Keypoints detected in queryImage. + * @param matches Matches. If a query descriptor (keypoint) is masked out in + * mask, no match is added for this descriptor. So, matches + * size may be smaller than the query keypoints count. + * @param masks Set of masks. Each masks[i] specifies permissible + * matches between input query keypoints and stored train keypoints from the + * i-th image. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.match + */ + public void match(Mat queryImage, MatOfKeyPoint queryKeypoints, MatOfDMatch matches, List<Mat> masks) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat matches_mat = matches; + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + match_2(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, matches_mat.nativeObj, masks_mat.nativeObj); + + return; + } + +/** + *

Finds the best match in the training set for each keypoint from the query + * set.

+ * + *

The methods find the best match for each query keypoint. In the first variant + * of the method, a train image and its keypoints are the input arguments. In + * the second variant, query keypoints are matched to the internally stored + * training collection that can be built using the GenericDescriptorMatcher.add + * method. Optional mask (or masks) can be passed to specify which query and + * training descriptors can be matched. Namely, queryKeypoints[i] + * can be matched with trainKeypoints[j] only if mask.at<uchar>(i,j) + * is non-zero.

+ * + * @param queryImage Query image. + * @param queryKeypoints Keypoints detected in queryImage. + * @param matches Matches. If a query descriptor (keypoint) is masked out in + * mask, no match is added for this descriptor. So, matches + * size may be smaller than the query keypoints count. + * + * @see org.opencv.features2d.GenericDescriptorMatcher.match + */ + public void match(Mat queryImage, MatOfKeyPoint queryKeypoints, MatOfDMatch matches) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat matches_mat = matches; + match_3(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, matches_mat.nativeObj); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::radiusMatch(Mat queryImage, vector_KeyPoint queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints, vector_vector_DMatch& matches, float maxDistance, Mat mask = Mat(), bool compactResult = false) + // + +/** + *

For each query keypoint, finds the training keypoints not farther than the + * specified distance.

+ * + *

The methods are similar to DescriptorMatcher.radiusMatch. But this + * class does not require explicitly computed keypoint descriptors.
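+ *
+ * A minimal Java sketch (illustrative only; the images and keypoint sets are
+ * hypothetical):
+ *
+ * // Java code:
+ * List<MatOfDMatch> matches = new ArrayList<MatOfDMatch>();
+ * // collect every train keypoint within maxDistance of each query keypoint
+ * matcher.radiusMatch(queryImg, queryKp, trainImg, trainKp, matches, 0.5f, new Mat(), false);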

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param trainImage a trainImage + * @param trainKeypoints a trainKeypoints + * @param matches a matches + * @param maxDistance a maxDistance + * @param mask a mask + * @param compactResult a compactResult + * + * @see org.opencv.features2d.GenericDescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints, List matches, float maxDistance, Mat mask, boolean compactResult) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + Mat matches_mat = new Mat(); + radiusMatch_0(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj, matches_mat.nativeObj, maxDistance, mask.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

For each query keypoint, finds the training keypoints not farther than the + * specified distance.

+ * + *

The methods are similar to DescriptorMatcher.radiusMatch. But this + * class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param trainImage a trainImage + * @param trainKeypoints a trainKeypoints + * @param matches a matches + * @param maxDistance a maxDistance + * + * @see org.opencv.features2d.GenericDescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, Mat trainImage, MatOfKeyPoint trainKeypoints, List matches, float maxDistance) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat trainKeypoints_mat = trainKeypoints; + Mat matches_mat = new Mat(); + radiusMatch_1(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, trainImage.nativeObj, trainKeypoints_mat.nativeObj, matches_mat.nativeObj, maxDistance); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::radiusMatch(Mat queryImage, vector_KeyPoint queryKeypoints, vector_vector_DMatch& matches, float maxDistance, vector_Mat masks = vector(), bool compactResult = false) + // + +/** + *

For each query keypoint, finds the training keypoints not farther than the + * specified distance.

+ * + *

The methods are similar to DescriptorMatcher.radiusMatch. But this + * class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param matches a matches + * @param maxDistance a maxDistance + * @param masks a masks + * @param compactResult a compactResult + * + * @see org.opencv.features2d.GenericDescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, List matches, float maxDistance, List masks, boolean compactResult) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat matches_mat = new Mat(); + Mat masks_mat = Converters.vector_Mat_to_Mat(masks); + radiusMatch_2(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, matches_mat.nativeObj, maxDistance, masks_mat.nativeObj, compactResult); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + +/** + *

For each query keypoint, finds the training keypoints not farther than the + * specified distance.

+ * + *

The methods are similar to DescriptorMatcher.radiusMatch. But this + * class does not require explicitly computed keypoint descriptors.

+ * + * @param queryImage a queryImage + * @param queryKeypoints a queryKeypoints + * @param matches a matches + * @param maxDistance a maxDistance + * + * @see org.opencv.features2d.GenericDescriptorMatcher.radiusMatch + */ + public void radiusMatch(Mat queryImage, MatOfKeyPoint queryKeypoints, List matches, float maxDistance) + { + Mat queryKeypoints_mat = queryKeypoints; + Mat matches_mat = new Mat(); + radiusMatch_3(nativeObj, queryImage.nativeObj, queryKeypoints_mat.nativeObj, matches_mat.nativeObj, maxDistance); + Converters.Mat_to_vector_vector_DMatch(matches_mat, matches); + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::read(string fileName) + // + +/** + *

Reads a matcher object from a file.

+ * + * @param fileName a fileName + * + * @see org.opencv.features2d.GenericDescriptorMatcher.read + */ + public void read(String fileName) + { + + read_0(nativeObj, fileName); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::train() + // + +/** + *

Trains the descriptor matcher.

+ * + *

Prepares the descriptor matcher, for example, by creating a tree-based structure + * to extract descriptors or to optimize descriptor matching.
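+ *
+ * A minimal Java sketch (illustrative; "matcher" is assumed to have been
+ * created and populated elsewhere):
+ *
+ * // Java code:
+ * matcher.train(); // build the internal search structures before matching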

+ * + * @see org.opencv.features2d.GenericDescriptorMatcher.train + */ + public void train() + { + + train_0(nativeObj); + + return; + } + + + // + // C++: void javaGenericDescriptorMatcher::write(string fileName) + // + +/** + *

Writes a matcher object to a file.

+ * + * @param fileName a fileName + * + * @see org.opencv.features2d.GenericDescriptorMatcher.write + */ + public void write(String fileName) + { + + write_0(nativeObj, fileName); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: void javaGenericDescriptorMatcher::add(vector_Mat images, vector_vector_KeyPoint keypoints) + private static native void add_0(long nativeObj, long images_mat_nativeObj, long keypoints_mat_nativeObj); + + // C++: void javaGenericDescriptorMatcher::classify(Mat queryImage, vector_KeyPoint& queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints) + private static native void classify_0(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj); + + // C++: void javaGenericDescriptorMatcher::classify(Mat queryImage, vector_KeyPoint& queryKeypoints) + private static native void classify_1(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj); + + // C++: void javaGenericDescriptorMatcher::clear() + private static native void clear_0(long nativeObj); + + // C++: javaGenericDescriptorMatcher* javaGenericDescriptorMatcher::jclone(bool emptyTrainData = false) + private static native long clone_0(long nativeObj, boolean emptyTrainData); + private static native long clone_1(long nativeObj); + + // C++: static javaGenericDescriptorMatcher* javaGenericDescriptorMatcher::create(int matcherType) + private static native long create_0(int matcherType); + + // C++: bool javaGenericDescriptorMatcher::empty() + private static native boolean empty_0(long nativeObj); + + // C++: vector_Mat javaGenericDescriptorMatcher::getTrainImages() + private static native long getTrainImages_0(long nativeObj); + + // C++: vector_vector_KeyPoint javaGenericDescriptorMatcher::getTrainKeypoints() + private static native long getTrainKeypoints_0(long nativeObj); + + // C++: bool javaGenericDescriptorMatcher::isMaskSupported() + private static native boolean isMaskSupported_0(long nativeObj); + + // C++: void javaGenericDescriptorMatcher::knnMatch(Mat queryImage, vector_KeyPoint queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints, vector_vector_DMatch& matches, int k, Mat mask = Mat(), bool compactResult = false) + private static native void knnMatch_0(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj, long matches_mat_nativeObj, int k, long mask_nativeObj, boolean compactResult); + private static native void knnMatch_1(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj, long matches_mat_nativeObj, int k); + + // C++: void javaGenericDescriptorMatcher::knnMatch(Mat queryImage, vector_KeyPoint queryKeypoints, vector_vector_DMatch& matches, int k, vector_Mat masks = vector(), bool compactResult = false) + private static native void knnMatch_2(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long matches_mat_nativeObj, int k, long masks_mat_nativeObj, boolean compactResult); + private static native void knnMatch_3(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long matches_mat_nativeObj, int k); + + // C++: void javaGenericDescriptorMatcher::match(Mat queryImage, vector_KeyPoint queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints, vector_DMatch& matches, Mat mask = Mat()) 
+ private static native void match_0(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj, long matches_mat_nativeObj, long mask_nativeObj); + private static native void match_1(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj, long matches_mat_nativeObj); + + // C++: void javaGenericDescriptorMatcher::match(Mat queryImage, vector_KeyPoint queryKeypoints, vector_DMatch& matches, vector_Mat masks = vector()) + private static native void match_2(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long matches_mat_nativeObj, long masks_mat_nativeObj); + private static native void match_3(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long matches_mat_nativeObj); + + // C++: void javaGenericDescriptorMatcher::radiusMatch(Mat queryImage, vector_KeyPoint queryKeypoints, Mat trainImage, vector_KeyPoint trainKeypoints, vector_vector_DMatch& matches, float maxDistance, Mat mask = Mat(), bool compactResult = false) + private static native void radiusMatch_0(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj, long matches_mat_nativeObj, float maxDistance, long mask_nativeObj, boolean compactResult); + private static native void radiusMatch_1(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long trainImage_nativeObj, long trainKeypoints_mat_nativeObj, long matches_mat_nativeObj, float maxDistance); + + // C++: void javaGenericDescriptorMatcher::radiusMatch(Mat queryImage, vector_KeyPoint queryKeypoints, vector_vector_DMatch& matches, float maxDistance, vector_Mat masks = vector(), bool compactResult = false) + private static native void radiusMatch_2(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long matches_mat_nativeObj, float maxDistance, long masks_mat_nativeObj, boolean compactResult); + private static native void radiusMatch_3(long nativeObj, long queryImage_nativeObj, long queryKeypoints_mat_nativeObj, long matches_mat_nativeObj, float maxDistance); + + // C++: void javaGenericDescriptorMatcher::read(string fileName) + private static native void read_0(long nativeObj, String fileName); + + // C++: void javaGenericDescriptorMatcher::train() + private static native void train_0(long nativeObj); + + // C++: void javaGenericDescriptorMatcher::write(string fileName) + private static native void write_0(long nativeObj, String fileName); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/features2d/KeyPoint.java b/src/org/opencv/features2d/KeyPoint.java new file mode 100644 index 0000000..d0f03ba --- /dev/null +++ b/src/org/opencv/features2d/KeyPoint.java @@ -0,0 +1,161 @@ +package org.opencv.features2d; + +import org.opencv.core.Point; + +/** + *

Data structure for salient point detectors.

+ *
+ * Fields of the C++ KeyPoint structure (mirrored by this class):
+ *
+ * pt - coordinates of the keypoint
+ *
+ * size - diameter of the meaningful keypoint neighborhood
+ *
+ * angle - computed orientation of the keypoint (-1 if not applicable); its
+ * possible values are in the range [0,360) degrees, measured relative to the
+ * image coordinate system (y-axis directed downward), i.e. clockwise
+ *
+ * response - the response by which the strongest keypoints have been selected;
+ * can be used for further sorting or subsampling
+ *
+ * octave - octave (pyramid layer) from which the keypoint has been extracted
+ *
+ * class_id - object id that can be used to cluster keypoints by the object
+ * they belong to

+ * + * @see org.opencv.features2d.KeyPoint + */ +public class KeyPoint { + + /** + * Coordinates of the keypoint. + */ + public Point pt; + /** + * Diameter of the useful keypoint adjacent area. + */ + public float size; + /** + * Computed orientation of the keypoint (-1 if not applicable). + */ + public float angle; + /** + * The response, by which the strongest keypoints have been selected. Can + * be used for further sorting or subsampling. + */ + public float response; + /** + * Octave (pyramid layer), from which the keypoint has been extracted. + */ + public int octave; + /** + * Object ID, that can be used to cluster keypoints by an object they + * belong to. + */ + public int class_id; + +/** + *

The keypoint constructors
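+ *
+ * A minimal Java sketch with illustrative values:
+ *
+ * // Java code:
+ * KeyPoint kp = new KeyPoint(120.5f, 64.0f, 12.0f); // x, y, diameter
+ * System.out.println(kp); // prints pt, size, angle, response, octave, class_id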

+ * + * @param x x-coordinate of the keypoint + * @param y y-coordinate of the keypoint + * @param _size keypoint diameter + * @param _angle keypoint orientation + * @param _response keypoint detector response on the keypoint (that is, + * strength of the keypoint) + * @param _octave pyramid octave in which the keypoint has been detected + * @param _class_id object id + * + * @see org.opencv.features2d.KeyPoint.KeyPoint + */ + public KeyPoint(float x, float y, float _size, float _angle, float _response, int _octave, int _class_id) + { + pt = new Point(x, y); + size = _size; + angle = _angle; + response = _response; + octave = _octave; + class_id = _class_id; + } + +/** + *

The keypoint constructors

+ * + * @see org.opencv.features2d.KeyPoint.KeyPoint + */ + public KeyPoint() + { + this(0, 0, 0, -1, 0, 0, -1); + } + +/** + *

The keypoint constructors

+ * + * @param x x-coordinate of the keypoint + * @param y y-coordinate of the keypoint + * @param _size keypoint diameter + * @param _angle keypoint orientation + * @param _response keypoint detector response on the keypoint (that is, + * strength of the keypoint) + * @param _octave pyramid octave in which the keypoint has been detected + * + * @see org.opencv.features2d.KeyPoint.KeyPoint + */ + public KeyPoint(float x, float y, float _size, float _angle, float _response, int _octave) + { + this(x, y, _size, _angle, _response, _octave, -1); + } + +/** + *

The keypoint constructors

+ * + * @param x x-coordinate of the keypoint + * @param y y-coordinate of the keypoint + * @param _size keypoint diameter + * @param _angle keypoint orientation + * @param _response keypoint detector response on the keypoint (that is, + * strength of the keypoint) + * + * @see org.opencv.features2d.KeyPoint.KeyPoint + */ + public KeyPoint(float x, float y, float _size, float _angle, float _response) + { + this(x, y, _size, _angle, _response, 0, -1); + } + +/** + *

The keypoint constructors

+ * + * @param x x-coordinate of the keypoint + * @param y y-coordinate of the keypoint + * @param _size keypoint diameter + * @param _angle keypoint orientation + * + * @see org.opencv.features2d.KeyPoint.KeyPoint + */ + public KeyPoint(float x, float y, float _size, float _angle) + { + this(x, y, _size, _angle, 0, 0, -1); + } + +/** + *

The keypoint constructors

+ * + * @param x x-coordinate of the keypoint + * @param y y-coordinate of the keypoint + * @param _size keypoint diameter + * + * @see org.opencv.features2d.KeyPoint.KeyPoint + */ + public KeyPoint(float x, float y, float _size) + { + this(x, y, _size, -1, 0, 0, -1); + } + + @Override + public String toString() { + return "KeyPoint [pt=" + pt + ", size=" + size + ", angle=" + angle + + ", response=" + response + ", octave=" + octave + + ", class_id=" + class_id + "]"; + } + +} diff --git a/src/org/opencv/features2d/package.bluej b/src/org/opencv/features2d/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/highgui/Highgui.java b/src/org/opencv/highgui/Highgui.java new file mode 100644 index 0000000..cdd2a6d --- /dev/null +++ b/src/org/opencv/highgui/Highgui.java @@ -0,0 +1,584 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.highgui; + +import java.lang.String; +import org.opencv.core.Mat; +import org.opencv.core.MatOfByte; +import org.opencv.core.MatOfInt; + +public class Highgui { + + public static final int + CV_FONT_LIGHT = 25, + CV_FONT_NORMAL = 50, + CV_FONT_DEMIBOLD = 63, + CV_FONT_BOLD = 75, + CV_FONT_BLACK = 87, + CV_STYLE_NORMAL = 0, + CV_STYLE_ITALIC = 1, + CV_STYLE_OBLIQUE = 2, + CV_LOAD_IMAGE_UNCHANGED = -1, + CV_LOAD_IMAGE_GRAYSCALE = 0, + CV_LOAD_IMAGE_COLOR = 1, + CV_LOAD_IMAGE_ANYDEPTH = 2, + CV_LOAD_IMAGE_ANYCOLOR = 4, + CV_IMWRITE_JPEG_QUALITY = 1, + CV_IMWRITE_PNG_COMPRESSION = 16, + CV_IMWRITE_PNG_STRATEGY = 17, + CV_IMWRITE_PNG_BILEVEL = 18, + CV_IMWRITE_PNG_STRATEGY_DEFAULT = 0, + CV_IMWRITE_PNG_STRATEGY_FILTERED = 1, + CV_IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY = 2, + CV_IMWRITE_PNG_STRATEGY_RLE = 3, + CV_IMWRITE_PNG_STRATEGY_FIXED = 4, + CV_IMWRITE_PXM_BINARY = 32, + CV_CVTIMG_FLIP = 1, + CV_CVTIMG_SWAP_RB = 2, + CV_CAP_MSMF = 1400, + CV_CAP_ANDROID = 1000, + CV_CAP_XIAPI = 1100, + CV_CAP_AVFOUNDATION = 1200, + CV_CAP_GIGANETIX = 1300, + CV_CAP_PROP_FRAME_WIDTH = 3, + CV_CAP_PROP_FRAME_HEIGHT = 4, + CV_CAP_PROP_ZOOM = 27, + CV_CAP_PROP_FOCUS = 28, + CV_CAP_PROP_GUID = 29, + CV_CAP_PROP_ISO_SPEED = 30, + CV_CAP_PROP_BACKLIGHT = 32, + CV_CAP_PROP_PAN = 33, + CV_CAP_PROP_TILT = 34, + CV_CAP_PROP_ROLL = 35, + CV_CAP_PROP_IRIS = 36, + CV_CAP_PROP_SETTINGS = 37, + CV_CAP_PROP_AUTOGRAB = 1024, + CV_CAP_PROP_PREVIEW_FORMAT = 1026, + CV_CAP_PROP_XI_DOWNSAMPLING = 400, + CV_CAP_PROP_XI_DATA_FORMAT = 401, + CV_CAP_PROP_XI_OFFSET_X = 402, + CV_CAP_PROP_XI_OFFSET_Y = 403, + CV_CAP_PROP_XI_TRG_SOURCE = 404, + CV_CAP_PROP_XI_TRG_SOFTWARE = 405, + CV_CAP_PROP_XI_GPI_SELECTOR = 406, + CV_CAP_PROP_XI_GPI_MODE = 407, + CV_CAP_PROP_XI_GPI_LEVEL = 408, + CV_CAP_PROP_XI_GPO_SELECTOR = 409, + CV_CAP_PROP_XI_GPO_MODE = 410, + CV_CAP_PROP_XI_LED_SELECTOR = 411, + CV_CAP_PROP_XI_LED_MODE = 412, + CV_CAP_PROP_XI_MANUAL_WB = 413, + CV_CAP_PROP_XI_AUTO_WB = 414, + CV_CAP_PROP_XI_AEAG = 415, + CV_CAP_PROP_XI_EXP_PRIORITY = 416, + CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, + CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, + CV_CAP_PROP_XI_AEAG_LEVEL = 419, + CV_CAP_PROP_XI_TIMEOUT = 420, + CV_CAP_PROP_ANDROID_FLASH_MODE = 8001, + CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002, + CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003, + CV_CAP_PROP_ANDROID_ANTIBANDING = 8004, + CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008, + CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001, + CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002, + 
CV_CAP_PROP_IOS_DEVICE_FLASH = 9003, + CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004, + CV_CAP_PROP_IOS_DEVICE_TORCH = 9005, + CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001, + CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002, + CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003, + CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004, + CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005, + CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006, + CV_CAP_ANDROID_COLOR_FRAME_BGR = 0, + CV_CAP_ANDROID_COLOR_FRAME = CV_CAP_ANDROID_COLOR_FRAME_BGR, + CV_CAP_ANDROID_GREY_FRAME = 1, + CV_CAP_ANDROID_COLOR_FRAME_RGB = 2, + CV_CAP_ANDROID_COLOR_FRAME_BGRA = 3, + CV_CAP_ANDROID_COLOR_FRAME_RGBA = 4, + CV_CAP_ANDROID_FLASH_MODE_AUTO = 0, + CV_CAP_ANDROID_FLASH_MODE_OFF = 0+1, + CV_CAP_ANDROID_FLASH_MODE_ON = 0+2, + CV_CAP_ANDROID_FLASH_MODE_RED_EYE = 0+3, + CV_CAP_ANDROID_FLASH_MODE_TORCH = 0+4, + CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0, + CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO = 0+1, + CV_CAP_ANDROID_FOCUS_MODE_EDOF = 0+2, + CV_CAP_ANDROID_FOCUS_MODE_FIXED = 0+3, + CV_CAP_ANDROID_FOCUS_MODE_INFINITY = 0+4, + CV_CAP_ANDROID_FOCUS_MODE_MACRO = 0+5, + CV_CAP_ANDROID_WHITE_BALANCE_AUTO = 0, + CV_CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT = 0+1, + CV_CAP_ANDROID_WHITE_BALANCE_DAYLIGHT = 0+2, + CV_CAP_ANDROID_WHITE_BALANCE_FLUORESCENT = 0+3, + CV_CAP_ANDROID_WHITE_BALANCE_INCANDESCENT = 0+4, + CV_CAP_ANDROID_WHITE_BALANCE_SHADE = 0+5, + CV_CAP_ANDROID_WHITE_BALANCE_TWILIGHT = 0+6, + CV_CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT = 0+7, + CV_CAP_ANDROID_ANTIBANDING_50HZ = 0, + CV_CAP_ANDROID_ANTIBANDING_60HZ = 0+1, + CV_CAP_ANDROID_ANTIBANDING_AUTO = 0+2, + CV_CAP_ANDROID_ANTIBANDING_OFF = 0+3, + IMREAD_UNCHANGED = -1, + IMREAD_GRAYSCALE = 0, + IMREAD_COLOR = 1, + IMREAD_ANYDEPTH = 2, + IMREAD_ANYCOLOR = 4, + IMWRITE_JPEG_QUALITY = 1, + IMWRITE_PNG_COMPRESSION = 16, + IMWRITE_PNG_STRATEGY = 17, + IMWRITE_PNG_BILEVEL = 18, + IMWRITE_PNG_STRATEGY_DEFAULT = 0, + IMWRITE_PNG_STRATEGY_FILTERED = 1, + IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY = 2, + IMWRITE_PNG_STRATEGY_RLE = 3, + IMWRITE_PNG_STRATEGY_FIXED = 4, + IMWRITE_PXM_BINARY = 32; + + + // + // C++: Mat imdecode(Mat buf, int flags) + // + +/** + *

Reads an image from a buffer in memory.

+ * + *

The function reads an image from the specified buffer in memory. + * If the buffer is too short or contains invalid data, an empty matrix/image + * is returned.

+ * + *

See "imread" for the list of supported formats and flags description.

+ * + *

Note: In the case of color images, the decoded images will have the channels + * stored in B G R order.
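+ *
+ * A minimal Java sketch (illustrative; "bytes" is a hypothetical byte[]
+ * holding an encoded image, e.g. received over a socket):
+ *
+ * // Java code:
+ * Mat img = Highgui.imdecode(new MatOfByte(bytes), Highgui.IMREAD_COLOR);
+ * if (img.empty()) System.err.println("buffer too short or invalid");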

+ * + * @param buf Input array or vector of bytes. + * @param flags The same flags as in "imread". + * + * @see org.opencv.highgui.Highgui.imdecode + */ + public static Mat imdecode(Mat buf, int flags) + { + + Mat retVal = new Mat(imdecode_0(buf.nativeObj, flags)); + + return retVal; + } + + + // + // C++: bool imencode(string ext, Mat img, vector_uchar& buf, vector_int params = vector()) + // + +/** + *

Encodes an image into a memory buffer.

+ * + *

The function compresses the image and stores it in the memory buffer that is + * resized to fit the result. + * See "imwrite" for the list of supported formats and flags description.

+ * + *

Note: cvEncodeImage returns a single-row matrix of type + * CV_8UC1 that contains the encoded image as an array of bytes.
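+ *
+ * A minimal Java sketch (illustrative; "img" is an existing Mat):
+ *
+ * // Java code:
+ * MatOfByte buf = new MatOfByte();
+ * MatOfInt params = new MatOfInt(Highgui.CV_IMWRITE_JPEG_QUALITY, 90);
+ * boolean ok = Highgui.imencode(".jpg", img, buf, params);
+ * byte[] bytes = buf.toArray(); // the compressed JPEG data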

+ * + * @param ext File extension that defines the output format. + * @param img Image to be written. + * @param buf Output buffer resized to fit the compressed image. + * @param params Format-specific parameters. See "imwrite". + * + * @see org.opencv.highgui.Highgui.imencode + */ + public static boolean imencode(String ext, Mat img, MatOfByte buf, MatOfInt params) + { + Mat buf_mat = buf; + Mat params_mat = params; + boolean retVal = imencode_0(ext, img.nativeObj, buf_mat.nativeObj, params_mat.nativeObj); + + return retVal; + } + +/** + *

Encodes an image into a memory buffer.

+ * + *

The function compresses the image and stores it in the memory buffer that is + * resized to fit the result. + * See "imwrite" for the list of supported formats and flags description.

+ * + *

Note: cvEncodeImage returns a single-row matrix of type + * CV_8UC1 that contains the encoded image as an array of bytes.

+ * + * @param ext File extension that defines the output format. + * @param img Image to be written. + * @param buf Output buffer resized to fit the compressed image. + * + * @see org.opencv.highgui.Highgui.imencode + */ + public static boolean imencode(String ext, Mat img, MatOfByte buf) + { + Mat buf_mat = buf; + boolean retVal = imencode_1(ext, img.nativeObj, buf_mat.nativeObj); + + return retVal; + } + + + // + // C++: Mat imread(string filename, int flags = 1) + // + +/** + *

Loads an image from a file.

+ * + *

The function imread loads an image from the specified file and + * returns it. If the image cannot be read (because of missing file, improper + * permissions, unsupported or invalid format), the function returns an empty + * matrix (Mat.data==NULL). Currently, the following file formats + * are supported:

+ *
    + *
  • Windows bitmaps - *.bmp, *.dib (always supported) + *
  • JPEG files - *.jpeg, *.jpg, *.jpe (see the *Notes* + * section) + *
  • JPEG 2000 files - *.jp2 (see the *Notes* section) + *
  • Portable Network Graphics - *.png (see the *Notes* + * section) + *
  • Portable image format - *.pbm, *.pgm, *.ppm (always + * supported) + *
  • Sun rasters - *.sr, *.ras (always supported) + *
  • TIFF files - *.tiff, *.tif (see the *Notes* section) + *
+ * + *

Note:

+ *
    + *
  • The function determines the type of an image by the content, not by + * the file extension. + *
  • On Microsoft Windows and Mac OS X, the codecs shipped with + * OpenCV (libjpeg, libpng, libtiff, and libjasper) are used by default. + * So, OpenCV can always read JPEGs, PNGs, and TIFFs. On Mac OS X, there is also + * an option to use native Mac OS X image readers. But beware that currently these + * native image loaders give images with different pixel values because of the + * color management embedded into Mac OS X. + *
  • On Linux, BSD flavors and other Unix-like open-source operating + * systems, OpenCV looks for codecs supplied with the OS image. Install the + * relevant packages (do not forget the development files, for example, + * "libjpeg-dev" in Debian and Ubuntu) to get the codec support or turn on + * the OPENCV_BUILD_3RDPARTY_LIBS flag in CMake. + *
+ * + *

Note: In the case of color images, the decoded images will have the channels + * stored in B G R order.
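+ *
+ * A minimal Java sketch (the file name is illustrative):
+ *
+ * // Java code:
+ * Mat color = Highgui.imread("photo.jpg"); // 8-bit BGR by default
+ * Mat gray = Highgui.imread("photo.jpg", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
+ * if (gray.empty()) System.err.println("could not read photo.jpg");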

+ * + * @param filename Name of file to be loaded. + * @param flags Flags specifying the color type of a loaded image: + *
    + *
  • CV_LOAD_IMAGE_ANYDEPTH - If set, return 16-bit/32-bit image when the + * input has the corresponding depth, otherwise convert it to 8-bit. + *
  • CV_LOAD_IMAGE_COLOR - If set, always convert image to the color one + *
  • CV_LOAD_IMAGE_GRAYSCALE - If set, always convert image to the + * grayscale one + *
  • >0 Return a 3-channel color image. + *
+ *

Note: In the current implementation the alpha channel, if any, is stripped + * from the output image. Use negative value if you need the alpha channel.

+ *
    + *
  • =0 Return a grayscale image. + *
  • <0 Return the loaded image as is (with alpha channel). + *
+ * + * @see org.opencv.highgui.Highgui.imread + */ + public static Mat imread(String filename, int flags) + { + + Mat retVal = new Mat(imread_0(filename, flags)); + + return retVal; + } + +/** + *

Loads an image from a file.

+ * + *

The function imread loads an image from the specified file and + * returns it. If the image cannot be read (because of missing file, improper + * permissions, unsupported or invalid format), the function returns an empty + * matrix (Mat.data==NULL). Currently, the following file formats + * are supported:

+ *
    + *
  • Windows bitmaps - *.bmp, *.dib (always supported) + *
  • JPEG files - *.jpeg, *.jpg, *.jpe (see the *Notes* + * section) + *
  • JPEG 2000 files - *.jp2 (see the *Notes* section) + *
  • Portable Network Graphics - *.png (see the *Notes* + * section) + *
  • Portable image format - *.pbm, *.pgm, *.ppm (always + * supported) + *
  • Sun rasters - *.sr, *.ras (always supported) + *
  • TIFF files - *.tiff, *.tif (see the *Notes* section) + *
+ * + *

Note:

+ *
    + *
  • The function determines the type of an image by the content, not by + * the file extension. + *
  • On Microsoft Windows and Mac OS X, the codecs shipped with + * OpenCV (libjpeg, libpng, libtiff, and libjasper) are used by default. + * So, OpenCV can always read JPEGs, PNGs, and TIFFs. On Mac OS X, there is also + * an option to use native Mac OS X image readers. But beware that currently these + * native image loaders give images with different pixel values because of the + * color management embedded into Mac OS X. + *
  • On Linux, BSD flavors and other Unix-like open-source operating + * systems, OpenCV looks for codecs supplied with the OS image. Install the + * relevant packages (do not forget the development files, for example, + * "libjpeg-dev" in Debian and Ubuntu) to get the codec support or turn on + * the OPENCV_BUILD_3RDPARTY_LIBS flag in CMake. + *
+ * + *

Note: In the case of color images, the decoded images will have the channels + * stored in B G R order.

+ * + * @param filename Name of file to be loaded. + * + * @see org.opencv.highgui.Highgui.imread + */ + public static Mat imread(String filename) + { + + Mat retVal = new Mat(imread_1(filename)); + + return retVal; + } + + + // + // C++: bool imwrite(string filename, Mat img, vector_int params = vector()) + // + +/** + *

Saves an image to a specified file.

+ * + *

The function imwrite saves the image to the specified file. The + * image format is chosen based on the filename extension (see + * "imread" for the list of extensions). Only 8-bit (or 16-bit unsigned + * (CV_16U) in case of PNG, JPEG 2000, and TIFF) single-channel or + * 3-channel (with 'BGR' channel order) images can be saved using this function. + * If the format, depth or channel order is different, use "Mat.convertTo" and + * "cvtColor" to convert the image before saving. Or, use the universal XML I/O + * functions to save the image to XML or YAML format. + * It is possible to store PNG images with an alpha channel using this function. + * To do this, create an 8-bit (or 16-bit) 4-channel BGRA image, where the alpha + * channel goes last. Fully transparent pixels should have alpha set to 0, fully + * opaque pixels should have alpha set to 255/65535. The sample below shows how + * to create such a BGRA image and store it to a PNG file. It also demonstrates how + * to set custom compression parameters:
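+ *
+ * A minimal Java sketch of the same idea (illustrative; "bgra" is an existing
+ * CV_8UC4 Mat; the original C++ sample follows below):
+ *
+ * // Java code:
+ * MatOfInt params = new MatOfInt(Highgui.CV_IMWRITE_PNG_COMPRESSION, 9);
+ * boolean saved = Highgui.imwrite("alpha.png", bgra, params);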

+ *
+ * // C++ code:
+ *
+ * #include <vector>
+ * #include <stdio.h>
+ * #include "opencv2/opencv.hpp"
+ *
+ * using namespace cv;
+ * using namespace std;
+ *
+ * void createAlphaMat(Mat &mat)
+ * {
+ *     for (int i = 0; i < mat.rows; ++i) {
+ *         for (int j = 0; j < mat.cols; ++j) {
+ *             Vec4b& rgba = mat.at<Vec4b>(i, j);
+ *             rgba[0] = UCHAR_MAX;
+ *             rgba[1] = saturate_cast<uchar>((float)(mat.cols - j) / ((float)mat.cols) * UCHAR_MAX);
+ *             rgba[2] = saturate_cast<uchar>((float)(mat.rows - i) / ((float)mat.rows) * UCHAR_MAX);
+ *             rgba[3] = saturate_cast<uchar>(0.5 * (rgba[1] + rgba[2]));
+ *         }
+ *     }
+ * }
+ *
+ * int main(int argc, char** argv)
+ * {
+ *     // Create mat with alpha channel
+ *     Mat mat(480, 640, CV_8UC4);
+ *     createAlphaMat(mat);
+ *
+ *     vector<int> compression_params;
+ *     compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
+ *     compression_params.push_back(9);
+ *
+ *     try {
+ *         imwrite("alpha.png", mat, compression_params);
+ *     }
+ *     catch (runtime_error& ex) {
+ *         fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what());
+ *         return 1;
+ *     }
+ *
+ *     fprintf(stdout, "Saved PNG file with alpha data.\n");
+ *     return 0;
+ * }

+ * + * + * @param filename Name of the file. + * @param img a img + * @param params Format-specific save parameters encoded as pairs + * paramId_1, paramValue_1, paramId_2, paramValue_2,.... The + * following parameters are currently supported: + *
    + *
  • For JPEG, it can be a quality (CV_IMWRITE_JPEG_QUALITY) + * from 0 to 100 (the higher is the better). Default value is 95. + *
  • For PNG, it can be the compression level (CV_IMWRITE_PNG_COMPRESSION) + * from 0 to 9. A higher value means a smaller size and longer compression time. + * Default value is 3. + *
  • For PPM, PGM, or PBM, it can be a binary format flag (CV_IMWRITE_PXM_BINARY), + * 0 or 1. Default value is 1. + *
+ * + * @see org.opencv.highgui.Highgui.imwrite + */ + public static boolean imwrite(String filename, Mat img, MatOfInt params) + { + Mat params_mat = params; + boolean retVal = imwrite_0(filename, img.nativeObj, params_mat.nativeObj); + + return retVal; + } + +/** + *

Saves an image to a specified file.

+ * + *

The function imwrite saves the image to the specified file. The + * image format is chosen based on the filename extension (see + * "imread" for the list of extensions). Only 8-bit (or 16-bit unsigned + * (CV_16U) in case of PNG, JPEG 2000, and TIFF) single-channel or + * 3-channel (with 'BGR' channel order) images can be saved using this function. + * If the format, depth or channel order is different, use "Mat.convertTo" and + * "cvtColor" to convert the image before saving. Or, use the universal XML I/O + * functions to save the image to XML or YAML format. + * It is possible to store PNG images with an alpha channel using this function. + * To do this, create an 8-bit (or 16-bit) 4-channel BGRA image, where the alpha + * channel goes last. Fully transparent pixels should have alpha set to 0, fully + * opaque pixels should have alpha set to 255/65535. The sample below shows how + * to create such a BGRA image and store it to a PNG file. It also demonstrates how + * to set custom compression parameters:

+ *
+ * // C++ code:
+ *
+ * #include <vector>
+ * #include <stdio.h>
+ * #include "opencv2/opencv.hpp"
+ *
+ * using namespace cv;
+ * using namespace std;
+ *
+ * void createAlphaMat(Mat &mat)
+ * {
+ *     for (int i = 0; i < mat.rows; ++i) {
+ *         for (int j = 0; j < mat.cols; ++j) {
+ *             Vec4b& rgba = mat.at<Vec4b>(i, j);
+ *             rgba[0] = UCHAR_MAX;
+ *             rgba[1] = saturate_cast<uchar>((float)(mat.cols - j) / ((float)mat.cols) * UCHAR_MAX);
+ *             rgba[2] = saturate_cast<uchar>((float)(mat.rows - i) / ((float)mat.rows) * UCHAR_MAX);
+ *             rgba[3] = saturate_cast<uchar>(0.5 * (rgba[1] + rgba[2]));
+ *         }
+ *     }
+ * }
+ *
+ * int main(int argc, char** argv)
+ * {
+ *     // Create mat with alpha channel
+ *     Mat mat(480, 640, CV_8UC4);
+ *     createAlphaMat(mat);
+ *
+ *     vector<int> compression_params;
+ *     compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
+ *     compression_params.push_back(9);
+ *
+ *     try {
+ *         imwrite("alpha.png", mat, compression_params);
+ *     }
+ *     catch (runtime_error& ex) {
+ *         fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what());
+ *         return 1;
+ *     }
+ *
+ *     fprintf(stdout, "Saved PNG file with alpha data.\n");
+ *     return 0;
+ * }

+ * + * + * @param filename Name of the file. + * @param img a img + * + * @see org.opencv.highgui.Highgui.imwrite + */ + public static boolean imwrite(String filename, Mat img) + { + + boolean retVal = imwrite_1(filename, img.nativeObj); + + return retVal; + } + + + + + // C++: Mat imdecode(Mat buf, int flags) + private static native long imdecode_0(long buf_nativeObj, int flags); + + // C++: bool imencode(string ext, Mat img, vector_uchar& buf, vector_int params = vector()) + private static native boolean imencode_0(String ext, long img_nativeObj, long buf_mat_nativeObj, long params_mat_nativeObj); + private static native boolean imencode_1(String ext, long img_nativeObj, long buf_mat_nativeObj); + + // C++: Mat imread(string filename, int flags = 1) + private static native long imread_0(String filename, int flags); + private static native long imread_1(String filename); + + // C++: bool imwrite(string filename, Mat img, vector_int params = vector()) + private static native boolean imwrite_0(String filename, long img_nativeObj, long params_mat_nativeObj); + private static native boolean imwrite_1(String filename, long img_nativeObj); + +} diff --git a/src/org/opencv/highgui/VideoCapture.java b/src/org/opencv/highgui/VideoCapture.java new file mode 100644 index 0000000..7dcf322 --- /dev/null +++ b/src/org/opencv/highgui/VideoCapture.java @@ -0,0 +1,411 @@ +package org.opencv.highgui; + +import java.util.List; +import java.util.LinkedList; + +import org.opencv.core.Mat; +import org.opencv.core.Size; + +// C++: class VideoCapture +/** + *

Class for video capturing from video files or cameras. + * The class provides C++ API for capturing video from cameras or for reading + * video files. Here is how the class can be used:

+ *
+ * #include "opencv2/opencv.hpp"
+ *
+ * // C++ code:
+ *
+ * using namespace cv;
+ *
+ * int main(int, char**)
+ * {
+ *     VideoCapture cap(0); // open the default camera
+ *     if(!cap.isOpened())  // check if we succeeded
+ *         return -1;
+ *
+ *     Mat edges;
+ *     namedWindow("edges",1);
+ *     for(;;)
+ *     {
+ *         Mat frame;
+ *         cap >> frame; // get a new frame from camera
+ *         cvtColor(frame, edges, CV_BGR2GRAY);
+ *         GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
+ *         Canny(edges, edges, 0, 30, 3);
+ *         imshow("edges", edges);
+ *         if(waitKey(30) >= 0) break;
+ *     }
+ *     // the camera will be deinitialized automatically in VideoCapture destructor
+ *     return 0;
+ * }

+ * + * + *

Note: In the C API, the black-box structure CvCapture is used instead + * of VideoCapture. + *
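+ *
+ * A minimal Java sketch (camera index 0 is the default device; error handling
+ * is illustrative):
+ *
+ * // Java code:
+ * VideoCapture cap = new VideoCapture(0);
+ * if (cap.isOpened()) {
+ *     Mat frame = new Mat();
+ *     if (cap.read(frame)) {
+ *         // use frame here...
+ *     }
+ *     cap.release();
+ * }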

+ * + * @see org.opencv.highgui.VideoCapture + */ +public class VideoCapture { + + protected final long nativeObj; + + protected VideoCapture(long addr) { + nativeObj = addr; + } + + // + // C++: VideoCapture::VideoCapture() + // + +/** + *

VideoCapture constructors.

+ * + *

Note: In the C API, when you have finished working with video, release the + * CvCapture structure with cvReleaseCapture(), or use + * Ptr<CvCapture> that calls cvReleaseCapture() + * automatically in the destructor.

+ * + * @see org.opencv.highgui.VideoCapture.VideoCapture + */ + public VideoCapture() + { + + nativeObj = n_VideoCapture(); + + return; + } + + // + // C++: VideoCapture::VideoCapture(int device) + // + +/** + *

VideoCapture constructors.

+ * + *

Note: In the C API, when you have finished working with video, release the + * CvCapture structure with cvReleaseCapture(), or use + * Ptr<CvCapture> that calls cvReleaseCapture() + * automatically in the destructor.

+ * + * @param device id of the opened video capturing device (i.e. a camera index). + * If there is a single camera connected, just pass 0. + * + * @see org.opencv.highgui.VideoCapture.VideoCapture + */ + public VideoCapture(int device) + { + + nativeObj = n_VideoCapture(device); + + return; + } + + // + // C++: double VideoCapture::get(int propId) + // + +/** + * Returns the specified "VideoCapture" property. + * + * Note: When querying a property that is not supported by the backend used by + * the "VideoCapture" class, value 0 is returned. + * + * @param propId property identifier; it can be one of the following: + * * CV_CAP_PROP_FRAME_WIDTH width of the frames in the video stream. + * * CV_CAP_PROP_FRAME_HEIGHT height of the frames in the video stream. + * + * @see org.opencv.highgui.VideoCapture.get + */ + public double get(int propId) + { + + double retVal = n_get(nativeObj, propId); + + return retVal; + } + + public List getSupportedPreviewSizes() + { + String[] sizes_str = n_getSupportedPreviewSizes(nativeObj).split(","); + List sizes = new LinkedList(); + + for (String str : sizes_str) { + String[] wh = str.split("x"); + sizes.add(new Size(Double.parseDouble(wh[0]), Double.parseDouble(wh[1]))); + } + + return sizes; + } + + // + // C++: bool VideoCapture::grab() + // + +/** + *

Grabs the next frame from video file or capturing device.

+ * + *

The methods/functions grab the next frame from video file or camera and + * return true (non-zero) in the case of success.

+ * + *

The primary use of the function is in multi-camera environments, especially + * when the cameras do not have hardware synchronization. That is, you call + * VideoCapture.grab() for each camera and after that call the + * slower method VideoCapture.retrieve() to decode and get frame + * from each camera. This way the overhead on demosaicing or motion jpeg + * decompression etc. is eliminated and the retrieved frames from different + * cameras will be closer in time.

+ * + *

Also, when a connected camera is multi-head (for example, a stereo camera or + * a Kinect device), the correct way of retrieving data from it is to call + * "VideoCapture.grab" first and then call "VideoCapture.retrieve" one or more + * times with different values of the channel parameter. See + * http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/kinect_maps.cpp
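+ *
+ * A minimal Java sketch (illustrative; capA and capB are two already opened
+ * VideoCapture instances):
+ *
+ * // Java code:
+ * if (capA.grab() && capB.grab()) { // trigger both cameras first...
+ *     capA.retrieve(frameA);        // ...then decode; the frames are close in time
+ *     capB.retrieve(frameB);
+ * }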

+ * + * @see org.opencv.highgui.VideoCapture.grab + */ + public boolean grab() + { + + boolean retVal = n_grab(nativeObj); + + return retVal; + } + + // + // C++: bool VideoCapture::isOpened() + // + +/** + *

Returns true if video capturing has been initialized already.

+ * + *

If the previous call to VideoCapture constructor or + * VideoCapture.open succeeded, the method returns true.

+ * + * @see org.opencv.highgui.VideoCapture.isOpened + */ + public boolean isOpened() + { + + boolean retVal = n_isOpened(nativeObj); + + return retVal; + } + + // + // C++: bool VideoCapture::open(int device) + // + +/** + *

Opens a video file or a capturing device for video capturing.

+ * + *

The methods first call "VideoCapture.release" to close the already opened + * file or camera.

+ * + * @param device id of the opened video capturing device (i.e. a camera index). + * + * @see org.opencv.highgui.VideoCapture.open + */ + public boolean open(int device) + { + + boolean retVal = n_open(nativeObj, device); + + return retVal; + } + + // + // C++: bool VideoCapture::read(Mat image) + // + +/** + *

Grabs, decodes and returns the next video frame.

+ * + *

The methods/functions combine "VideoCapture.grab" and "VideoCapture.retrieve" + * in one call. This is the most convenient method for reading video files or + * capturing data from a camera: it grabs, decodes and returns the next frame. If no frame + * has been grabbed (the camera has been disconnected, or there are no more frames + * in the video file), the method returns false (and the C functions return a NULL + * pointer).

+ * + *

Note: OpenCV 1.x functions cvRetrieveFrame and cv.RetrieveFrame + * return image stored inside the video capturing structure. It is not allowed + * to modify or release the image! You can copy the frame using "cvCloneImage" + * and then do whatever you want with the copy.

+ * + * @param image a image + * + * @see org.opencv.highgui.VideoCapture.read + */ + public boolean read(Mat image) + { + + boolean retVal = n_read(nativeObj, image.nativeObj); + + return retVal; + } + + // + // C++: void VideoCapture::release() + // + +/** + *

Closes video file or capturing device.

+ * + *

The methods are automatically called by subsequent "VideoCapture.open" and + * by VideoCapture destructor.

+ * + *

The C function also deallocates memory and clears *capture + * pointer.

+ * + * @see org.opencv.highgui.VideoCapture.release + */ + public void release() + { + + n_release(nativeObj); + + return; + } + + // + // C++: bool VideoCapture::retrieve(Mat image, int channel = 0) + // + +/** + *

Decodes and returns the grabbed video frame.

+ * + *

The methods/functions decode and return the just grabbed frame. If no frame + * has been grabbed (the camera has been disconnected, or there are no more frames + * in the video file), the methods return false (and the C functions return a NULL + * pointer).

+ * + *

Note: OpenCV 1.x functions cvRetrieveFrame and cv.RetrieveFrame + * return image stored inside the video capturing structure. It is not allowed + * to modify or release the image! You can copy the frame using "cvCloneImage" + * and then do whatever you want with the copy.

+ * + * @param image a image + * @param channel a channel + * + * @see org.opencv.highgui.VideoCapture.retrieve + */ + public boolean retrieve(Mat image, int channel) + { + + boolean retVal = n_retrieve(nativeObj, image.nativeObj, channel); + + return retVal; + } + +/** + *

Decodes and returns the grabbed video frame.

+ * + *

The methods/functions decode and return the just grabbed frame. If no frame + * has been grabbed (the camera has been disconnected, or there are no more frames + * in the video file), the methods return false (and the C functions return a NULL + * pointer).

+ * + *

Note: OpenCV 1.x functions cvRetrieveFrame and cv.RetrieveFrame + * return image stored inside the video capturing structure. It is not allowed + * to modify or release the image! You can copy the frame using "cvCloneImage" + * and then do whatever you want with the copy.

+ * + * @param image a image + * + * @see org.opencv.highgui.VideoCapture.retrieve + */ + public boolean retrieve(Mat image) + { + + boolean retVal = n_retrieve(nativeObj, image.nativeObj); + + return retVal; + } + + // + // C++: bool VideoCapture::set(int propId, double value) + // + +/** + * Sets a property in the "VideoCapture". + * + * @param propId property identifier; it can be one of the following: + * * CV_CAP_PROP_FRAME_WIDTH width of the frames in the video stream. + * * CV_CAP_PROP_FRAME_HEIGHT height of the frames in the video stream. + * @param value value of the property. + * + * @see org.opencv.highgui.VideoCapture.set + */ + public boolean set(int propId, double value) + { + + boolean retVal = n_set(nativeObj, propId, value); + + return retVal; + } + + @Override + protected void finalize() throws Throwable { + n_delete(nativeObj); + super.finalize(); + } + + // C++: VideoCapture::VideoCapture() + private static native long n_VideoCapture(); + + // C++: VideoCapture::VideoCapture(string filename) + private static native long n_VideoCapture(java.lang.String filename); + + // C++: VideoCapture::VideoCapture(int device) + private static native long n_VideoCapture(int device); + + // C++: double VideoCapture::get(int propId) + private static native double n_get(long nativeObj, int propId); + + // C++: bool VideoCapture::grab() + private static native boolean n_grab(long nativeObj); + + // C++: bool VideoCapture::isOpened() + private static native boolean n_isOpened(long nativeObj); + + // C++: bool VideoCapture::open(string filename) + private static native boolean n_open(long nativeObj, java.lang.String filename); + + // C++: bool VideoCapture::open(int device) + private static native boolean n_open(long nativeObj, int device); + + // C++: bool VideoCapture::read(Mat image) + private static native boolean n_read(long nativeObj, long image_nativeObj); + + // C++: void VideoCapture::release() + private static native void n_release(long nativeObj); + + // C++: bool VideoCapture::retrieve(Mat image, int channel = 0) + private static native boolean n_retrieve(long nativeObj, long image_nativeObj, int channel); + + private static native boolean n_retrieve(long nativeObj, long image_nativeObj); + + // C++: bool VideoCapture::set(int propId, double value) + private static native boolean n_set(long nativeObj, int propId, double value); + + private static native String n_getSupportedPreviewSizes(long nativeObj); + + // native support for java finalize() + private static native void n_delete(long nativeObj); + +} diff --git a/src/org/opencv/highgui/package.bluej b/src/org/opencv/highgui/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/imgproc/Imgproc.java b/src/org/opencv/imgproc/Imgproc.java new file mode 100644 index 0000000..aa51ea0 --- /dev/null +++ b/src/org/opencv/imgproc/Imgproc.java @@ -0,0 +1,9630 @@ + +// +// This file is auto-generated. Please don't modify it! 
+// +package org.opencv.imgproc; + +import java.util.ArrayList; +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfFloat; +import org.opencv.core.MatOfInt; +import org.opencv.core.MatOfInt4; +import org.opencv.core.MatOfPoint; +import org.opencv.core.MatOfPoint2f; +import org.opencv.core.Point; +import org.opencv.core.Rect; +import org.opencv.core.RotatedRect; +import org.opencv.core.Scalar; +import org.opencv.core.Size; +import org.opencv.core.TermCriteria; +import org.opencv.utils.Converters; + +public class Imgproc { + + private static final int + IPL_BORDER_CONSTANT = 0, + IPL_BORDER_REPLICATE = 1, + IPL_BORDER_REFLECT = 2, + IPL_BORDER_WRAP = 3, + IPL_BORDER_REFLECT_101 = 4, + IPL_BORDER_TRANSPARENT = 5, + CV_INTER_NN = 0, + CV_INTER_LINEAR = 1, + CV_INTER_CUBIC = 2, + CV_INTER_AREA = 3, + CV_INTER_LANCZOS4 = 4, + CV_MOP_ERODE = 0, + CV_MOP_DILATE = 1, + CV_MOP_OPEN = 2, + CV_MOP_CLOSE = 3, + CV_MOP_GRADIENT = 4, + CV_MOP_TOPHAT = 5, + CV_MOP_BLACKHAT = 6, + CV_RETR_EXTERNAL = 0, + CV_RETR_LIST = 1, + CV_RETR_CCOMP = 2, + CV_RETR_TREE = 3, + CV_RETR_FLOODFILL = 4, + CV_CHAIN_APPROX_NONE = 1, + CV_CHAIN_APPROX_SIMPLE = 2, + CV_CHAIN_APPROX_TC89_L1 = 3, + CV_CHAIN_APPROX_TC89_KCOS = 4, + CV_THRESH_BINARY = 0, + CV_THRESH_BINARY_INV = 1, + CV_THRESH_TRUNC = 2, + CV_THRESH_TOZERO = 3, + CV_THRESH_TOZERO_INV = 4, + CV_THRESH_MASK = 7, + CV_THRESH_OTSU = 8; + + + public static final int + CV_BLUR_NO_SCALE = 0, + CV_BLUR = 1, + CV_GAUSSIAN = 2, + CV_MEDIAN = 3, + CV_BILATERAL = 4, + CV_GAUSSIAN_5x5 = 7, + CV_SCHARR = -1, + CV_MAX_SOBEL_KSIZE = 7, + CV_RGBA2mRGBA = 125, + CV_mRGBA2RGBA = 126, + CV_WARP_FILL_OUTLIERS = 8, + CV_WARP_INVERSE_MAP = 16, + CV_SHAPE_RECT = 0, + CV_SHAPE_CROSS = 1, + CV_SHAPE_ELLIPSE = 2, + CV_SHAPE_CUSTOM = 100, + CV_CHAIN_CODE = 0, + CV_LINK_RUNS = 5, + CV_POLY_APPROX_DP = 0, + CV_CONTOURS_MATCH_I1 = 1, + CV_CONTOURS_MATCH_I2 = 2, + CV_CONTOURS_MATCH_I3 = 3, + CV_CLOCKWISE = 1, + CV_COUNTER_CLOCKWISE = 2, + CV_COMP_CORREL = 0, + CV_COMP_CHISQR = 1, + CV_COMP_INTERSECT = 2, + CV_COMP_BHATTACHARYYA = 3, + CV_COMP_HELLINGER = CV_COMP_BHATTACHARYYA, + CV_DIST_MASK_3 = 3, + CV_DIST_MASK_5 = 5, + CV_DIST_MASK_PRECISE = 0, + CV_DIST_LABEL_CCOMP = 0, + CV_DIST_LABEL_PIXEL = 1, + CV_DIST_USER = -1, + CV_DIST_L1 = 1, + CV_DIST_L2 = 2, + CV_DIST_C = 3, + CV_DIST_L12 = 4, + CV_DIST_FAIR = 5, + CV_DIST_WELSCH = 6, + CV_DIST_HUBER = 7, + CV_CANNY_L2_GRADIENT = (1 << 31), + CV_HOUGH_STANDARD = 0, + CV_HOUGH_PROBABILISTIC = 1, + CV_HOUGH_MULTI_SCALE = 2, + CV_HOUGH_GRADIENT = 3, + BORDER_REPLICATE = IPL_BORDER_REPLICATE, + BORDER_CONSTANT = IPL_BORDER_CONSTANT, + BORDER_REFLECT = IPL_BORDER_REFLECT, + BORDER_WRAP = IPL_BORDER_WRAP, + BORDER_REFLECT_101 = IPL_BORDER_REFLECT_101, + BORDER_REFLECT101 = BORDER_REFLECT_101, + BORDER_TRANSPARENT = IPL_BORDER_TRANSPARENT, + BORDER_DEFAULT = BORDER_REFLECT_101, + BORDER_ISOLATED = 16, + KERNEL_GENERAL = 0, + KERNEL_SYMMETRICAL = 1, + KERNEL_ASYMMETRICAL = 2, + KERNEL_SMOOTH = 4, + KERNEL_INTEGER = 8, + MORPH_ERODE = CV_MOP_ERODE, + MORPH_DILATE = CV_MOP_DILATE, + MORPH_OPEN = CV_MOP_OPEN, + MORPH_CLOSE = CV_MOP_CLOSE, + MORPH_GRADIENT = CV_MOP_GRADIENT, + MORPH_TOPHAT = CV_MOP_TOPHAT, + MORPH_BLACKHAT = CV_MOP_BLACKHAT, + MORPH_RECT = 0, + MORPH_CROSS = 1, + MORPH_ELLIPSE = 2, + GHT_POSITION = 0, + GHT_SCALE = 1, + GHT_ROTATION = 2, + INTER_NEAREST = CV_INTER_NN, + INTER_LINEAR = CV_INTER_LINEAR, + INTER_CUBIC = CV_INTER_CUBIC, + INTER_AREA = CV_INTER_AREA, + INTER_LANCZOS4 = CV_INTER_LANCZOS4, + INTER_MAX = 
7, + WARP_INVERSE_MAP = CV_WARP_INVERSE_MAP, + INTER_BITS = 5, + INTER_BITS2 = INTER_BITS*2, + INTER_TAB_SIZE = (1<Finds edges in an image using the [Canny86] algorithm.

+ * + *

The function finds edges in the input image image and marks them + * in the output map edges using the Canny algorithm. The smallest + * value between threshold1 and threshold2 is used for + * edge linking. The largest value is used to find initial segments of strong + * edges. See http://en.wikipedia.org/wiki/Canny_edge_detector
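+ *
+ * A minimal Java sketch (the file name and thresholds are illustrative
+ * starting values):
+ *
+ * // Java code:
+ * Mat gray = Highgui.imread("input.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
+ * Mat edges = new Mat();
+ * Imgproc.Canny(gray, edges, 50, 150);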

+ * + * @param image single-channel 8-bit input image. + * @param edges output edge map; it has the same size and type as + * image. + * @param threshold1 first threshold for the hysteresis procedure. + * @param threshold2 second threshold for the hysteresis procedure. + * @param apertureSize aperture size for the "Sobel" operator. + * @param L2gradient a flag, indicating whether a more accurate L_2 + * norm =sqrt((dI/dx)^2 + (dI/dy)^2) should be used to calculate the + * image gradient magnitude (L2gradient=true), or whether the + * default L_1 norm =|dI/dx|+|dI/dy| is enough + * (L2gradient=false). + * + * @see org.opencv.imgproc.Imgproc.Canny + */ + public static void Canny(Mat image, Mat edges, double threshold1, double threshold2, int apertureSize, boolean L2gradient) + { + + Canny_0(image.nativeObj, edges.nativeObj, threshold1, threshold2, apertureSize, L2gradient); + + return; + } + +/** + *

Finds edges in an image using the [Canny86] algorithm.

+ * + *

The function finds edges in the input image image and marks them + * in the output map edges using the Canny algorithm. The smallest + * value between threshold1 and threshold2 is used for + * edge linking. The largest value is used to find initial segments of strong + * edges. See http://en.wikipedia.org/wiki/Canny_edge_detector

+ * + * @param image single-channel 8-bit input image. + * @param edges output edge map; it has the same size and type as + * image. + * @param threshold1 first threshold for the hysteresis procedure. + * @param threshold2 second threshold for the hysteresis procedure. + * + * @see org.opencv.imgproc.Imgproc.Canny + */ + public static void Canny(Mat image, Mat edges, double threshold1, double threshold2) + { + + Canny_1(image.nativeObj, edges.nativeObj, threshold1, threshold2); + + return; + } + + + // + // C++: void GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, int borderType = BORDER_DEFAULT) + // + +/** + *

Blurs an image using a Gaussian filter.

+ * + *

The function convolves the source image with the specified Gaussian kernel. + * In-place filtering is supported.
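+ *
+ * A minimal Java sketch (illustrative; "src" is an existing Mat):
+ *
+ * // Java code:
+ * Mat dst = new Mat();
+ * Imgproc.GaussianBlur(src, dst, new Size(9, 9), 2, 2); // 9x9 kernel, sigma 2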

+ * + * @param src input image; the image can have any number of channels, which are + * processed independently, but the depth should be CV_8U, + * CV_16U, CV_16S, CV_32F or + * CV_64F. + * @param dst output image of the same size and type as src. + * @param ksize Gaussian kernel size. ksize.width and + * ksize.height can differ but they both must be positive and odd. + * Or, they can be zero's and then they are computed from sigma*. + * @param sigmaX Gaussian kernel standard deviation in X direction. + * @param sigmaY Gaussian kernel standard deviation in Y direction; if + * sigmaY is zero, it is set to be equal to sigmaX, if + * both sigmas are zeros, they are computed from ksize.width and + * ksize.height, respectively (see "getGaussianKernel" for + * details); to fully control the result regardless of possible future + * modifications of all this semantics, it is recommended to specify all of + * ksize, sigmaX, and sigmaY. + * @param borderType pixel extrapolation method (see "borderInterpolate" for + * details). + * + * @see org.opencv.imgproc.Imgproc.GaussianBlur + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#medianBlur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#blur + * @see org.opencv.imgproc.Imgproc#filter2D + * @see org.opencv.imgproc.Imgproc#bilateralFilter + */ + public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX, double sigmaY, int borderType) + { + + GaussianBlur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX, sigmaY, borderType); + + return; + } + +/** + *

Blurs an image using a Gaussian filter.

+ * + *

The function convolves the source image with the specified Gaussian kernel. + * In-place filtering is supported.

+ * + * @param src input image; the image can have any number of channels, which are + * processed independently, but the depth should be CV_8U, + * CV_16U, CV_16S, CV_32F or + * CV_64F. + * @param dst output image of the same size and type as src. + * @param ksize Gaussian kernel size. ksize.width and + * ksize.height can differ but they both must be positive and odd. + * Or, they can be zero's and then they are computed from sigma*. + * @param sigmaX Gaussian kernel standard deviation in X direction. + * @param sigmaY Gaussian kernel standard deviation in Y direction; if + * sigmaY is zero, it is set to be equal to sigmaX, if + * both sigmas are zeros, they are computed from ksize.width and + * ksize.height, respectively (see "getGaussianKernel" for + * details); to fully control the result regardless of possible future + * modifications of all this semantics, it is recommended to specify all of + * ksize, sigmaX, and sigmaY. + * + * @see org.opencv.imgproc.Imgproc.GaussianBlur + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#medianBlur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#blur + * @see org.opencv.imgproc.Imgproc#filter2D + * @see org.opencv.imgproc.Imgproc#bilateralFilter + */ + public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX, double sigmaY) + { + + GaussianBlur_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX, sigmaY); + + return; + } + +/** + *

Blurs an image using a Gaussian filter.

+ * + *

The function convolves the source image with the specified Gaussian kernel. + * In-place filtering is supported.

+ * + * @param src input image; the image can have any number of channels, which are + * processed independently, but the depth should be CV_8U, + * CV_16U, CV_16S, CV_32F or + * CV_64F. + * @param dst output image of the same size and type as src. + * @param ksize Gaussian kernel size. ksize.width and + * ksize.height can differ but they both must be positive and odd. + * Or, they can be zero's and then they are computed from sigma*. + * @param sigmaX Gaussian kernel standard deviation in X direction. + * + * @see org.opencv.imgproc.Imgproc.GaussianBlur + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#medianBlur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#blur + * @see org.opencv.imgproc.Imgproc#filter2D + * @see org.opencv.imgproc.Imgproc#bilateralFilter + */ + public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX) + { + + GaussianBlur_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX); + + return; + } + + + // + // C++: void HoughCircles(Mat image, Mat& circles, int method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0) + // + +/** + *

Finds circles in a grayscale image using the Hough transform.

+ * + *

The function finds circles in a grayscale image using a modification of the Hough transform. Example:

// C++ code:

#include <cv.h>
#include <highgui.h>
#include <math.h>

using namespace cv;

int main(int argc, char** argv)
{
    Mat img, gray;
    if(argc != 2 || !(img=imread(argv[1], 1)).data)
        return -1;
    cvtColor(img, gray, CV_BGR2GRAY);
    // smooth it, otherwise a lot of false circles may be detected
    GaussianBlur(gray, gray, Size(9, 9), 2, 2);
    vector<Vec3f> circles;
    HoughCircles(gray, circles, CV_HOUGH_GRADIENT,
                 2, gray.rows/4, 200, 100);
    for(size_t i = 0; i < circles.size(); i++)
    {
        Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
        int radius = cvRound(circles[i][2]);
        // draw the circle center
        circle(img, center, 3, Scalar(0,255,0), -1, 8, 0);
        // draw the circle outline
        circle(img, center, radius, Scalar(0,0,255), 3, 8, 0);
    }
    namedWindow("circles", 1);
    imshow("circles", img);
    return 0;
}

Note: Usually the function detects the centers of circles well. However, it may fail to find correct radii. You can assist the function by specifying the radius range (minRadius and maxRadius) if you know it. Or, you may ignore the returned radius, use only the center, and find the correct radius using an additional procedure.
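A rough Java sketch of the same pipeline through this wrapper (file name and threshold values are illustrative):

Mat gray = Highgui.imread("coins.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
Imgproc.GaussianBlur(gray, gray, new Size(9, 9), 2, 2);
Mat circles = new Mat();
Imgproc.HoughCircles(gray, circles, Imgproc.CV_HOUGH_GRADIENT,
        2, gray.rows() / 4, 200, 100, 0, 0);
for (int i = 0; i < circles.cols(); i++) {
    double[] c = circles.get(0, i);   // { x, y, radius }
    Core.circle(gray, new Point(c[0], c[1]), (int) Math.round(c[2]),
            new Scalar(255), 3);
}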

+ * + * @param image 8-bit, single-channel, grayscale input image. + * @param circles Output vector of found circles. Each vector is encoded as a + * 3-element floating-point vector (x, y, radius). + * @param method Detection method to use. Currently, the only implemented method + * is CV_HOUGH_GRADIENT, which is basically *21HT*, described in + * [Yuen90]. + * @param dp Inverse ratio of the accumulator resolution to the image + * resolution. For example, if dp=1, the accumulator has the same + * resolution as the input image. If dp=2, the accumulator has half + * as big width and height. + * @param minDist Minimum distance between the centers of the detected circles. + * If the parameter is too small, multiple neighbor circles may be falsely + * detected in addition to a true one. If it is too large, some circles may be + * missed. + * @param param1 First method-specific parameter. In case of CV_HOUGH_GRADIENT, + * it is the higher threshold of the two passed to the "Canny" edge detector + * (the lower one is twice smaller). + * @param param2 Second method-specific parameter. In case of CV_HOUGH_GRADIENT, + * it is the accumulator threshold for the circle centers at the detection + * stage. The smaller it is, the more false circles may be detected. Circles, + * corresponding to the larger accumulator values, will be returned first. + * @param minRadius Minimum circle radius. + * @param maxRadius Maximum circle radius. + * + * @see org.opencv.imgproc.Imgproc.HoughCircles + * @see org.opencv.imgproc.Imgproc#minEnclosingCircle + * @see org.opencv.imgproc.Imgproc#fitEllipse + */ + public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1, double param2, int minRadius, int maxRadius) + { + + HoughCircles_0(image.nativeObj, circles.nativeObj, method, dp, minDist, param1, param2, minRadius, maxRadius); + + return; + } + +/** + *

Finds circles in a grayscale image using the Hough transform.

+ * + *

The function finds circles in a grayscale image using a modification of the Hough transform. Example:

// C++ code:

#include <cv.h>
#include <highgui.h>
#include <math.h>

using namespace cv;

int main(int argc, char** argv)
{
    Mat img, gray;
    if(argc != 2 || !(img=imread(argv[1], 1)).data)
        return -1;
    cvtColor(img, gray, CV_BGR2GRAY);
    // smooth it, otherwise a lot of false circles may be detected
    GaussianBlur(gray, gray, Size(9, 9), 2, 2);
    vector<Vec3f> circles;
    HoughCircles(gray, circles, CV_HOUGH_GRADIENT,
                 2, gray.rows/4, 200, 100);
    for(size_t i = 0; i < circles.size(); i++)
    {
        Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
        int radius = cvRound(circles[i][2]);
        // draw the circle center
        circle(img, center, 3, Scalar(0,255,0), -1, 8, 0);
        // draw the circle outline
        circle(img, center, radius, Scalar(0,0,255), 3, 8, 0);
    }
    namedWindow("circles", 1);
    imshow("circles", img);
    return 0;
}

Note: Usually the function detects the centers of circles well. However, it may fail to find correct radii. You can assist the function by specifying the radius range (minRadius and maxRadius) if you know it. Or, you may ignore the returned radius, use only the center, and find the correct radius using an additional procedure.

+ * + * @param image 8-bit, single-channel, grayscale input image. + * @param circles Output vector of found circles. Each vector is encoded as a + * 3-element floating-point vector (x, y, radius). + * @param method Detection method to use. Currently, the only implemented method + * is CV_HOUGH_GRADIENT, which is basically *21HT*, described in + * [Yuen90]. + * @param dp Inverse ratio of the accumulator resolution to the image + * resolution. For example, if dp=1, the accumulator has the same + * resolution as the input image. If dp=2, the accumulator has half + * as big width and height. + * @param minDist Minimum distance between the centers of the detected circles. + * If the parameter is too small, multiple neighbor circles may be falsely + * detected in addition to a true one. If it is too large, some circles may be + * missed. + * + * @see org.opencv.imgproc.Imgproc.HoughCircles + * @see org.opencv.imgproc.Imgproc#minEnclosingCircle + * @see org.opencv.imgproc.Imgproc#fitEllipse + */ + public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist) + { + + HoughCircles_1(image.nativeObj, circles.nativeObj, method, dp, minDist); + + return; + } + + + // + // C++: void HoughLines(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0) + // + +/** + *

Finds lines in a binary image using the standard Hough transform.

+ * + *

The function implements the standard or multi-scale Hough transform algorithm for line detection. See http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm for a good explanation of the Hough transform. See also the example in the "HoughLinesP" description.
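A minimal Java sketch, assuming a grayscale image gray (names illustrative):

Mat edges = new Mat();
Imgproc.Canny(gray, edges, 50, 200);
Mat lines = new Mat();
// rho resolution 1 px, theta resolution 1 degree, 100-vote threshold
Imgproc.HoughLines(edges, lines, 1, Math.PI / 180, 100);
double[] first = lines.get(0, 0);   // { rho, theta }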

+ * + * @param image 8-bit, single-channel binary source image. The image may be + * modified by the function. + * @param lines Output vector of lines. Each line is represented by a + * two-element vector (rho, theta). rho is the distance from + * the coordinate origin (0,0) (top-left corner of the image). + * theta is the line rotation angle in radians (0 ~ vertical line, + * pi/2 ~ horizontal line). + * @param rho Distance resolution of the accumulator in pixels. + * @param theta Angle resolution of the accumulator in radians. + * @param threshold Accumulator threshold parameter. Only those lines are + * returned that get enough votes (>threshold). + * @param srn For the multi-scale Hough transform, it is a divisor for the + * distance resolution rho. The coarse accumulator distance + * resolution is rho and the accurate accumulator resolution is + * rho/srn. If both srn=0 and stn=0, the + * classical Hough transform is used. Otherwise, both these parameters should be + * positive. + * @param stn For the multi-scale Hough transform, it is a divisor for the + * distance resolution theta. + * + * @see org.opencv.imgproc.Imgproc.HoughLines + */ + public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn) + { + + HoughLines_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn); + + return; + } + +/** + *

Finds lines in a binary image using the standard Hough transform.

+ * + *

The function implements the standard or multi-scale Hough transform algorithm for line detection. See http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm for a good explanation of the Hough transform. See also the example in the "HoughLinesP" description.

+ * + * @param image 8-bit, single-channel binary source image. The image may be + * modified by the function. + * @param lines Output vector of lines. Each line is represented by a + * two-element vector (rho, theta). rho is the distance from + * the coordinate origin (0,0) (top-left corner of the image). + * theta is the line rotation angle in radians (0 ~ vertical line, + * pi/2 ~ horizontal line). + * @param rho Distance resolution of the accumulator in pixels. + * @param theta Angle resolution of the accumulator in radians. + * @param threshold Accumulator threshold parameter. Only those lines are + * returned that get enough votes (>threshold). + * + * @see org.opencv.imgproc.Imgproc.HoughLines + */ + public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold) + { + + HoughLines_1(image.nativeObj, lines.nativeObj, rho, theta, threshold); + + return; + } + + + // + // C++: void HoughLinesP(Mat image, Mat& lines, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0) + // + +/** + *

Finds line segments in a binary image using the probabilistic Hough + * transform.

+ * + *

The function implements the probabilistic Hough transform algorithm for line detection, described in [Matas00]. See the line detection example below:

// C++ code:

/* This is a standalone program. Pass an image name as the first parameter
   of the program. Switch between standard and probabilistic Hough transform
   by changing "#if 1" to "#if 0" and back */

#include <cv.h>
#include <highgui.h>
#include <math.h>

using namespace cv;

int main(int argc, char** argv)
{
    Mat src, dst, color_dst;
    if(argc != 2 || !(src=imread(argv[1], 0)).data)
        return -1;
    Canny(src, dst, 50, 200, 3);
    cvtColor(dst, color_dst, CV_GRAY2BGR);
#if 0
    vector<Vec2f> lines;
    HoughLines(dst, lines, 1, CV_PI/180, 100);
    for(size_t i = 0; i < lines.size(); i++)
    {
        float rho = lines[i][0];
        float theta = lines[i][1];
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        Point pt1(cvRound(x0 + 1000*(-b)),
                  cvRound(y0 + 1000*(a)));
        Point pt2(cvRound(x0 - 1000*(-b)),
                  cvRound(y0 - 1000*(a)));
        line(color_dst, pt1, pt2, Scalar(0,0,255), 3, 8);
    }
#else
    vector<Vec4i> lines;
    HoughLinesP(dst, lines, 1, CV_PI/180, 80, 30, 10);
    for(size_t i = 0; i < lines.size(); i++)
    {
        line(color_dst, Point(lines[i][0], lines[i][1]),
             Point(lines[i][2], lines[i][3]), Scalar(0,0,255), 3, 8);
    }
#endif
    namedWindow("Source", 1);
    imshow("Source", src);
    namedWindow("Detected Lines", 1);
    imshow("Detected Lines", color_dst);
    waitKey(0);
    return 0;
}

This is a sample picture the function parameters have been tuned for:

+ * + *

And this is the output of the above program in the case of the probabilistic Hough transform:
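The equivalent call through this Java wrapper might look like the following sketch (the edges and colorDst Mats are assumed to exist):

Mat lines = new Mat();
Imgproc.HoughLinesP(edges, lines, 1, Math.PI / 180, 80, 30, 10);
for (int i = 0; i < lines.cols(); i++) {
    double[] l = lines.get(0, i);   // { x1, y1, x2, y2 }
    Core.line(colorDst, new Point(l[0], l[1]),
            new Point(l[2], l[3]), new Scalar(0, 0, 255), 3);
}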

+ * + * @param image 8-bit, single-channel binary source image. The image may be + * modified by the function. + * @param lines Output vector of lines. Each line is represented by a 4-element + * vector (x_1, y_1, x_2, y_2), where (x_1,y_1) and (x_2, + * y_2) are the ending points of each detected line segment. + * @param rho Distance resolution of the accumulator in pixels. + * @param theta Angle resolution of the accumulator in radians. + * @param threshold Accumulator threshold parameter. Only those lines are + * returned that get enough votes (>threshold). + * @param minLineLength Minimum line length. Line segments shorter than that are + * rejected. + * @param maxLineGap Maximum allowed gap between points on the same line to link + * them. + * + * @see org.opencv.imgproc.Imgproc.HoughLinesP + */ + public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold, double minLineLength, double maxLineGap) + { + + HoughLinesP_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, minLineLength, maxLineGap); + + return; + } + +/** + *

Finds line segments in a binary image using the probabilistic Hough + * transform.

+ * + *

The function implements the probabilistic Hough transform algorithm for line detection, described in [Matas00]. See the line detection example below:

// C++ code:

/* This is a standalone program. Pass an image name as the first parameter
   of the program. Switch between standard and probabilistic Hough transform
   by changing "#if 1" to "#if 0" and back */

#include <cv.h>
#include <highgui.h>
#include <math.h>

using namespace cv;

int main(int argc, char** argv)
{
    Mat src, dst, color_dst;
    if(argc != 2 || !(src=imread(argv[1], 0)).data)
        return -1;
    Canny(src, dst, 50, 200, 3);
    cvtColor(dst, color_dst, CV_GRAY2BGR);
#if 0
    vector<Vec2f> lines;
    HoughLines(dst, lines, 1, CV_PI/180, 100);
    for(size_t i = 0; i < lines.size(); i++)
    {
        float rho = lines[i][0];
        float theta = lines[i][1];
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        Point pt1(cvRound(x0 + 1000*(-b)),
                  cvRound(y0 + 1000*(a)));
        Point pt2(cvRound(x0 - 1000*(-b)),
                  cvRound(y0 - 1000*(a)));
        line(color_dst, pt1, pt2, Scalar(0,0,255), 3, 8);
    }
#else
    vector<Vec4i> lines;
    HoughLinesP(dst, lines, 1, CV_PI/180, 80, 30, 10);
    for(size_t i = 0; i < lines.size(); i++)
    {
        line(color_dst, Point(lines[i][0], lines[i][1]),
             Point(lines[i][2], lines[i][3]), Scalar(0,0,255), 3, 8);
    }
#endif
    namedWindow("Source", 1);
    imshow("Source", src);
    namedWindow("Detected Lines", 1);
    imshow("Detected Lines", color_dst);
    waitKey(0);
    return 0;
}

This is a sample picture the function parameters have been tuned for:

+ * + *

And this is the output of the above program in the case of the probabilistic Hough transform:

+ * + * @param image 8-bit, single-channel binary source image. The image may be + * modified by the function. + * @param lines Output vector of lines. Each line is represented by a 4-element + * vector (x_1, y_1, x_2, y_2), where (x_1,y_1) and (x_2, + * y_2) are the ending points of each detected line segment. + * @param rho Distance resolution of the accumulator in pixels. + * @param theta Angle resolution of the accumulator in radians. + * @param threshold Accumulator threshold parameter. Only those lines are + * returned that get enough votes (>threshold). + * + * @see org.opencv.imgproc.Imgproc.HoughLinesP + */ + public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold) + { + + HoughLinesP_1(image.nativeObj, lines.nativeObj, rho, theta, threshold); + + return; + } + + + // + // C++: void HuMoments(Moments m, Mat& hu) + // + +/** + *

Calculates seven Hu invariants.

+ * + *

The function calculates seven Hu invariants (introduced in [Hu62]; see also http://en.wikipedia.org/wiki/Image_moment) defined as:

+ * + *

hu[0] = eta_20 + eta_02
hu[1] = (eta_20 - eta_02)^2 + 4*eta_11^2
hu[2] = (eta_30 - 3*eta_12)^2 + (3*eta_21 - eta_03)^2
hu[3] = (eta_30 + eta_12)^2 + (eta_21 + eta_03)^2
hu[4] = (eta_30 - 3*eta_12)*(eta_30 + eta_12)*[(eta_30 + eta_12)^2 - 3*(eta_21 + eta_03)^2]
        + (3*eta_21 - eta_03)*(eta_21 + eta_03)*[3*(eta_30 + eta_12)^2 - (eta_21 + eta_03)^2]
hu[5] = (eta_20 - eta_02)*[(eta_30 + eta_12)^2 - (eta_21 + eta_03)^2]
        + 4*eta_11*(eta_30 + eta_12)*(eta_21 + eta_03)
hu[6] = (3*eta_21 - eta_03)*(eta_30 + eta_12)*[(eta_30 + eta_12)^2 - 3*(eta_21 + eta_03)^2]
        - (eta_30 - 3*eta_12)*(eta_21 + eta_03)*[3*(eta_30 + eta_12)^2 - (eta_21 + eta_03)^2]

+ * + *

where eta_(ji) stands for Moments.nu_(ji).

+ * + *

These values are proven to be invariant to image scale, rotation, and reflection, except for the seventh one, whose sign changes under reflection. This invariance is proven under the assumption of infinite image resolution. For raster images, the computed Hu invariants of the original and transformed images differ slightly.
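A short Java sketch of computing the invariants for a contour (the contour variable, a MatOfPoint, is assumed):

Moments m = Imgproc.moments(contour);
Mat hu = new Mat();
Imgproc.HuMoments(m, hu);   // 7x1 Mat of invariants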

+ * + * @param m a m + * @param hu Output Hu invariants. + * + * @see org.opencv.imgproc.Imgproc.HuMoments + * @see org.opencv.imgproc.Imgproc#matchShapes + */ + public static void HuMoments(Moments m, Mat hu) + { + + HuMoments_0(m.nativeObj, hu.nativeObj); + + return; + } + + + // + // C++: void Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) + // + +/** + *

Calculates the Laplacian of an image.

+ * + *

The function calculates the Laplacian of the source image by adding up the + * second x and y derivatives calculated using the Sobel operator:

+ * + *

dst = Delta src = (d^2 src)/(dx^2) + (d^2 src)/(dy^2)

+ * + *

This is done when ksize > 1. When ksize == 1, the Laplacian is computed by filtering the image with the following 3 x 3 aperture:

+ * + *

| 0  1  0 |
| 1 -4  1 |
| 0  1  0 |
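A plausible Java call (gray is assumed; CV_16S output avoids overflow of the second derivative on 8-bit input):

Mat lap = new Mat();
Imgproc.Laplacian(gray, lap, CvType.CV_16S, 3, 1, 0, Imgproc.BORDER_DEFAULT);
Mat lap8u = new Mat();
Core.convertScaleAbs(lap, lap8u);   // back to 8-bit for display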

+ * + * @param src Source image. + * @param dst Destination image of the same size and the same number of channels + * as src. + * @param ddepth Desired depth of the destination image. + * @param ksize Aperture size used to compute the second-derivative filters. See + * "getDerivKernels" for details. The size must be positive and odd. + * @param scale Optional scale factor for the computed Laplacian values. By + * default, no scaling is applied. See "getDerivKernels" for details. + * @param delta Optional delta value that is added to the results prior to + * storing them in dst. + * @param borderType Pixel extrapolation method. See "borderInterpolate" for + * details. + * + * @see org.opencv.imgproc.Imgproc.Laplacian + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#Sobel + */ + public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale, double delta, int borderType) + { + + Laplacian_0(src.nativeObj, dst.nativeObj, ddepth, ksize, scale, delta, borderType); + + return; + } + +/** + *

Calculates the Laplacian of an image.

+ * + *

The function calculates the Laplacian of the source image by adding up the + * second x and y derivatives calculated using the Sobel operator:

+ * + *

dst = Delta src = (d^2 src)/(dx^2) + (d^2 src)/(dy^2)

+ * + *

This is done when ksize > 1. When ksize == 1, the Laplacian is computed by filtering the image with the following 3 x 3 aperture:

+ * + *

| 0  1  0 |
| 1 -4  1 |
| 0  1  0 |

+ * + * @param src Source image. + * @param dst Destination image of the same size and the same number of channels + * as src. + * @param ddepth Desired depth of the destination image. + * @param ksize Aperture size used to compute the second-derivative filters. See + * "getDerivKernels" for details. The size must be positive and odd. + * @param scale Optional scale factor for the computed Laplacian values. By + * default, no scaling is applied. See "getDerivKernels" for details. + * @param delta Optional delta value that is added to the results prior to + * storing them in dst. + * + * @see org.opencv.imgproc.Imgproc.Laplacian + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#Sobel + */ + public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale, double delta) + { + + Laplacian_1(src.nativeObj, dst.nativeObj, ddepth, ksize, scale, delta); + + return; + } + +/** + *

Calculates the Laplacian of an image.

+ * + *

The function calculates the Laplacian of the source image by adding up the + * second x and y derivatives calculated using the Sobel operator:

+ * + *

dst = Delta src = (d^2 src)/(dx^2) + (d^2 src)/(dy^2)

+ * + *

This is done when ksize > 1. When ksize == 1, the Laplacian is computed by filtering the image with the following 3 x 3 aperture:

+ * + *

| 0  1  0 |
| 1 -4  1 |
| 0  1  0 |

+ * + * @param src Source image. + * @param dst Destination image of the same size and the same number of channels + * as src. + * @param ddepth Desired depth of the destination image. + * + * @see org.opencv.imgproc.Imgproc.Laplacian + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#Sobel + */ + public static void Laplacian(Mat src, Mat dst, int ddepth) + { + + Laplacian_2(src.nativeObj, dst.nativeObj, ddepth); + + return; + } + + + // + // C++: double PSNR(Mat src1, Mat src2) + // + + public static double PSNR(Mat src1, Mat src2) + { + + double retVal = PSNR_0(src1.nativeObj, src2.nativeObj); + + return retVal; + } + + + // + // C++: void Scharr(Mat src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) + // + +/** + *

Calculates the first x- or y- image derivative using Scharr operator.

+ * + *

The function computes the first x- or y- spatial image derivative using the + * Scharr operator. The call

+ * + *

Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)

+ * + *

is equivalent to

+ * + *

Sobel(src, dst, ddepth, dx, dy, CV_SCHARR, scale, delta, + * borderType).
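A minimal sketch of the first x-derivative with the Scharr aperture (gray assumed):

Mat gradX = new Mat();
Imgproc.Scharr(gray, gradX, CvType.CV_32F, 1, 0);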

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth output image depth (see "Sobel" for the list of supported + * combination of src.depth() and ddepth). + * @param dx order of the derivative x. + * @param dy order of the derivative y. + * @param scale optional scale factor for the computed derivative values; by + * default, no scaling is applied (see "getDerivKernels" for details). + * @param delta optional delta value that is added to the results prior to + * storing them in dst. + * @param borderType pixel extrapolation method (see "borderInterpolate" for + * details). + * + * @see org.opencv.imgproc.Imgproc.Scharr + * @see org.opencv.core.Core#cartToPolar + */ + public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale, double delta, int borderType) + { + + Scharr_0(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale, delta, borderType); + + return; + } + +/** + *

Calculates the first x- or y- image derivative using Scharr operator.

+ * + *

The function computes the first x- or y- spatial image derivative using the + * Scharr operator. The call

+ * + *

Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)

+ * + *

is equivalent to

+ * + *

Sobel(src, dst, ddepth, dx, dy, CV_SCHARR, scale, delta, + * borderType).

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth output image depth (see "Sobel" for the list of supported + * combination of src.depth() and ddepth). + * @param dx order of the derivative x. + * @param dy order of the derivative y. + * @param scale optional scale factor for the computed derivative values; by + * default, no scaling is applied (see "getDerivKernels" for details). + * @param delta optional delta value that is added to the results prior to + * storing them in dst. + * + * @see org.opencv.imgproc.Imgproc.Scharr + * @see org.opencv.core.Core#cartToPolar + */ + public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale, double delta) + { + + Scharr_1(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale, delta); + + return; + } + +/** + *

Calculates the first x- or y- image derivative using Scharr operator.

+ * + *

The function computes the first x- or y- spatial image derivative using the + * Scharr operator. The call

+ * + *

Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)

+ * + *

is equivalent to

+ * + *

Sobel(src, dst, ddepth, dx, dy, CV_SCHARR, scale, delta, + * borderType).

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth output image depth (see "Sobel" for the list of supported + * combination of src.depth() and ddepth). + * @param dx order of the derivative x. + * @param dy order of the derivative y. + * + * @see org.opencv.imgproc.Imgproc.Scharr + * @see org.opencv.core.Core#cartToPolar + */ + public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy) + { + + Scharr_2(src.nativeObj, dst.nativeObj, ddepth, dx, dy); + + return; + } + + + // + // C++: void Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) + // + +/** + *

Calculates the first, second, third, or mixed image derivatives using an + * extended Sobel operator.

+ * + *

In all cases except one, the ksize x ksize separable kernel is used to calculate the derivative. When ksize = 1, the 3 x 1 or 1 x 3 kernel is used (that is, no Gaussian smoothing is done). ksize = 1 can only be used for the first or the second x- or y- derivatives.

+ * + *

There is also the special value ksize = CV_SCHARR (-1) that + * corresponds to the 3x3 Scharr filter that may give more accurate + * results than the 3x3 Sobel. The Scharr aperture is

+ * + *

| -3   0   3 |
| -10  0  10 |
| -3   0   3 |

+ * + *

for the x-derivative, or transposed for the y-derivative.

+ * + *

The function calculates an image derivative by convolving the image with the + * appropriate kernel:

+ * + *

dst = (d^(xorder+yorder) src)/(dx^(xorder) dy^(yorder))

+ * + *

The Sobel operators combine Gaussian smoothing and differentiation, so the + * result is more or less resistant to the noise. Most often, the function is + * called with (xorder = 1, yorder = 0, + * ksize = 3) or (xorder = 0, yorder = 1, + * ksize = 3) to calculate the first x- or y- image derivative. The + * first case corresponds to a kernel of:

+ * + *

| -1  0  1 |
| -2  0  2 |
| -1  0  1 |

+ * + *

The second case corresponds to a kernel of:

+ * + *

| -1  -2  -1 |
|  0   0   0 |
|  1   2   1 |
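A sketch of the usual gradient-magnitude computation through this wrapper (gray assumed):

Mat gx = new Mat(), gy = new Mat();
Imgproc.Sobel(gray, gx, CvType.CV_32F, 1, 0, 3, 1, 0);
Imgproc.Sobel(gray, gy, CvType.CV_32F, 0, 1, 3, 1, 0);
Mat mag = new Mat(), angle = new Mat();
Core.cartToPolar(gx, gy, mag, angle);   // per-pixel magnitude and direction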

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth output image depth; the following combinations of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the destination image will have the same depth + * as the source; in the case of 8-bit input images it will result in truncated + * derivatives.

+ * @param dx a dx + * @param dy a dy + * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7. + * @param scale optional scale factor for the computed derivative values; by + * default, no scaling is applied (see "getDerivKernels" for details). + * @param delta optional delta value that is added to the results prior to + * storing them in dst. + * @param borderType pixel extrapolation method (see "borderInterpolate" for + * details). + * + * @see org.opencv.imgproc.Imgproc.Sobel + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#Laplacian + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#filter2D + */ + public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType) + { + + Sobel_0(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale, delta, borderType); + + return; + } + +/** + *

Calculates the first, second, third, or mixed image derivatives using an + * extended Sobel operator.

+ * + *

In all cases except one, the ksize x ksize separable kernel is used to calculate the derivative. When ksize = 1, the 3 x 1 or 1 x 3 kernel is used (that is, no Gaussian smoothing is done). ksize = 1 can only be used for the first or the second x- or y- derivatives.

+ * + *

There is also the special value ksize = CV_SCHARR (-1) that + * corresponds to the 3x3 Scharr filter that may give more accurate + * results than the 3x3 Sobel. The Scharr aperture is

+ * + *

| -3   0   3 |
| -10  0  10 |
| -3   0   3 |

+ * + *

for the x-derivative, or transposed for the y-derivative.

+ * + *

The function calculates an image derivative by convolving the image with the + * appropriate kernel:

+ * + *

dst = (d^(xorder+yorder) src)/(dx^(xorder) dy^(yorder))

+ * + *

The Sobel operators combine Gaussian smoothing and differentiation, so the + * result is more or less resistant to the noise. Most often, the function is + * called with (xorder = 1, yorder = 0, + * ksize = 3) or (xorder = 0, yorder = 1, + * ksize = 3) to calculate the first x- or y- image derivative. The + * first case corresponds to a kernel of:

+ * + *

| -1  0  1 |
| -2  0  2 |
| -1  0  1 |

+ * + *

The second case corresponds to a kernel of:

+ * + *

| -1  -2  -1 |
|  0   0   0 |
|  1   2   1 |

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth output image depth; the following combinations of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the destination image will have the same depth + * as the source; in the case of 8-bit input images it will result in truncated + * derivatives.

+ * @param dx a dx + * @param dy a dy + * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7. + * @param scale optional scale factor for the computed derivative values; by + * default, no scaling is applied (see "getDerivKernels" for details). + * @param delta optional delta value that is added to the results prior to + * storing them in dst. + * + * @see org.opencv.imgproc.Imgproc.Sobel + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#Laplacian + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#filter2D + */ + public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta) + { + + Sobel_1(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale, delta); + + return; + } + +/** + *

Calculates the first, second, third, or mixed image derivatives using an + * extended Sobel operator.

+ * + *

In all cases except one, the ksize x ksize separable kernel is used to calculate the derivative. When ksize = 1, the 3 x 1 or 1 x 3 kernel is used (that is, no Gaussian smoothing is done). ksize = 1 can only be used for the first or the second x- or y- derivatives.

+ * + *

There is also the special value ksize = CV_SCHARR (-1) that + * corresponds to the 3x3 Scharr filter that may give more accurate + * results than the 3x3 Sobel. The Scharr aperture is

+ * + *

| -3   0   3 |
| -10  0  10 |
| -3   0   3 |

+ * + *

for the x-derivative, or transposed for the y-derivative.

+ * + *

The function calculates an image derivative by convolving the image with the + * appropriate kernel:

+ * + *

dst = (d^(xorder+yorder) src)/(dx^(xorder) dy^(yorder))

+ * + *

The Sobel operators combine Gaussian smoothing and differentiation, so the + * result is more or less resistant to the noise. Most often, the function is + * called with (xorder = 1, yorder = 0, + * ksize = 3) or (xorder = 0, yorder = 1, + * ksize = 3) to calculate the first x- or y- image derivative. The + * first case corresponds to a kernel of:

+ * + *

| -1  0  1 |
| -2  0  2 |
| -1  0  1 |

+ * + *

The second case corresponds to a kernel of:

+ * + *

| -1  -2  -1 |
|  0   0   0 |
|  1   2   1 |

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth output image depth; the following combinations of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the destination image will have the same depth + * as the source; in the case of 8-bit input images it will result in truncated + * derivatives.

+ * @param dx a dx + * @param dy a dy + * + * @see org.opencv.imgproc.Imgproc.Sobel + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.core.Core#cartToPolar + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#Laplacian + * @see org.opencv.imgproc.Imgproc#Scharr + * @see org.opencv.imgproc.Imgproc#filter2D + */ + public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy) + { + + Sobel_2(src.nativeObj, dst.nativeObj, ddepth, dx, dy); + + return; + } + + + // + // C++: void accumulate(Mat src, Mat& dst, Mat mask = Mat()) + // + +/** + *

Adds an image to the accumulator.

+ * + *

The function adds src or some of its elements to + * dst :

+ * + *

dst(x,y) <- dst(x,y) + src(x,y) if mask(x,y) != 0

+ * + *

The function supports multi-channel images. Each channel is processed + * independently.

+ * + *

The functions accumulate* can be used, for example, to collect statistics of a scene background viewed by a still camera and for further foreground-background segmentation.

+ * + * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. + * @param dst Accumulator image with the same number of channels as input image, + * 32-bit or 64-bit floating-point. + * @param mask Optional operation mask. + * + * @see org.opencv.imgproc.Imgproc.accumulate + * @see org.opencv.imgproc.Imgproc#accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulate(Mat src, Mat dst, Mat mask) + { + + accumulate_0(src.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Adds an image to the accumulator.

+ * + *

The function adds src or some of its elements to + * dst :

+ * + *

dst(x,y) <- dst(x,y) + src(x,y) if mask(x,y) != 0

+ * + *

The function supports multi-channel images. Each channel is processed + * independently.

+ * + *

The functions accumulate* can be used, for example, to collect statistics of a scene background viewed by a still camera and for further foreground-background segmentation.

+ * + * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. + * @param dst Accumulator image with the same number of channels as input image, + * 32-bit or 64-bit floating-point. + * + * @see org.opencv.imgproc.Imgproc.accumulate + * @see org.opencv.imgproc.Imgproc#accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulate(Mat src, Mat dst) + { + + accumulate_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void accumulateProduct(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + // + +/** + *

Adds the per-element product of two input images to the accumulator.

+ * + *

The function adds the product of two images or their selected regions to the + * accumulator dst :

+ * + *

dst(x,y) <- dst(x,y) + src1(x,y) * src2(x,y) if mask(x,y) != 0

+ * + *

The function supports multi-channel images. Each channel is processed + * independently.

+ * + * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating + * point. + * @param src2 Second input image of the same type and the same size as + * src1. + * @param dst Accumulator with the same number of channels as input images, + * 32-bit or 64-bit floating-point. + * @param mask Optional operation mask. + * + * @see org.opencv.imgproc.Imgproc.accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.imgproc.Imgproc#accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulateProduct(Mat src1, Mat src2, Mat dst, Mat mask) + { + + accumulateProduct_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Adds the per-element product of two input images to the accumulator.

+ * + *

The function adds the product of two images or their selected regions to the + * accumulator dst :

+ * + *

dst(x,y) <- dst(x,y) + src1(x,y) * src2(x,y) if mask(x,y) != 0

+ * + *

The function supports multi-channel images. Each channel is processed + * independently.

+ * + * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating + * point. + * @param src2 Second input image of the same type and the same size as + * src1. + * @param dst Accumulator with the same number of channels as input images, + * 32-bit or 64-bit floating-point. + * + * @see org.opencv.imgproc.Imgproc.accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.imgproc.Imgproc#accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulateProduct(Mat src1, Mat src2, Mat dst) + { + + accumulateProduct_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void accumulateSquare(Mat src, Mat& dst, Mat mask = Mat()) + // + +/** + *

Adds the square of a source image to the accumulator.

+ * + *

The function adds the input image src or its selected region, + * raised to a power of 2, to the accumulator dst :

+ * + *

dst(x,y) <- dst(x,y) + src(x,y)^2 if mask(x,y) != 0

+ * + *

The function supports multi-channel images. Each channel is processed + * independently.

+ * + * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. + * @param dst Accumulator image with the same number of channels as input image, + * 32-bit or 64-bit floating-point. + * @param mask Optional operation mask. + * + * @see org.opencv.imgproc.Imgproc.accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulateSquare(Mat src, Mat dst, Mat mask) + { + + accumulateSquare_0(src.nativeObj, dst.nativeObj, mask.nativeObj); + + return; + } + +/** + *

Adds the square of a source image to the accumulator.

+ * + *

The function adds the input image src or its selected region, + * raised to a power of 2, to the accumulator dst :

+ * + *

dst(x,y) <- dst(x,y) + src(x,y)^2 if mask(x,y) != 0

+ * + *

The function supports multi-channel images. Each channel is processed + * independently.

+ * + * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. + * @param dst Accumulator image with the same number of channels as input image, + * 32-bit or 64-bit floating-point. + * + * @see org.opencv.imgproc.Imgproc.accumulateSquare + * @see org.opencv.imgproc.Imgproc#accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulateSquare(Mat src, Mat dst) + { + + accumulateSquare_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat()) + // + +/** + *

Updates a running average.

+ * + *

The function calculates the weighted sum of the input image src + * and the accumulator dst so that dst becomes a + * running average of a frame sequence:

+ * + *

dst(x,y) <- (1- alpha) * dst(x,y) + alpha * src(x,y) if mask(x,y) != + * 0

+ * + *

That is, alpha regulates the update speed (how fast the accumulator "forgets" about earlier images). The function supports multi-channel images. Each channel is processed independently.
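A sketch of a simple running-average background model (frame, an 8-bit BGR capture, is assumed; alpha = 0.05 is illustrative):

Mat acc = Mat.zeros(frame.size(), CvType.CV_32FC3);
// called once per frame; a small alpha makes the background adapt slowly
Imgproc.accumulateWeighted(frame, acc, 0.05);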

+ * + * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. + * @param dst Accumulator image with the same number of channels as input image, + * 32-bit or 64-bit floating-point. + * @param alpha Weight of the input image. + * @param mask Optional operation mask. + * + * @see org.opencv.imgproc.Imgproc.accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.imgproc.Imgproc#accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulateWeighted(Mat src, Mat dst, double alpha, Mat mask) + { + + accumulateWeighted_0(src.nativeObj, dst.nativeObj, alpha, mask.nativeObj); + + return; + } + +/** + *

Updates a running average.

+ * + *

The function calculates the weighted sum of the input image src + * and the accumulator dst so that dst becomes a + * running average of a frame sequence:

+ * + *

dst(x,y) <- (1- alpha) * dst(x,y) + alpha * src(x,y) if mask(x,y) != + * 0

+ * + *

That is, alpha regulates the update speed (how fast the accumulator "forgets" about earlier images). The function supports multi-channel images. Each channel is processed independently.

+ * + * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. + * @param dst Accumulator image with the same number of channels as input image, + * 32-bit or 64-bit floating-point. + * @param alpha Weight of the input image. + * + * @see org.opencv.imgproc.Imgproc.accumulateWeighted + * @see org.opencv.imgproc.Imgproc#accumulate + * @see org.opencv.imgproc.Imgproc#accumulateProduct + * @see org.opencv.imgproc.Imgproc#accumulateSquare + */ + public static void accumulateWeighted(Mat src, Mat dst, double alpha) + { + + accumulateWeighted_1(src.nativeObj, dst.nativeObj, alpha); + + return; + } + + + // + // C++: void adaptiveThreshold(Mat src, Mat& dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C) + // + +/** + *

Applies an adaptive threshold to an array.

+ * + *

The function transforms a grayscale image to a binary image according to the + * formulae:

+ *
    + *
  • THRESH_BINARY + *
+ * + *

dst(x,y) = maxValue if src(x,y) > T(x,y); 0 otherwise

+ * + *
    + *
  • THRESH_BINARY_INV + *
+ * + *

dst(x,y) = 0 if src(x,y) > T(x,y); maxValue otherwise

+ * + *

where T(x,y) is a threshold calculated individually for each pixel.

+ *
    + *
  • For the method ADAPTIVE_THRESH_MEAN_C, the threshold + * value T(x,y) is a mean of the blockSize x blockSize + * neighborhood of (x, y) minus C. + *
  • For the method ADAPTIVE_THRESH_GAUSSIAN_C, the threshold + * value T(x, y) is a weighted sum (cross-correlation with a Gaussian + * window) of the blockSize x blockSize neighborhood of (x, y) + * minus C. The default sigma (standard deviation) is used for the + * specified blockSize. See "getGaussianKernel". + *
+ * + *

The function can process the image in-place.
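A plausible call for binarizing unevenly lit text (the blockSize and C values are illustrative):

Mat bw = new Mat();
Imgproc.adaptiveThreshold(gray, bw, 255,
        Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 11, 2);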

+ * + * @param src Source 8-bit single-channel image. + * @param dst Destination image of the same size and the same type as + * src. + * @param maxValue Non-zero value assigned to the pixels for which the condition + * is satisfied. See the details below. + * @param adaptiveMethod Adaptive thresholding algorithm to use, + * ADAPTIVE_THRESH_MEAN_C or ADAPTIVE_THRESH_GAUSSIAN_C. + * See the details below. + * @param thresholdType Thresholding type that must be either THRESH_BINARY + * or THRESH_BINARY_INV. + * @param blockSize Size of a pixel neighborhood that is used to calculate a + * threshold value for the pixel: 3, 5, 7, and so on. + * @param C Constant subtracted from the mean or weighted mean (see the details + * below). Normally, it is positive but may be zero or negative as well. + * + * @see org.opencv.imgproc.Imgproc.adaptiveThreshold + * @see org.opencv.imgproc.Imgproc#threshold + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#blur + */ + public static void adaptiveThreshold(Mat src, Mat dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C) + { + + adaptiveThreshold_0(src.nativeObj, dst.nativeObj, maxValue, adaptiveMethod, thresholdType, blockSize, C); + + return; + } + + + // + // C++: void approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed) + // + +/** + *

Approximates a polygonal curve(s) with the specified precision.

+ * + *

The functions approxPolyDP approximate a curve or a polygon with another curve/polygon with fewer vertices so that the distance between them is less than or equal to the specified precision. It uses the Douglas-Peucker algorithm http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm

+ * + *

See http://code.opencv.org/projects/opencv/repository/revisions/master/entry/samples/cpp/contours.cpp for the function usage model.
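A typical Java pattern pairs it with "arcLength" to choose the tolerance (contour, a MatOfPoint, is assumed; the 2% factor is illustrative):

MatOfPoint2f curve = new MatOfPoint2f(contour.toArray());
MatOfPoint2f approx = new MatOfPoint2f();
double eps = 0.02 * Imgproc.arcLength(curve, true);   // 2% of the perimeter
Imgproc.approxPolyDP(curve, approx, eps, true);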

+ * + * @param curve Input vector of a 2D point stored in: + *
    + *
  • std.vector or Mat (C++ interface) + *
  • Nx2 numpy array (Python interface) + *
  • CvSeq or CvMat (C interface)
+ * @param approxCurve Result of the approximation. The type should match the + * type of the input curve. In case of C interface the approximated curve is + * stored in the memory storage and pointer to it is returned. + * @param epsilon Parameter specifying the approximation accuracy. This is the + * maximum distance between the original curve and its approximation. + * @param closed If true, the approximated curve is closed (its first and last + * vertices are connected). Otherwise, it is not closed. + * + * @see org.opencv.imgproc.Imgproc.approxPolyDP + */ + public static void approxPolyDP(MatOfPoint2f curve, MatOfPoint2f approxCurve, double epsilon, boolean closed) + { + Mat curve_mat = curve; + Mat approxCurve_mat = approxCurve; + approxPolyDP_0(curve_mat.nativeObj, approxCurve_mat.nativeObj, epsilon, closed); + + return; + } + + + // + // C++: double arcLength(vector_Point2f curve, bool closed) + // + +/** + *

Calculates a contour perimeter or a curve length.

+ * + *

The function computes a curve length or a closed contour perimeter.

+ * + * @param curve Input vector of 2D points, stored in std.vector or + * Mat. + * @param closed Flag indicating whether the curve is closed or not. + * + * @see org.opencv.imgproc.Imgproc.arcLength + */ + public static double arcLength(MatOfPoint2f curve, boolean closed) + { + Mat curve_mat = curve; + double retVal = arcLength_0(curve_mat.nativeObj, closed); + + return retVal; + } + + + // + // C++: void bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT) + // + +/** + *

Applies the bilateral filter to an image.

+ * + *

The function applies bilateral filtering to the input image, as described in http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html. bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is very slow compared to most filters.

+ *
    + *
  • Sigma values*: For simplicity, you can set the 2 sigma values to be the + * same. If they are small (< 10), the filter will not have much effect, whereas + * if they are large (> 150), they will have a very strong effect, making the + * image look "cartoonish". + *
  • Filter size*: Large filters (d > 5) are very slow, so it is recommended + * to use d=5 for real-time applications, and perhaps d=9 for offline + * applications that need heavy noise filtering. + *
+ * + *

This filter does not work in place.
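A sketch (d = 9 with sigma values of 75 as illustrative mid-range choices):

Mat smoothed = new Mat();
Imgproc.bilateralFilter(src, smoothed, 9, 75, 75);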

+ * + * @param src Source 8-bit or floating-point, 1-channel or 3-channel image. + * @param dst Destination image of the same size and type as src. + * @param d Diameter of each pixel neighborhood that is used during filtering. + * If it is non-positive, it is computed from sigmaSpace. + * @param sigmaColor Filter sigma in the color space. A larger value of the + * parameter means that farther colors within the pixel neighborhood (see + * sigmaSpace) will be mixed together, resulting in larger areas of + * semi-equal color. + * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the + * parameter means that farther pixels will influence each other as long as + * their colors are close enough (see sigmaColor). When + * d>0, it specifies the neighborhood size regardless of + * sigmaSpace. Otherwise, d is proportional to + * sigmaSpace. + * @param borderType a borderType + * + * @see org.opencv.imgproc.Imgproc.bilateralFilter + */ + public static void bilateralFilter(Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace, int borderType) + { + + bilateralFilter_0(src.nativeObj, dst.nativeObj, d, sigmaColor, sigmaSpace, borderType); + + return; + } + +/** + *

Applies the bilateral filter to an image.

+ * + *

The function applies bilateral filtering to the input image, as described in http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html. bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is very slow compared to most filters.

+ *
    + *
  • Sigma values*: For simplicity, you can set the 2 sigma values to be the + * same. If they are small (< 10), the filter will not have much effect, whereas + * if they are large (> 150), they will have a very strong effect, making the + * image look "cartoonish". + *
  • Filter size*: Large filters (d > 5) are very slow, so it is recommended + * to use d=5 for real-time applications, and perhaps d=9 for offline + * applications that need heavy noise filtering. + *
+ * + *

This filter does not work in place.

+ * + * @param src Source 8-bit or floating-point, 1-channel or 3-channel image. + * @param dst Destination image of the same size and type as src. + * @param d Diameter of each pixel neighborhood that is used during filtering. + * If it is non-positive, it is computed from sigmaSpace. + * @param sigmaColor Filter sigma in the color space. A larger value of the + * parameter means that farther colors within the pixel neighborhood (see + * sigmaSpace) will be mixed together, resulting in larger areas of + * semi-equal color. + * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the + * parameter means that farther pixels will influence each other as long as + * their colors are close enough (see sigmaColor). When + * d>0, it specifies the neighborhood size regardless of + * sigmaSpace. Otherwise, d is proportional to + * sigmaSpace. + * + * @see org.opencv.imgproc.Imgproc.bilateralFilter + */ + public static void bilateralFilter(Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace) + { + + bilateralFilter_1(src.nativeObj, dst.nativeObj, d, sigmaColor, sigmaSpace); + + return; + } + + + // + // C++: void blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT) + // + +/** + *

Blurs an image using the normalized box filter.

+ * + *

The function smoothes an image using the kernel:

+ * + *

K = 1/(ksize.width*ksize.height) * | 1 1 1 ... 1 1 |
                                   | 1 1 1 ... 1 1 |
                                   | ............. |
                                   | 1 1 1 ... 1 1 |

+ * + *

The call blur(src, dst, ksize, anchor, borderType) is equivalent to boxFilter(src, dst, src.type(), ksize, anchor, true, borderType).
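For instance, a 5x5 normalized box blur through this wrapper (a sketch; src assumed):

Mat blurred = new Mat();
Imgproc.blur(src, blurred, new Size(5, 5));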

+ * + * @param src input image; it can have any number of channels, which are + * processed independently, but the depth should be CV_8U, + * CV_16U, CV_16S, CV_32F or + * CV_64F. + * @param dst output image of the same size and type as src. + * @param ksize blurring kernel size. + * @param anchor anchor point; default value Point(-1,-1) means + * that the anchor is at the kernel center. + * @param borderType border mode used to extrapolate pixels outside of the + * image. + * + * @see org.opencv.imgproc.Imgproc.blur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#medianBlur + */ + public static void blur(Mat src, Mat dst, Size ksize, Point anchor, int borderType) + { + + blur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, anchor.x, anchor.y, borderType); + + return; + } + +/** + *

Blurs an image using the normalized box filter.

+ * + *

The function smoothes an image using the kernel:

+ * + *

K = 1/(ksize.width*ksize.height) * | 1 1 1 ... 1 1 |
                                   | 1 1 1 ... 1 1 |
                                   | ............. |
                                   | 1 1 1 ... 1 1 |

+ * + *

The call blur(src, dst, ksize, anchor, borderType) is equivalent to boxFilter(src, dst, src.type(), ksize, anchor, true, borderType).

+ * + * @param src input image; it can have any number of channels, which are + * processed independently, but the depth should be CV_8U, + * CV_16U, CV_16S, CV_32F or + * CV_64F. + * @param dst output image of the same size and type as src. + * @param ksize blurring kernel size. + * @param anchor anchor point; default value Point(-1,-1) means + * that the anchor is at the kernel center. + * + * @see org.opencv.imgproc.Imgproc.blur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#medianBlur + */ + public static void blur(Mat src, Mat dst, Size ksize, Point anchor) + { + + blur_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, anchor.x, anchor.y); + + return; + } + +/** + *

Blurs an image using the normalized box filter.

+ * + *

The function smoothes an image using the kernel:

+ * + *

K = 1/(ksize.width*ksize.height) * | 1 1 1 ... 1 1 |
                                   | 1 1 1 ... 1 1 |
                                   | ............. |
                                   | 1 1 1 ... 1 1 |

+ * + *

The call blur(src, dst, ksize, anchor, borderType) is equivalent to boxFilter(src, dst, src.type(), ksize, anchor, true, borderType).

+ * + * @param src input image; it can have any number of channels, which are + * processed independently, but the depth should be CV_8U, + * CV_16U, CV_16S, CV_32F or + * CV_64F. + * @param dst output image of the same size and type as src. + * @param ksize blurring kernel size. + * + * @see org.opencv.imgproc.Imgproc.blur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#medianBlur + */ + public static void blur(Mat src, Mat dst, Size ksize) + { + + blur_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height); + + return; + } + + + // + // C++: int borderInterpolate(int p, int len, int borderType) + // + +/** + *

Computes the source location of an extrapolated pixel.

+ * + *

The function computes and returns the coordinate of a donor pixel corresponding to the specified extrapolated pixel when using the specified extrapolation border mode. For example, if you use BORDER_WRAP mode in the horizontal direction, BORDER_REFLECT_101 in the vertical direction and want to compute the value of the "virtual" pixel Point(-5, 100) in a floating-point image img, it looks like:

+ * + *

// C++ code:

+ * + *

float val = img.at<float>(borderInterpolate(100, img.rows, BORDER_REFLECT_101),
                          borderInterpolate(-5, img.cols, BORDER_WRAP));

+ * + *

Normally, the function is not called directly. It is used inside "FilterEngine" and "copyMakeBorder" to compute tables for quick extrapolation.

+ * + * @param p 0-based coordinate of the extrapolated pixel along one of the axes, + * likely <0 or >= len. + * @param len Length of the array along the corresponding axis. + * @param borderType Border type, one of the BORDER_*, except for + * BORDER_TRANSPARENT and BORDER_ISOLATED. When + * borderType==BORDER_CONSTANT, the function always returns -1, + * regardless of p and len. + * + * @see org.opencv.imgproc.Imgproc.borderInterpolate + * @see org.opencv.imgproc.Imgproc#copyMakeBorder + */ + public static int borderInterpolate(int p, int len, int borderType) + { + + int retVal = borderInterpolate_0(p, len, borderType); + + return retVal; + } + + + // + // C++: Rect boundingRect(vector_Point points) + // + +/** + *

Calculates the up-right bounding rectangle of a point set.

+ * + *

The function calculates and returns the minimal up-right bounding rectangle + * for the specified point set.

+ * + * @param points Input 2D point set, stored in std.vector or + * Mat. + * + * @see org.opencv.imgproc.Imgproc.boundingRect + */ + public static Rect boundingRect(MatOfPoint points) + { + Mat points_mat = points; + Rect retVal = new Rect(boundingRect_0(points_mat.nativeObj)); + + return retVal; + } + + + // + // C++: void boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, int borderType = BORDER_DEFAULT) + // + +/** + *
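A small sketch of the Java call (the point coordinates are invented for illustration):

MatOfPoint points = new MatOfPoint(new Point(10, 5), new Point(40, 30), new Point(25, 60));
Rect box = Imgproc.boundingRect(points);       // -> x=10, y=5, width=31, height=56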

Blurs an image using the box filter.

+ * + *

The function smoothes an image using the kernel:

+ * + *

K = alpha * + * | 1 1 1 ... 1 1 | + * | 1 1 1 ... 1 1 | + * | ............. | + * | 1 1 1 ... 1 1 |

+ * + *

where

+ * + *

alpha = 1/(ksize.width*ksize.height) when normalize=true; 1 + * otherwise

+ * + *

An unnormalized box filter is useful for computing various integral + * characteristics over each pixel neighborhood, such as covariance matrices of + * image derivatives (used in dense optical flow algorithms, and so on). If you + * need to compute pixel sums over variable-size windows, use "integral".

+ * + * @param src input image. + * @param dst output image of the same size and type as src. + * @param ddepth the output image depth (-1 to use src.depth()). + * @param ksize blurring kernel size. + * @param anchor anchor point; default value Point(-1,-1) means + * that the anchor is at the kernel center. + * @param normalize flag, specifying whether the kernel is normalized by its + * area or not. + * @param borderType border mode used to extrapolate pixels outside of the + * image. + * + * @see org.opencv.imgproc.Imgproc.boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#medianBlur + * @see org.opencv.imgproc.Imgproc#integral + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#blur + */ + public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize, int borderType) + { + + boxFilter_0(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize, borderType); + + return; + } + +/** + *

Blurs an image using the box filter.

+ * + *

The function smoothes an image using the kernel:

+ * + *

K = alpha * + * | 1 1 1 ... 1 1 | + * | 1 1 1 ... 1 1 | + * | ............. | + * | 1 1 1 ... 1 1 |

+ * + *

where

+ * + *

alpha = 1/(ksize.width*ksize.height) when normalize=true; 1 + * otherwise

+ * + *

An unnormalized box filter is useful for computing various integral + * characteristics over each pixel neighborhood, such as covariance matrices of + * image derivatives (used in dense optical flow algorithms, and so on). If you + * need to compute pixel sums over variable-size windows, use "integral".

+ * + * @param src input image. + * @param dst output image of the same size and type as src. + * @param ddepth the output image depth (-1 to use src.depth()). + * @param ksize blurring kernel size. + * @param anchor anchor point; default value Point(-1,-1) means + * that the anchor is at the kernel center. + * @param normalize flag, specifying whether the kernel is normalized by its + * area or not. + * + * @see org.opencv.imgproc.Imgproc.boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#medianBlur + * @see org.opencv.imgproc.Imgproc#integral + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#blur + */ + public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize) + { + + boxFilter_1(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize); + + return; + } + +/** + *

Blurs an image using the box filter.

+ * + *

The function smoothes an image using the kernel:

+ * + *

K = alpha * + * | 1 1 1 ... 1 1 | + * | 1 1 1 ... 1 1 | + * | ............. | + * | 1 1 1 ... 1 1 |

+ * + *

where

+ * + *

alpha = 1/(ksize.width*ksize.height) when normalize=true; 1 + * otherwise

+ * + *

An unnormalized box filter is useful for computing various integral + * characteristics over each pixel neighborhood, such as covariance matrices of + * image derivatives (used in dense optical flow algorithms, and so on). If you + * need to compute pixel sums over variable-size windows, use "integral".

+ * + * @param src input image. + * @param dst output image of the same size and type as src. + * @param ddepth the output image depth (-1 to use src.depth()). + * @param ksize blurring kernel size. + * + * @see org.opencv.imgproc.Imgproc.boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#medianBlur + * @see org.opencv.imgproc.Imgproc#integral + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#blur + */ + public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize) + { + + boxFilter_2(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height); + + return; + } + + + // + // C++: void calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale) + // + +/** + *
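For contrast with blur, a hedged sketch of the unnormalized mode: with normalize=false each output pixel is the plain sum over the window, so a float destination depth avoids 8-bit saturation (src as in the blur sketch above):

Mat sums = new Mat();
// Each output pixel is the raw sum over the 3x3 window.
Imgproc.boxFilter(src, sums, CvType.CV_32F, new Size(3, 3), new Point(-1, -1), false);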

Calculates the back projection of a histogram.

+ * + *

The functions calcBackProject calculate the back project of the + * histogram. That is, similarly to calcHist, at each location + * (x, y) the function collects the values from the selected + * channels in the input images and finds the corresponding histogram bin. But + * instead of incrementing it, the function reads the bin value, scales it by + * scale, and stores in backProject(x,y). In terms of + * statistics, the function computes probability of each element value in + * respect with the empirical probability distribution represented by the + * histogram. See how, for example, you can find and track a bright-colored + * object in a scene:

+ *
    + *
  • Before tracking, show the object to the camera so that it covers + * almost the whole frame. Calculate a hue histogram. The histogram may have + * strong maxima, corresponding to the dominant colors in the object. + *
  • When tracking, calculate a back projection of a hue plane of each + * input video frame using that pre-computed histogram. Threshold the back + * projection to suppress weak colors. It may also make sense to suppress pixels + * with insufficient color saturation and pixels that are too dark or too bright. + *
  • Find connected components in the resulting picture and choose, for + * example, the largest component. + *
+ * + *

This is an approximate algorithm of the "CamShift" color object tracker.

+ * + * @param images Source arrays. They all should have the same depth, + * CV_8U or CV_32F, and the same size. Each of them + * can have an arbitrary number of channels. + * @param channels The list of channels used to compute the back projection. The + * number of channels must match the histogram dimensionality. The first array + * channels are numerated from 0 to images[0].channels()-1, the + * second array channels are counted from images[0].channels() to + * images[0].channels() + images[1].channels()-1, and so on. + * @param hist Input histogram that can be dense or sparse. + * @param dst a dst + * @param ranges Array of arrays of the histogram bin boundaries in each + * dimension. See "calcHist". + * @param scale Optional scale factor for the output back projection. + * + * @see org.opencv.imgproc.Imgproc.calcBackProject + * @see org.opencv.imgproc.Imgproc#calcHist + */ + public static void calcBackProject(List images, MatOfInt channels, Mat hist, Mat dst, MatOfFloat ranges, double scale) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + Mat channels_mat = channels; + Mat ranges_mat = ranges; + calcBackProject_0(images_mat.nativeObj, channels_mat.nativeObj, hist.nativeObj, dst.nativeObj, ranges_mat.nativeObj, scale); + + return; + } + + + // + // C++: void calcHist(vector_Mat images, vector_int channels, Mat mask, Mat& hist, vector_int histSize, vector_float ranges, bool accumulate = false) + // + +/** + *
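A hedged sketch of the hue back-projection step from the recipe above, assuming hsv and a precomputed 1D hue histogram hueHist (both hypothetical; java.util.Arrays assumed imported):

Mat backProj = new Mat();
Imgproc.calcBackProject(Arrays.asList(hsv), new MatOfInt(0), hueHist, backProj,
        new MatOfFloat(0f, 180f), 1.0);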

Calculates a histogram of a set of arrays.

+ * + *

The functions calcHist calculate the histogram of one or more + * arrays. The elements of a tuple used to increment a histogram bin are taken + * from the corresponding input arrays at the same location. The sample below + * shows how to compute a 2D Hue-Saturation histogram for a color image. + *

+ * + *

// C++ code:

+ * + *

#include <cv.h>

+ * + *

#include <highgui.h>

+ * + *

using namespace cv;

+ * + *

int main(int argc, char** argv)

+ * + * + *

Mat src, hsv;

+ * + *

if(argc != 2 || !(src=imread(argv[1], 1)).data)

+ * + *

return -1;

+ * + *

cvtColor(src, hsv, CV_BGR2HSV);

+ * + *

// Quantize the hue to 30 levels

+ * + *

// and the saturation to 32 levels

+ * + *

int hbins = 30, sbins = 32;

+ * + *

int histSize[] = {hbins, sbins};

+ * + *

// hue varies from 0 to 179, see cvtColor

+ * + *

float hranges[] = { 0, 180 };

+ * + *

// saturation varies from 0 (black-gray-white) to

+ * + *

// 255 (pure spectrum color)

+ * + *

float sranges[] = { 0, 256 };

+ * + *

const float* ranges[] = { hranges, sranges };

+ * + *

MatND hist;

+ * + *

// we compute the histogram from the 0-th and 1-st channels

+ * + *

int channels[] = {0, 1};

+ * + *

calcHist(&hsv, 1, channels, Mat(), // do not use mask

+ * + *

hist, 2, histSize, ranges,

+ * + *

true, // the histogram is uniform

+ * + *

false);

+ * + *

double maxVal=0;

+ * + *

minMaxLoc(hist, 0, &maxVal, 0, 0);

+ * + *

int scale = 10;

+ * + *

Mat histImg = Mat::zeros(sbins*scale, hbins*10, CV_8UC3);

+ * + *

for(int h = 0; h < hbins; h++)

+ * + *

for(int s = 0; s < sbins; s++)

+ * + * + *

float binVal = hist.at<float>(h, s);

+ * + *

int intensity = cvRound(binVal*255/maxVal);

+ * + *

rectangle(histImg, Point(h*scale, s*scale),

+ * + *

Point((h+1)*scale - 1, (s+1)*scale - 1),

+ * + *

Scalar::all(intensity),

+ * + *

CV_FILLED);

+ * + * + *

namedWindow("Source", 1);

+ * + *

imshow("Source", src);

+ * + *

namedWindow("H-S Histogram", 1);

+ * + *

imshow("H-S Histogram", histImg);

+ * + *

waitKey();

+ * + * + * @param images Source arrays. They all should have the same depth, + * CV_8U or CV_32F, and the same size. Each of them + * can have an arbitrary number of channels. + * @param channels List of the dims channels used to compute the + * histogram. The first array channels are numerated from 0 to images[0].channels()-1, + * the second array channels are counted from images[0].channels() + * to images[0].channels() + images[1].channels()-1, and so on. + * @param mask Optional mask. If the matrix is not empty, it must be an 8-bit + * array of the same size as images[i]. The non-zero mask elements + * mark the array elements counted in the histogram. + * @param hist Output histogram, which is a dense or sparse dims + * -dimensional array. + * @param histSize Array of histogram sizes in each dimension. + * @param ranges Array of the dims arrays of the histogram bin + * boundaries in each dimension. When the histogram is uniform (uniform + * =true), then for each dimension i it is enough to specify the + * lower (inclusive) boundary L_0 of the 0-th histogram bin and the + * upper (exclusive) boundary U_(histSize[i]-1) for the last histogram + * bin histSize[i]-1. That is, in case of a uniform histogram each + * of ranges[i] is an array of 2 elements. When the histogram is + * not uniform (uniform=false), then each of ranges[i] + * contains histSize[i]+1 elements: L_0, U_0=L_1, U_1=L_2,..., + * U_(histSize[i]-2)=L_(histSize[i]-1), U_(histSize[i]-1). The array + * elements, that are not between L_0 and U_(histSize[i]-1), + * are not counted in the histogram. + * @param accumulate Accumulation flag. If it is set, the histogram is not + * cleared in the beginning when it is allocated. This feature enables you to + * compute a single histogram from several sets of arrays, or to update the + * histogram in time. + * + * @see org.opencv.imgproc.Imgproc.calcHist + */ + public static void calcHist(List images, MatOfInt channels, Mat mask, Mat hist, MatOfInt histSize, MatOfFloat ranges, boolean accumulate) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + Mat channels_mat = channels; + Mat histSize_mat = histSize; + Mat ranges_mat = ranges; + calcHist_0(images_mat.nativeObj, channels_mat.nativeObj, mask.nativeObj, hist.nativeObj, histSize_mat.nativeObj, ranges_mat.nativeObj, accumulate); + + return; + } + +/** + *
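A hedged Java translation of the Hue-Saturation histogram computation in the C++ sample above (hsv assumed to come from Imgproc.cvtColor with COLOR_BGR2HSV; java.util.Arrays assumed imported):

Mat hist = new Mat();
Imgproc.calcHist(Arrays.asList(hsv), new MatOfInt(0, 1), new Mat() /* no mask */,
        hist, new MatOfInt(30, 32),                 // hbins, sbins
        new MatOfFloat(0f, 180f, 0f, 256f));        // hue range, saturation range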

Calculates a histogram of a set of arrays.

+ * + *

The functions calcHist calculate the histogram of one or more + * arrays. The elements of a tuple used to increment a histogram bin are taken + * from the corresponding input arrays at the same location. The sample below + * shows how to compute a 2D Hue-Saturation histogram for a color image. + *

+ * + *

// C++ code:

+ * + *

#include <cv.h>

+ * + *

#include <highgui.h>

+ * + *

using namespace cv;

+ * + *

int main(int argc, char** argv)

+ * + * + *

Mat src, hsv;

+ * + *

if(argc != 2 || !(src=imread(argv[1], 1)).data)

+ * + *

return -1;

+ * + *

cvtColor(src, hsv, CV_BGR2HSV);

+ * + *

// Quantize the hue to 30 levels

+ * + *

// and the saturation to 32 levels

+ * + *

int hbins = 30, sbins = 32;

+ * + *

int histSize[] = {hbins, sbins};

+ * + *

// hue varies from 0 to 179, see cvtColor

+ * + *

float hranges[] = { 0, 180 };

+ * + *

// saturation varies from 0 (black-gray-white) to

+ * + *

// 255 (pure spectrum color)

+ * + *

float sranges[] = { 0, 256 };

+ * + *

const float* ranges[] = { hranges, sranges };

+ * + *

MatND hist;

+ * + *

// we compute the histogram from the 0-th and 1-st channels

+ * + *

int channels[] = {0, 1};

+ * + *

calcHist(&hsv, 1, channels, Mat(), // do not use mask

+ * + *

hist, 2, histSize, ranges,

+ * + *

true, // the histogram is uniform

+ * + *

false);

+ * + *

double maxVal=0;

+ * + *

minMaxLoc(hist, 0, &maxVal, 0, 0);

+ * + *

int scale = 10;

+ * + *

Mat histImg = Mat::zeros(sbins*scale, hbins*10, CV_8UC3);

+ * + *

for(int h = 0; h < hbins; h++)

+ * + *

for(int s = 0; s < sbins; s++)

+ * + * + *

float binVal = hist.at<float>(h, s);

+ * + *

int intensity = cvRound(binVal*255/maxVal);

+ * + *

rectangle(histImg, Point(h*scale, s*scale),

+ * + *

Point((h+1)*scale - 1, (s+1)*scale - 1),

+ * + *

Scalar::all(intensity),

+ * + *

CV_FILLED);

+ * + * + *

namedWindow("Source", 1);

+ * + *

imshow("Source", src);

+ * + *

namedWindow("H-S Histogram", 1);

+ * + *

imshow("H-S Histogram", histImg);

+ * + *

waitKey();

+ * + * + * @param images Source arrays. They all should have the same depth, + * CV_8U or CV_32F, and the same size. Each of them + * can have an arbitrary number of channels. + * @param channels List of the dims channels used to compute the + * histogram. The first array channels are numerated from 0 to images[0].channels()-1, + * the second array channels are counted from images[0].channels() + * to images[0].channels() + images[1].channels()-1, and so on. + * @param mask Optional mask. If the matrix is not empty, it must be an 8-bit + * array of the same size as images[i]. The non-zero mask elements + * mark the array elements counted in the histogram. + * @param hist Output histogram, which is a dense or sparse dims + * -dimensional array. + * @param histSize Array of histogram sizes in each dimension. + * @param ranges Array of the dims arrays of the histogram bin + * boundaries in each dimension. When the histogram is uniform (uniform + * =true), then for each dimension i it is enough to specify the + * lower (inclusive) boundary L_0 of the 0-th histogram bin and the + * upper (exclusive) boundary U_(histSize[i]-1) for the last histogram + * bin histSize[i]-1. That is, in case of a uniform histogram each + * of ranges[i] is an array of 2 elements. When the histogram is + * not uniform (uniform=false), then each of ranges[i] + * contains histSize[i]+1 elements: L_0, U_0=L_1, U_1=L_2,..., + * U_(histSize[i]-2)=L_(histSize[i]-1), U_(histSize[i]-1). The array + * elements, that are not between L_0 and U_(histSize[i]-1), + * are not counted in the histogram. + * + * @see org.opencv.imgproc.Imgproc.calcHist + */ + public static void calcHist(List images, MatOfInt channels, Mat mask, Mat hist, MatOfInt histSize, MatOfFloat ranges) + { + Mat images_mat = Converters.vector_Mat_to_Mat(images); + Mat channels_mat = channels; + Mat histSize_mat = histSize; + Mat ranges_mat = ranges; + calcHist_1(images_mat.nativeObj, channels_mat.nativeObj, mask.nativeObj, hist.nativeObj, histSize_mat.nativeObj, ranges_mat.nativeObj); + + return; + } + + + // + // C++: double compareHist(Mat H1, Mat H2, int method) + // + +/** + *

Compares two histograms.

+ * + *

The functions compareHist compare two dense or two sparse + * histograms using the specified method:

+ *
    + *
  • Correlation (method=CV_COMP_CORREL) + *
+ * + *

d(H_1,H_2) = (sum_I(H_1(I) - H_1")(H_2(I) - H_2"))/(sqrt(sum_I(H_1(I) - + * H_1")^2 sum_I(H_2(I) - H_2")^2))

+ * + *

where

+ * + *

H_k" = 1/(N) sum _J H_k(J)

+ * + *

and N is the total number of histogram bins.

+ *
    + *
  • Chi-Square (method=CV_COMP_CHISQR) + *
+ * + *

d(H_1,H_2) = sum _I((H_1(I)-H_2(I))^2)/(H_1(I))

+ * + *
    + *
  • Intersection (method=CV_COMP_INTERSECT) + *
+ * + *

d(H_1,H_2) = sum _I min(H_1(I), H_2(I))

+ * + *
    + *
  • Bhattacharyya distance (method=CV_COMP_BHATTACHARYYA or + * method=CV_COMP_HELLINGER). In fact, OpenCV computes Hellinger + * distance, which is related to Bhattacharyya coefficient. + *
+ * + *

d(H_1,H_2) = sqrt(1 - 1/sqrt(mean(H_1) * mean(H_2) * N^2) * sum_I sqrt(H_1(I) * H_2(I)))

+ * + *

The function returns d(H_1, H_2).

+ * + *

While the function works well with 1-, 2-, 3-dimensional dense histograms, it + * may not be suitable for high-dimensional sparse histograms. In such + * histograms, because of aliasing and sampling problems, the coordinates of + * non-zero histogram bins can slightly shift. To compare such histograms or + * more general sparse configurations of weighted points, consider using the + * "EMD" function.

+ * + * @param H1 First compared histogram. + * @param H2 Second compared histogram of the same size as H1. + * @param method Comparison method that could be one of the following: + *
    + *
  • CV_COMP_CORREL Correlation + *
  • CV_COMP_CHISQR Chi-Square + *
  • CV_COMP_INTERSECT Intersection + *
  • CV_COMP_BHATTACHARYYA Bhattacharyya distance + *
  • CV_COMP_HELLINGER Synonym for CV_COMP_BHATTACHARYYA + *
+ * + * @see org.opencv.imgproc.Imgproc.compareHist + */ + public static double compareHist(Mat H1, Mat H2, int method) + { + + double retVal = compareHist_0(H1.nativeObj, H2.nativeObj, method); + + return retVal; + } + + + // + // C++: double contourArea(Mat contour, bool oriented = false) + // + +/** + *
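A one-line sketch, assuming hist1 and hist2 were produced by calcHist above (with the correlation method, 1.0 means identical shapes):

double similarity = Imgproc.compareHist(hist1, hist2, Imgproc.CV_COMP_CORREL);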

Calculates a contour area.

+ * + *

The function computes a contour area. Similarly to "moments", the area is + * computed using the Green formula. Thus, the returned area and the number of + * non-zero pixels, if you draw the contour using "drawContours" or "fillPoly", + * can be different. + * Also, the function will most certainly give wrong results for contours with + * self-intersections. + * Example:

+ * + *

// C++ code:

+ * + *

vector<Point2f> contour;

+ * + *

contour.push_back(Point2f(0, 0));

+ * + *

contour.push_back(Point2f(10, 0));

+ * + *

contour.push_back(Point2f(10, 10));

+ * + *

contour.push_back(Point2f(5, 4));

+ * + *

double area0 = contourArea(contour);

+ * + *

vector<Point2f> approx;

+ * + *

approxPolyDP(contour, approx, 5, true);

+ * + *

double area1 = contourArea(approx);

+ * + *

cout << "area0 =" << area0 << endl <<

+ * + *

"area1 =" << area1 << endl <<

+ * + *

"approx poly vertices" << approx.size() << endl;

+ * + * @param contour Input vector of 2D points (contour vertices), stored in + * std.vector or Mat. + * @param oriented Oriented area flag. If it is true, the function returns a + * signed area value, depending on the contour orientation (clockwise or + * counter-clockwise). Using this feature you can determine orientation of a + * contour by taking the sign of an area. By default, the parameter is + * false, which means that the absolute value is returned. + * + * @see org.opencv.imgproc.Imgproc.contourArea + */ + public static double contourArea(Mat contour, boolean oriented) + { + + double retVal = contourArea_0(contour.nativeObj, oriented); + + return retVal; + } + +/** + *
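The C++ example above, re-expressed as a hedged sketch against the Java wrapper (MatOfPoint2f stands in for vector<Point2f>):

MatOfPoint2f contour = new MatOfPoint2f(new Point(0, 0), new Point(10, 0),
        new Point(10, 10), new Point(5, 4));
double area0 = Imgproc.contourArea(contour);
MatOfPoint2f approx = new MatOfPoint2f();
Imgproc.approxPolyDP(contour, approx, 5, true);
double area1 = Imgproc.contourArea(approx);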

Calculates a contour area.

+ * + *

The function computes a contour area. Similarly to "moments", the area is + * computed using the Green formula. Thus, the returned area and the number of + * non-zero pixels, if you draw the contour using "drawContours" or "fillPoly", + * can be different. + * Also, the function will most certainly give wrong results for contours with + * self-intersections. + * Example:

+ * + *

// C++ code:

+ * + *

vector<Point2f> contour;

+ * + *

contour.push_back(Point2f(0, 0));

+ * + *

contour.push_back(Point2f(10, 0));

+ * + *

contour.push_back(Point2f(10, 10));

+ * + *

contour.push_back(Point2f(5, 4));

+ * + *

double area0 = contourArea(contour);

+ * + *

vector<Point2f> approx;

+ * + *

approxPolyDP(contour, approx, 5, true);

+ * + *

double area1 = contourArea(approx);

+ * + *

cout << "area0 =" << area0 << endl <<

+ * + *

"area1 =" << area1 << endl <<

+ * + *

"approx poly vertices" << approx.size() << endl;

+ * + * @param contour Input vector of 2D points (contour vertices), stored in + * std.vector or Mat. + * + * @see org.opencv.imgproc.Imgproc.contourArea + */ + public static double contourArea(Mat contour) + { + + double retVal = contourArea_1(contour.nativeObj); + + return retVal; + } + + + // + // C++: void convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false) + // + +/** + *

Converts image transformation maps from one representation to another.

+ * + *

The function converts a pair of maps for "remap" from one representation to + * another. The following options ((map1.type(), map2.type()) + * -> (dstmap1.type(), dstmap2.type())) are supported:

+ *
    + *
  • (CV_32FC1, CV_32FC1) -> (CV_16SC2, CV_16UC1). This is the + * most frequently used conversion operation, in which the original + * floating-point maps (see "remap") are converted to a more compact and much + * faster fixed-point representation. The first output array contains the + * rounded coordinates and the second array (created only when nninterpolation=false) + * contains indices in the interpolation tables. + *
  • (CV_32FC2) -> (CV_16SC2, CV_16UC1). The same as above but the + * original maps are stored in one 2-channel matrix. + *
  • Reverse conversion. Obviously, the reconstructed floating-point maps + * will not be exactly the same as the originals. + *
+ * + * @param map1 The first input map of type CV_16SC2, + * CV_32FC1, or CV_32FC2. + * @param map2 The second input map of type CV_16UC1, + * CV_32FC1, or none (empty matrix), respectively. + * @param dstmap1 The first output map that has the type dstmap1type + * and the same size as src. + * @param dstmap2 The second output map. + * @param dstmap1type Type of the first output map that should be + * CV_16SC2, CV_32FC1, or CV_32FC2. + * @param nninterpolation Flag indicating whether the fixed-point maps are used + * for the nearest-neighbor or for a more complex interpolation. + * + * @see org.opencv.imgproc.Imgproc.convertMaps + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.imgproc.Imgproc#initUndistortRectifyMap + * @see org.opencv.imgproc.Imgproc#undistort + */ + public static void convertMaps(Mat map1, Mat map2, Mat dstmap1, Mat dstmap2, int dstmap1type, boolean nninterpolation) + { + + convertMaps_0(map1.nativeObj, map2.nativeObj, dstmap1.nativeObj, dstmap2.nativeObj, dstmap1type, nninterpolation); + + return; + } + +/** + *

Converts image transformation maps from one representation to another.

+ * + *

The function converts a pair of maps for "remap" from one representation to + * another. The following options ((map1.type(), map2.type()) + * -> (dstmap1.type(), dstmap2.type())) are supported:

+ *
    + *
  • (CV_32FC1, CV_32FC1) -> (CV_16SC2, CV_16UC1). This is the + * most frequently used conversion operation, in which the original + * floating-point maps (see "remap") are converted to a more compact and much + * faster fixed-point representation. The first output array contains the + * rounded coordinates and the second array (created only when nninterpolation=false) + * contains indices in the interpolation tables. + *
  • (CV_32FC2) -> (CV_16SC2, CV_16UC1). The same as above but the + * original maps are stored in one 2-channel matrix. + *
  • Reverse conversion. Obviously, the reconstructed floating-point maps + * will not be exactly the same as the originals. + *
+ * + * @param map1 The first input map of type CV_16SC2, + * CV_32FC1, or CV_32FC2. + * @param map2 The second input map of type CV_16UC1, + * CV_32FC1, or none (empty matrix), respectively. + * @param dstmap1 The first output map that has the type dstmap1type + * and the same size as src. + * @param dstmap2 The second output map. + * @param dstmap1type Type of the first output map that should be + * CV_16SC2, CV_32FC1, or CV_32FC2. + * + * @see org.opencv.imgproc.Imgproc.convertMaps + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.imgproc.Imgproc#initUndistortRectifyMap + * @see org.opencv.imgproc.Imgproc#undistort + */ + public static void convertMaps(Mat map1, Mat map2, Mat dstmap1, Mat dstmap2, int dstmap1type) + { + + convertMaps_1(map1.nativeObj, map2.nativeObj, dstmap1.nativeObj, dstmap2.nativeObj, dstmap1type); + + return; + } + + + // + // C++: void convexHull(vector_Point points, vector_int& hull, bool clockwise = false, _hidden_ returnPoints = true) + // + +/** + *

Finds the convex hull of a point set.

+ * + *

The functions find the convex hull of a 2D point set using Sklansky's + * algorithm [Sklansky82], which has O(N logN) complexity in the current + * implementation. See the OpenCV sample convexhull.cpp that + * demonstrates the usage of different function variants.

+ * + * @param points Input 2D point set, stored in std.vector or + * Mat. + * @param hull Output convex hull. It is either an integer vector of indices or + * vector of points. In the first case, the hull elements are + * 0-based indices of the convex hull points in the original array (since the + * set of convex hull points is a subset of the original point set). In the + * second case, hull elements are the convex hull points + * themselves. + * @param clockwise Orientation flag. If it is true, the output convex hull is + * oriented clockwise. Otherwise, it is oriented counter-clockwise. The usual + * screen coordinate system is assumed so that the origin is at the top-left + * corner, x axis is oriented to the right, and y axis is oriented downwards. + * + * @see org.opencv.imgproc.Imgproc.convexHull + */ + public static void convexHull(MatOfPoint points, MatOfInt hull, boolean clockwise) + { + Mat points_mat = points; + Mat hull_mat = hull; + convexHull_0(points_mat.nativeObj, hull_mat.nativeObj, clockwise); + + return; + } + +/** + *
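A hedged sketch of the index-returning variant (contour is assumed to come from Imgproc.findContours):

MatOfInt hullIdx = new MatOfInt();
Imgproc.convexHull(contour, hullIdx);          // 0-based indices into contour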

Finds the convex hull of a point set.

+ * + *

The functions find the convex hull of a 2D point set using Sklansky's + * algorithm [Sklansky82], which has O(N logN) complexity in the current + * implementation. See the OpenCV sample convexhull.cpp that + * demonstrates the usage of different function variants.

+ * + * @param points Input 2D point set, stored in std.vector or + * Mat. + * @param hull Output convex hull. It is either an integer vector of indices or + * vector of points. In the first case, the hull elements are + * 0-based indices of the convex hull points in the original array (since the + * set of convex hull points is a subset of the original point set). In the + * second case, hull elements are the convex hull points + * themselves. + * + * @see org.opencv.imgproc.Imgproc.convexHull + */ + public static void convexHull(MatOfPoint points, MatOfInt hull) + { + Mat points_mat = points; + Mat hull_mat = hull; + convexHull_1(points_mat.nativeObj, hull_mat.nativeObj); + + return; + } + + + // + // C++: void convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects) + // + +/** + *

Finds the convexity defects of a contour.

+ * + *

The function finds all convexity defects of the input contour and returns a + * sequence of the CvConvexityDefect structures, where + * CvConvexityDefect is defined as:

+ * + *

// C++ code:

+ * + *

struct CvConvexityDefect

+ * + * + *

CvPoint* start; // point of the contour where the defect begins

+ * + *

CvPoint* end; // point of the contour where the defect ends

+ * + *

CvPoint* depth_point; // the farthest from the convex hull point within the + * defect

+ * + *

float depth; // distance between the farthest point and the convex hull

+ * + *

};

+ * + *

A figure in the OpenCV documentation displays convexity defects of a hand contour.

+ * + * @param contour Input contour. + * @param convexhull Convex hull obtained using "convexHull" that should contain + * indices of the contour points that make the hull. + * @param convexityDefects The output vector of convexity defects. In C++ and + * the new Python/Java interface each convexity defect is represented as + * 4-element integer vector (a.k.a. cv.Vec4i): (start_index, + * end_index, farthest_pt_index, fixpt_depth), where indices are 0-based + * indices in the original contour of the convexity defect beginning, end and + * the farthest point, and fixpt_depth is fixed-point approximation + * (with 8 fractional bits) of the distance between the farthest contour point + * and the hull. That is, to get the floating-point value of the depth will be + * fixpt_depth/256.0. In C interface convexity defect is + * represented by CvConvexityDefect structure - see below. + * + * @see org.opencv.imgproc.Imgproc.convexityDefects + */ + public static void convexityDefects(MatOfPoint contour, MatOfInt convexhull, MatOfInt4 convexityDefects) + { + Mat contour_mat = contour; + Mat convexhull_mat = convexhull; + Mat convexityDefects_mat = convexityDefects; + convexityDefects_0(contour_mat.nativeObj, convexhull_mat.nativeObj, convexityDefects_mat.nativeObj); + + return; + } + + + // + // C++: void copyMakeBorder(Mat src, Mat& dst, int top, int bottom, int left, int right, int borderType, Scalar value = Scalar()) + // + +/** + *
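Continuing the convexHull sketch above, a hedged reading of the packed Vec4i-style records (contour and hullIdx as before):

MatOfInt4 defects = new MatOfInt4();
Imgproc.convexityDefects(contour, hullIdx, defects);
int[] d = defects.toArray();                        // 4 ints per defect
double depth = d.length >= 4 ? d[3] / 256.0 : 0;    // fixpt_depth of the first defect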

Forms a border around an image.

+ * + *

The function copies the source image into the middle of the destination + * image. The areas to the left, to the right, above and below the copied source + * image will be filled with extrapolated pixels. This is not what + * "FilterEngine" or filtering functions based on it do (they extrapolate pixels + * on the fly), but what other more complex functions, including your own, may do to + * simplify image boundary handling. + * The function supports the mode when src is already in the middle + * of dst. In this case, the function does not copy + * src itself but simply constructs the border, for example:

+ * + *

// C++ code:

+ * + *

// let border be the same in all directions

+ * + *

int border=2;

+ * + *

// constructs a larger image to fit both the image and the border

+ * + *

Mat gray_buf(rgb.rows + border*2, rgb.cols + border*2, rgb.depth());

+ * + *

// select the middle part of it w/o copying data

+ * + *

Mat gray(gray_buf, Rect(border, border, rgb.cols, rgb.rows));

+ * + *

// convert image from RGB to grayscale

+ * + *

cvtColor(rgb, gray, CV_RGB2GRAY);

+ * + *

// form a border in-place

+ * + *

copyMakeBorder(gray, gray_buf, border, border,

+ * + *

border, border, BORDER_REPLICATE);

+ * + *

// now do some custom filtering......

+ * + *

Note:

+ * + *

When the source image is a part (ROI) of a bigger image, the function will + * try to use the pixels outside of the ROI to form a border. To disable this + * feature and always do extrapolation, as if src was not a ROI, + * use borderType | BORDER_ISOLATED.

+ * + * @param src Source image. + * @param dst Destination image of the same type as src and the + * size Size(src.cols+left+right, src.rows+top+bottom). + * @param top a top + * @param bottom a bottom + * @param left a left + * @param right Parameter specifying how many pixels in each direction from the + * source image rectangle to extrapolate. For example, top=1, bottom=1, + * left=1, right=1 mean that 1 pixel-wide border needs to be built. + * @param borderType Border type. See "borderInterpolate" for details. + * @param value Border value if borderType==BORDER_CONSTANT. + * + * @see org.opencv.imgproc.Imgproc.copyMakeBorder + * @see org.opencv.imgproc.Imgproc#borderInterpolate + */ + public static void copyMakeBorder(Mat src, Mat dst, int top, int bottom, int left, int right, int borderType, Scalar value) + { + + copyMakeBorder_0(src.nativeObj, dst.nativeObj, top, bottom, left, right, borderType, value.val[0], value.val[1], value.val[2], value.val[3]); + + return; + } + +/** + *
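A hedged Java counterpart of the bordering idea above: pad 2 pixels on every side with a constant black border (src is illustrative):

Mat padded = new Mat();
Imgproc.copyMakeBorder(src, padded, 2, 2, 2, 2,
        Imgproc.BORDER_CONSTANT, new Scalar(0, 0, 0));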

Forms a border around an image.

+ * + *

The function copies the source image into the middle of the destination + * image. The areas to the left, to the right, above and below the copied source + * image will be filled with extrapolated pixels. This is not what + * "FilterEngine" or filtering functions based on it do (they extrapolate pixels + * on the fly), but what other more complex functions, including your own, may do to + * simplify image boundary handling. + * The function supports the mode when src is already in the middle + * of dst. In this case, the function does not copy + * src itself but simply constructs the border, for example:

+ * + *

// C++ code:

+ * + *

// let border be the same in all directions

+ * + *

int border=2;

+ * + *

// constructs a larger image to fit both the image and the border

+ * + *

Mat gray_buf(rgb.rows + border*2, rgb.cols + border*2, rgb.depth());

+ * + *

// select the middle part of it w/o copying data

+ * + *

Mat gray(gray_buf, Rect(border, border, rgb.cols, rgb.rows));

+ * + *

// convert image from RGB to grayscale

+ * + *

cvtColor(rgb, gray, CV_RGB2GRAY);

+ * + *

// form a border in-place

+ * + *

copyMakeBorder(gray, gray_buf, border, border,

+ * + *

border, border, BORDER_REPLICATE);

+ * + *

// now do some custom filtering......

+ * + *

Note:

+ * + *

When the source image is a part (ROI) of a bigger image, the function will + * try to use the pixels outside of the ROI to form a border. To disable this + * feature and always do extrapolation, as if src was not a ROI, + * use borderType | BORDER_ISOLATED.

+ * + * @param src Source image. + * @param dst Destination image of the same type as src and the + * size Size(src.cols+left+right, src.rows+top+bottom). + * @param top a top + * @param bottom a bottom + * @param left a left + * @param right Parameter specifying how many pixels in each direction from the + * source image rectangle to extrapolate. For example, top=1, bottom=1, + * left=1, right=1 mean that 1 pixel-wide border needs to be built. + * @param borderType Border type. See "borderInterpolate" for details. + * + * @see org.opencv.imgproc.Imgproc.copyMakeBorder + * @see org.opencv.imgproc.Imgproc#borderInterpolate + */ + public static void copyMakeBorder(Mat src, Mat dst, int top, int bottom, int left, int right, int borderType) + { + + copyMakeBorder_1(src.nativeObj, dst.nativeObj, top, bottom, left, right, borderType); + + return; + } + + + // + // C++: void cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, int borderType = BORDER_DEFAULT) + // + +/** + *

Calculates eigenvalues and eigenvectors of image blocks for corner detection.

+ * + *

For every pixel p, the function cornerEigenValsAndVecs + * considers a blockSize x blockSize + * neighborhood S(p). It calculates the covariance matrix of + * derivatives over the neighborhood as:

+ * + *

M = | sum_{S(p)} (dI/dx)^2       sum_{S(p)} (dI/dx)(dI/dy) | + * | sum_{S(p)} (dI/dx)(dI/dy)  sum_{S(p)} (dI/dy)^2 |

+ * + *

where the derivatives are computed using the "Sobel" operator.

+ * + *

After that, it finds eigenvectors and eigenvalues of M and stores + * them in the destination image as (lambda_1, lambda_2, x_1, y_1, x_2, + * y_2) where

+ *
    + *
  • lambda_1, lambda_2 are the non-sorted eigenvalues of + * M + *
  • x_1, y_1 are the eigenvectors corresponding to + * lambda_1 + *
  • x_2, y_2 are the eigenvectors corresponding to + * lambda_2 + *
+ * + *

The output of the function can be used for robust edge or corner detection.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the results. It has the same size as + * src and the type CV_32FC(6). + * @param blockSize Neighborhood size (see details below). + * @param ksize Aperture parameter for the "Sobel" operator. + * @param borderType Pixel extrapolation method. See "borderInterpolate". + * + * @see org.opencv.imgproc.Imgproc.cornerEigenValsAndVecs + * @see org.opencv.imgproc.Imgproc#cornerHarris + * @see org.opencv.imgproc.Imgproc#cornerMinEigenVal + * @see org.opencv.imgproc.Imgproc#preCornerDetect + */ + public static void cornerEigenValsAndVecs(Mat src, Mat dst, int blockSize, int ksize, int borderType) + { + + cornerEigenValsAndVecs_0(src.nativeObj, dst.nativeObj, blockSize, ksize, borderType); + + return; + } + +/** + *

Calculates eigenvalues and eigenvectors of image blocks for corner detection.

+ * + *

For every pixel p, the function cornerEigenValsAndVecs + * considers a blockSize x blockSize + * neighborhood S(p). It calculates the covariance matrix of + * derivatives over the neighborhood as:

+ * + *

M = | sum_{S(p)} (dI/dx)^2       sum_{S(p)} (dI/dx)(dI/dy) | + * | sum_{S(p)} (dI/dx)(dI/dy)  sum_{S(p)} (dI/dy)^2 |

+ * + *

where the derivatives are computed using the "Sobel" operator.

+ * + *

After that, it finds eigenvectors and eigenvalues of M and stores + * them in the destination image as (lambda_1, lambda_2, x_1, y_1, x_2, + * y_2) where

+ *
    + *
  • lambda_1, lambda_2 are the non-sorted eigenvalues of + * M + *
  • x_1, y_1 are the eigenvectors corresponding to + * lambda_1 + *
  • x_2, y_2 are the eigenvectors corresponding to + * lambda_2 + *
+ * + *

The output of the function can be used for robust edge or corner detection.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the results. It has the same size as + * src and the type CV_32FC(6). + * @param blockSize Neighborhood size (see details below). + * @param ksize Aperture parameter for the "Sobel" operator. + * + * @see org.opencv.imgproc.Imgproc.cornerEigenValsAndVecs + * @see org.opencv.imgproc.Imgproc#cornerHarris + * @see org.opencv.imgproc.Imgproc#cornerMinEigenVal + * @see org.opencv.imgproc.Imgproc#preCornerDetect + */ + public static void cornerEigenValsAndVecs(Mat src, Mat dst, int blockSize, int ksize) + { + + cornerEigenValsAndVecs_1(src.nativeObj, dst.nativeObj, blockSize, ksize); + + return; + } + + + // + // C++: void cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, int borderType = BORDER_DEFAULT) + // + +/** + *

Harris edge detector.

+ * + *

The function runs the Harris edge detector on the image. Similarly to + * "cornerMinEigenVal" and "cornerEigenValsAndVecs", for each pixel (x, + * y) it calculates a 2x2 gradient covariance matrix + * M^((x,y)) over a blockSize x blockSize neighborhood. Then, + * it computes the following characteristic:

+ * + *

dst(x,y) = det M^((x,y)) - k * (tr M^((x,y)))^2

+ * + *

Corners in the image can be found as the local maxima of this response map.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the Harris detector responses. It has the type + * CV_32FC1 and the same size as src. + * @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs"). + * @param ksize Aperture parameter for the "Sobel" operator. + * @param k Harris detector free parameter. See the formula below. + * @param borderType Pixel extrapolation method. See "borderInterpolate". + * + * @see org.opencv.imgproc.Imgproc.cornerHarris + */ + public static void cornerHarris(Mat src, Mat dst, int blockSize, int ksize, double k, int borderType) + { + + cornerHarris_0(src.nativeObj, dst.nativeObj, blockSize, ksize, k, borderType); + + return; + } + +/** + *
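A minimal sketch over a single-channel image gray (hypothetical); blockSize=2, ksize=3, k=0.04 are common starting values, not values mandated by the binding:

Mat response = new Mat();
Imgproc.cornerHarris(gray, response, 2, 3, 0.04);
Core.MinMaxLocResult mm = Core.minMaxLoc(response);   // mm.maxLoc: strongest corner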

Harris edge detector.

+ * + *

The function runs the Harris edge detector on the image. Similarly to + * "cornerMinEigenVal" and "cornerEigenValsAndVecs", for each pixel (x, + * y) it calculates a 2x2 gradient covariance matrix + * M^((x,y)) over a blockSize x blockSize neighborhood. Then, + * it computes the following characteristic:

+ * + *

dst(x,y) = det M^((x,y)) - k * (tr M^((x,y)))^2

+ * + *

Corners in the image can be found as the local maxima of this response map.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the Harris detector responses. It has the type + * CV_32FC1 and the same size as src. + * @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs"). + * @param ksize Aperture parameter for the "Sobel" operator. + * @param k Harris detector free parameter. See the formula below. + * + * @see org.opencv.imgproc.Imgproc.cornerHarris + */ + public static void cornerHarris(Mat src, Mat dst, int blockSize, int ksize, double k) + { + + cornerHarris_1(src.nativeObj, dst.nativeObj, blockSize, ksize, k); + + return; + } + + + // + // C++: void cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, int borderType = BORDER_DEFAULT) + // + +/** + *

Calculates the minimal eigenvalue of gradient matrices for corner detection.

+ * + *

The function is similar to "cornerEigenValsAndVecs" but it calculates and + * stores only the minimal eigenvalue of the covariance matrix of derivatives, + * that is, min(lambda_1, lambda_2) in terms of the formulae in the + * "cornerEigenValsAndVecs" description.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the minimal eigenvalues. It has the type + * CV_32FC1 and the same size as src. + * @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs"). + * @param ksize Aperture parameter for the "Sobel" operator. + * @param borderType Pixel extrapolation method. See "borderInterpolate". + * + * @see org.opencv.imgproc.Imgproc.cornerMinEigenVal + */ + public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize, int ksize, int borderType) + { + + cornerMinEigenVal_0(src.nativeObj, dst.nativeObj, blockSize, ksize, borderType); + + return; + } + +/** + *

Calculates the minimal eigenvalue of gradient matrices for corner detection.

+ * + *

The function is similar to "cornerEigenValsAndVecs" but it calculates and + * stores only the minimal eigenvalue of the covariance matrix of derivatives, + * that is, min(lambda_1, lambda_2) in terms of the formulae in the + * "cornerEigenValsAndVecs" description.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the minimal eigenvalues. It has the type + * CV_32FC1 and the same size as src. + * @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs"). + * @param ksize Aperture parameter for the "Sobel" operator. + * + * @see org.opencv.imgproc.Imgproc.cornerMinEigenVal + */ + public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize, int ksize) + { + + cornerMinEigenVal_1(src.nativeObj, dst.nativeObj, blockSize, ksize); + + return; + } + +/** + *

Calculates the minimal eigenvalue of gradient matrices for corner detection.

+ * + *

The function is similar to "cornerEigenValsAndVecs" but it calculates and + * stores only the minimal eigenvalue of the covariance matrix of derivatives, + * that is, min(lambda_1, lambda_2) in terms of the formulae in the + * "cornerEigenValsAndVecs" description.

+ * + * @param src Input single-channel 8-bit or floating-point image. + * @param dst Image to store the minimal eigenvalues. It has the type + * CV_32FC1 and the same size as src. + * @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs"). + * + * @see org.opencv.imgproc.Imgproc.cornerMinEigenVal + */ + public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize) + { + + cornerMinEigenVal_2(src.nativeObj, dst.nativeObj, blockSize); + + return; + } + + + // + // C++: void cornerSubPix(Mat image, vector_Point2f& corners, Size winSize, Size zeroZone, TermCriteria criteria) + // + +/** + *

Refines the corner locations.

+ * + *

The function iterates to find the sub-pixel accurate location of corners or + * radial saddle points, as illustrated in the figure in the OpenCV documentation.

+ * + *

Sub-pixel accurate corner locator is based on the observation that every + * vector from the center q to a point p located within a + * neighborhood of q is orthogonal to the image gradient at p + * subject to image and measurement noise. Consider the expression:

+ * + *

epsilon _i = (DI_(p_i))^T * (q - p_i)

+ * + *

where (DI_(p_i)) is an image gradient at one of the points + * p_i in a neighborhood of q. The value of q is to + * be found so that epsilon_i is minimized. A system of equations may + * be set up with epsilon_i set to zero:

+ * + *

sum_i(DI_(p_i) * (DI_(p_i))^T) * q - sum_i(DI_(p_i) * (DI_(p_i))^T * + * p_i) = 0

+ * + *

where the gradients are summed within a neighborhood ("search window") of + * q. Calling the first gradient term G and the second + * gradient term b gives:

+ * + *

q = G^(-1) * b

+ * + *

The algorithm sets the center of the neighborhood window at this new center + * q and then iterates until the center stays within a set threshold.

+ * + * @param image Input image. + * @param corners Initial coordinates of the input corners and refined + * coordinates provided for output. + * @param winSize Half of the side length of the search window. For example, if + * winSize=Size(5,5), then a 5*2+1 x 5*2+1 = 11 x 11 + * search window is used. + * @param zeroZone Half of the size of the dead region in the middle of the + * search zone over which the summation in the formula below is not done. It is + * used sometimes to avoid possible singularities of the autocorrelation matrix. + * The value of (-1,-1) indicates that there is no such a size. + * @param criteria Criteria for termination of the iterative process of corner + * refinement. That is, the process of corner position refinement stops either + * after criteria.maxCount iterations or when the corner position + * moves by less than criteria.epsilon on some iteration. + * + * @see org.opencv.imgproc.Imgproc.cornerSubPix + */ + public static void cornerSubPix(Mat image, MatOfPoint2f corners, Size winSize, Size zeroZone, TermCriteria criteria) + { + Mat corners_mat = corners; + cornerSubPix_0(image.nativeObj, corners_mat.nativeObj, winSize.width, winSize.height, zeroZone.width, zeroZone.height, criteria.type, criteria.maxCount, criteria.epsilon); + + return; + } + + + // + // C++: void createHanningWindow(Mat& dst, Size winSize, int type) + // + +/** + *
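A hedged sketch of the usual detect-then-refine pipeline (gray is hypothetical; the window sizes and termination criteria are typical choices only):

MatOfPoint coarse = new MatOfPoint();
Imgproc.goodFeaturesToTrack(gray, coarse, 100, 0.01, 10);
MatOfPoint2f corners = new MatOfPoint2f(coarse.toArray());
Imgproc.cornerSubPix(gray, corners, new Size(5, 5), new Size(-1, -1),
        new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 30, 0.01));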

This function computes the Hanning window coefficients in two dimensions. See + * http://en.wikipedia.org/wiki/Hann_function and http://en.wikipedia.org/wiki/Window_function + * for more information.

+ * + *

An example is shown below:

+ * + *

// C++ code:

+ * + *

// create hanning window of size 100x100 and type CV_32F

+ * + *

Mat hann;

+ * + *

createHanningWindow(hann, Size(100, 100), CV_32F);

+ * + * @param dst Destination array to place Hann coefficients in + * @param winSize The window size specifications + * @param type Created array type + * + * @see org.opencv.imgproc.Imgproc.createHanningWindow + * @see org.opencv.imgproc.Imgproc#phaseCorrelate + */ + public static void createHanningWindow(Mat dst, Size winSize, int type) + { + + createHanningWindow_0(dst.nativeObj, winSize.width, winSize.height, type); + + return; + } + + + // + // C++: void cvtColor(Mat src, Mat& dst, int code, int dstCn = 0) + // + +/** + *
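The C++ snippet above, translated directly to the Java wrapper:

Mat hann = new Mat();
Imgproc.createHanningWindow(hann, new Size(100, 100), CvType.CV_32F);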

Converts an image from one color space to another.

+ * + *

The function converts an input image from one color space to another. In case + * of a transformation to or from RGB color space, the order of the channels should + * be specified explicitly (RGB or BGR). + * Note that the default color format in OpenCV is often referred to as RGB but + * it is actually BGR (the bytes are reversed). So the first byte in a standard + * (24-bit) color image will be an 8-bit Blue component, the second byte will be + * Green, and the third byte will be Red. The fourth, fifth, and sixth bytes + * would then be the second pixel (Blue, then Green, then Red), and so on.

+ * + *

The conventional ranges for R, G, and B channel values are:

+ *
    + *
  • 0 to 255 for CV_8U images + *
  • 0 to 65535 for CV_16U images + *
  • 0 to 1 for CV_32F images + *
+ * + *

In case of linear transformations, the range does not matter. + * But in case of a non-linear transformation, an input RGB image should be + * normalized to the proper value range to get the correct results, for example, + * for RGB-> L*u*v* transformation. For example, if you have a 32-bit + * floating-point image directly converted from an 8-bit image without any + * scaling, then it will have the 0..255 value range instead of 0..1 assumed by + * the function. So, before calling cvtColor, you need first to + * scale the image down:

+ * + *

// C++ code:

+ * + *

img *= 1./255;

+ * + *

cvtColor(img, img, CV_BGR2Luv);

+ * + *

If you use cvtColor with 8-bit images, the conversion will lose + * some information. For many applications, this will not be noticeable, but + * it is recommended to use 32-bit images in applications that need the full + * range of colors or that convert an image before an operation and then convert + * back. + *

+ * + *

The function can do the following transformations:

+ *
    + *
  • Transformations within RGB space like adding/removing the alpha + * channel, reversing the channel order, conversion to/from 16-bit RGB color + * (R5:G6:B5 or R5:G5:B5), as well as conversion to/from grayscale using: + *
+ * + *

RGB[A] to Gray: Y <- 0.299 * R + 0.587 * G + 0.114 * B

+ * + *

and

+ * + *

Gray to RGB[A]: R <- Y, G <- Y, B <- Y, A <- 0

+ * + *

The conversion from an RGB image to gray is done with:

+ * + *

+ * + *

// C++ code:

+ * + *

cvtColor(src, bwsrc, CV_RGB2GRAY);

+ * + *

+ * + *

More advanced channel reordering can also be done with "mixChannels".

+ *
    + *
  • RGB <-> CIE XYZ.Rec 709 with D65 white point + * (CV_BGR2XYZ, CV_RGB2XYZ, CV_XYZ2BGR, CV_XYZ2RGB): + *
+ * + *

[X Y Z] <- |0.412453 0.357580 0.180423| + * |0.212671 0.715160 0.072169| + * |0.019334 0.119193 0.950227| * [R G B]

+ * + * + * + *

[R G B] <- |3.240479 -1.53715 -0.498535| + * |-0.969256 1.875991 0.041556| + * |0.055648 -0.204043 1.057311| * [X Y Z]

+ * + *

X, Y and Z cover the whole value range (in case of + * floating-point images, Z may exceed 1).

+ *
    + *
  • RGB <-> YCrCb JPEG (or YCC) (CV_BGR2YCrCb, + * CV_RGB2YCrCb, CV_YCrCb2BGR, CV_YCrCb2RGB) + *
+ * + *

Y <- 0.299 * R + 0.587 * G + 0.114 * B

+ * + * + * + *

Cr <- (R-Y) * 0.713 + delta

+ * + * + * + *

Cb <- (B-Y) * 0.564 + delta

+ * + * + * + *

R <- Y + 1.403 * (Cr - delta)

+ * + * + * + *

G <- Y - 0.714 * (Cr - delta) - 0.344 * (Cb - delta)

+ * + * + * + *

B <- Y + 1.773 * (Cb - delta)

+ * + *

where

+ * + *

delta = 128 for 8-bit images; 32768 for 16-bit images; 0.5 for floating-point images

+ * + *

Y, Cr, and Cb cover the whole value range.

+ *
    + *
  • RGB <-> HSV (CV_BGR2HSV, CV_RGB2HSV, CV_HSV2BGR, + * CV_HSV2RGB) In case of 8-bit and 16-bit images, R, G, and B are + * converted to the floating-point format and scaled to fit the 0 to 1 range. + *
+ * + *

V <- max(R,G,B)

+ * + * + * + *

S <- (V-min(R,G,B))/(V) if V != 0; 0 otherwise

+ * + * + * + *

H <- (60(G - B))/((V-min(R,G,B))) if V=R; (120+60(B - R))/((V-min(R,G,B))) + * if V=G; (240+60(R - G))/((V-min(R,G,B))) if V=B

+ * + *

If H<0 then H <- H+360. On output 0 <= V <= 1, + * 0 <= S <= 1, 0 <= H <= 360.

+ * + *

The values are then converted to the destination data type:

+ *
    + *
  • 8-bit images + *
+ * + *

V <- 255 V, S <- 255 S, H <- H/2(to fit to 0 to 255)

+ * + *
    + *
  • 16-bit images (currently not supported) + *
+ * + *

V <- 65535 V, S <- 65535 S, H <- H

+ * + *
    + *
  • 32-bit images H, S, and V are left as is + *
  • RGB <-> HLS (CV_BGR2HLS, CV_RGB2HLS, CV_HLS2BGR, + * CV_HLS2RGB). + *
+ *

In case of 8-bit and 16-bit images, R, G, and B are converted to the + * floating-point format and scaled to fit the 0 to 1 range.

+ * + *

V_(max) <- (max)(R,G,B)

+ * + * + * + *

V_(min) <- (min)(R,G,B)

+ * + * + * + *

L <- (V_(max) + V_(min))/2

+ * + * + * + *

S <- (V_max - V_min)/(V_max + V_min) if L < 0.5; + * (V_max - V_min)/(2 - (V_max + V_min)) if L >= 0.5

+ * + * + * + *

H <- 60(G - B)/S if V_max=R; + * 120 + 60(B - R)/S if V_max=G; + * 240 + 60(R - G)/S if V_max=B

+ * + *

If H<0 then H <- H+360. On output 0 <= L <= 1, + * 0 <= S <= 1, 0 <= H <= 360.

+ * + *

The values are then converted to the destination data type:

+ *
    + *
  • 8-bit images + *
+ * + *

V <- 255 * V, S <- 255 * S, H <- H/2(to fit to 0 to 255)

+ * + *
    + *
  • 16-bit images (currently not supported) + *
+ * + *

V <- 65535 * V, S <- 65535 * S, H <- H

+ * + *
    + *
  • 32-bit images H, S, V are left as is + *
  • RGB <-> CIE L*a*b* (CV_BGR2Lab, CV_RGB2Lab, CV_Lab2BGR, + * CV_Lab2RGB). + *
+ *

In case of 8-bit and 16-bit images, R, G, and B are converted to the + * floating-point format and scaled to fit the 0 to 1 range.

+ * + *

[X Y Z] <- + * |0.412453 0.357580 0.180423| + * |0.212671 0.715160 0.072169| + * |0.019334 0.119193 0.950227| * [R G B] + * + * + *
  • [R G B] + * + * + *
+ * + *

X <- X/X_n, where X_n = 0.950456

+ * + * + * + *

Z <- Z/Z_n, where Z_n = 1.088754

+ * + * + * + *

L <- 116*Y^(1/3)-16 for Y>0.008856; 903.3*Y for Y <= 0.008856

+ * + * + * + *

a <- 500(f(X)-f(Y)) + delta

+ * + * + * + *

b <- 200(f(Y)-f(Z)) + delta

+ * + *

where

+ * + *

f(t)= t^(1/3) for t>0.008856; 7.787 t+16/116 for t <= 0.008856

+ * + *

and

+ * + *

delta = 128 for 8-bit images; 0 for floating-point images

+ * + *

This outputs 0 <= L <= 100, -127 <= a <= 127, -127 <= b + * <= 127. The values are then converted to the destination data type:

+ *
    + *
  • 8-bit images + *
+ * + *

L <- L*255/100, a <- a + 128, b <- b + 128

+ * + *
    + *
  • 16-bit images (currently not supported) + *
  • 32-bit images L, a, and b are left as is + *
  • RGB <-> CIE L*u*v* (CV_BGR2Luv, CV_RGB2Luv, CV_Luv2BGR, + * CV_Luv2RGB). + *
+ *

In case of 8-bit and 16-bit images, R, G, and B are converted to the + * floating-point format and scaled to fit 0 to 1 range.

+ * + *

[X Y Z] <- + * |0.412453 0.357580 0.180423| + * |0.212671 0.715160 0.072169| + * |0.019334 0.119193 0.950227| * [R G B] + * + * + *
  • [R G B] + * + * + *
+ * + *

L <- 116*Y^(1/3)-16 for Y>0.008856; 903.3*Y for Y <= 0.008856

+ * + * + * + *

u' <- 4*X/(X + 15*Y + 3 Z)

+ * + * + * + *

v' <- 9*Y/(X + 15*Y + 3 Z)

+ * + * + * + *

u <- 13*L*(u' - u_n) where u_n=0.19793943

+ * + * + * + *

v <- 13*L*(v' - v_n) where v_n=0.46831096

+ * + *

This outputs 0 <= L <= 100, -134 <= u <= 220, -140 <= v + * <= 122.

+ * + *

The values are then converted to the destination data type:

+ *
    + *
  • 8-bit images + *
+ * + *

L <- 255/100 L, u <- 255/354(u + 134), v <- 255/256(v + 140)

+ * + *
    + *
  • 16-bit images (currently not supported) + *
  • 32-bit images L, u, and v are left as is + *
+ * + *

The above formulae for converting RGB to/from various color spaces have been + * taken from multiple sources on the web, primarily from the Charles Poynton + * site http://www.poynton.com/ColorFAQ.html

+ *
    + *
  • Bayer -> RGB (CV_BayerBG2BGR, CV_BayerGB2BGR, + * CV_BayerRG2BGR, CV_BayerGR2BGR, CV_BayerBG2RGB, CV_BayerGB2RGB, + * CV_BayerRG2RGB, CV_BayerGR2RGB). The Bayer pattern is widely used in + * CCD and CMOS cameras. It enables you to get color pictures from a single + * plane where R,G, and B pixels (sensors of a particular component) are + * interleaved as follows: The output RGB components of a pixel are interpolated + * from 1, 2, or + *
+ * + *

4 neighbors of the pixel having the same color. There are several + * modifications of the above pattern that can be achieved by shifting + * the pattern one pixel left and/or one pixel up. The two letters + * C_1 and C_2 in the conversion constants CV_Bayer C_1 C_2 2BGR and + * CV_Bayer C_1 C_2 2RGB indicate the particular pattern type. These are + * components from the second row, second and third columns, respectively. + * For example, the above pattern has a very popular "BG" type.

+ * + * @param src input image: 8-bit unsigned, 16-bit unsigned (CV_16UC...), + * or single-precision floating-point. + * @param dst output image of the same size and depth as src. + * @param code color space conversion code (see the description below). + * @param dstCn number of channels in the destination image; if the parameter is + * 0, the number of the channels is derived automatically from src + * and code. + * + * @see org.opencv.imgproc.Imgproc.cvtColor + */ + public static void cvtColor(Mat src, Mat dst, int code, int dstCn) + { + + cvtColor_0(src.nativeObj, dst.nativeObj, code, dstCn); + + return; + } + +/** + *

Converts an image from one color space to another.

The function converts an input image from one color space to another. In case of a transformation to/from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on.

The conventional ranges for R, G, and B channel values are:

  • 0 to 255 for CV_8U images
  • 0 to 65535 for CV_16U images
  • 0 to 1 for CV_32F images

In case of linear transformations, the range does not matter. But in case of a non-linear transformation, an input RGB image should be normalized to the proper value range to get the correct results, for example, for the RGB -> L*u*v* transformation. For example, if you have a 32-bit floating-point image directly converted from an 8-bit image without any scaling, it will have the 0..255 value range instead of the 0..1 range assumed by the function. So, before calling cvtColor, you first need to scale the image down:

// C++ code:

img *= 1./255;
cvtColor(img, img, CV_BGR2Luv);

If you use cvtColor with 8-bit images, the conversion will lose some information. For many applications this will not be noticeable, but it is recommended to use 32-bit images in applications that need the full range of colors or that convert an image before an operation and then convert back.
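A minimal Java sketch of the same scaling step through these wrappers (illustration only: it assumes the OpenCV native library is already loaded, the usual org.opencv.core/org.opencv.imgproc imports, and an 8-bit BGR Mat named img):

// Java code:

Mat img32 = new Mat();
img.convertTo(img32, CvType.CV_32FC3, 1.0 / 255); // scale 0..255 down to 0..1
Imgproc.cvtColor(img32, img32, Imgproc.COLOR_BGR2Luv);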

The function can do the following transformations:

  • Transformations within RGB space like adding/removing the alpha channel, reversing the channel order, conversion to/from 16-bit RGB color (R5:G6:B5 or R5:G5:B5), as well as conversion to/from grayscale using:

RGB[A] to Gray: Y <- 0.299*R + 0.587*G + 0.114*B

and

Gray to RGB[A]: R <- Y, G <- Y, B <- Y, A <- 0

The conversion from an RGB image to gray is done with:

// C++ code:

cvtColor(src, bwsrc, CV_RGB2GRAY);

More advanced channel reordering can also be done with "mixChannels".
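The grayscale conversion looks much the same through the Java wrapper (a sketch; src and bwsrc are illustrative names and the usual imports are assumed):

// Java code:

Mat bwsrc = new Mat();
Imgproc.cvtColor(src, bwsrc, Imgproc.COLOR_RGB2GRAY);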

  • RGB <-> CIE XYZ.Rec 709 with D65 white point (CV_BGR2XYZ, CV_RGB2XYZ, CV_XYZ2BGR, CV_XYZ2RGB):

[X]    |0.412453 0.357580 0.180423|   [R]
[Y] <- |0.212671 0.715160 0.072169| * [G]
[Z]    |0.019334 0.119193 0.950227|   [B]

[R]    | 3.240479 -1.53715  -0.498535|   [X]
[G] <- |-0.969256  1.875991  0.041556| * [Y]
[B]    | 0.055648 -0.204043  1.057311|   [Z]

X, Y, and Z cover the whole value range (in case of floating-point images, Z may exceed 1).

  • RGB <-> YCrCb JPEG (or YCC) (CV_BGR2YCrCb, CV_RGB2YCrCb, CV_YCrCb2BGR, CV_YCrCb2RGB):

Y <- 0.299*R + 0.587*G + 0.114*B
Cr <- (R - Y)*0.713 + delta
Cb <- (B - Y)*0.564 + delta
R <- Y + 1.403*(Cr - delta)
G <- Y - 0.714*(Cr - delta) - 0.344*(Cb - delta)
B <- Y + 1.773*(Cb - delta)

where

delta = 128 for 8-bit images; 32768 for 16-bit images; 0.5 for floating-point images

Y, Cr, and Cb cover the whole value range.

  • RGB <-> HSV (CV_BGR2HSV, CV_RGB2HSV, CV_HSV2BGR, CV_HSV2RGB). In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and scaled to fit the 0 to 1 range.

V <- max(R,G,B)
S <- (V - min(R,G,B))/V if V != 0; 0 otherwise
H <- 60(G - B)/(V - min(R,G,B))           if V = R
     120 + 60(B - R)/(V - min(R,G,B))     if V = G
     240 + 60(R - G)/(V - min(R,G,B))     if V = B

If H < 0 then H <- H + 360. On output 0 <= V <= 1, 0 <= S <= 1, 0 <= H <= 360.

The values are then converted to the destination data type:

  • 8-bit images: V <- 255*V, S <- 255*S, H <- H/2 (to fit to 0 to 255)
  • 16-bit images (currently not supported): V <- 65535*V, S <- 65535*S, H <- H
  • 32-bit images: H, S, and V are left as is

  • RGB <-> HLS (CV_BGR2HLS, CV_RGB2HLS, CV_HLS2BGR, CV_HLS2RGB). In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and scaled to fit the 0 to 1 range.

V_max <- max(R,G,B)
V_min <- min(R,G,B)
L <- (V_max + V_min)/2
S <- (V_max - V_min)/(V_max + V_min)         if L < 0.5
     (V_max - V_min)/(2 - (V_max + V_min))   if L >= 0.5
H <- 60(G - B)/S           if V_max = R
     120 + 60(B - R)/S     if V_max = G
     240 + 60(R - G)/S     if V_max = B

If H < 0 then H <- H + 360. On output 0 <= L <= 1, 0 <= S <= 1, 0 <= H <= 360.

The values are then converted to the destination data type:

  • 8-bit images: V <- 255*V, S <- 255*S, H <- H/2 (to fit to 0 to 255)
  • 16-bit images (currently not supported): V <- 65535*V, S <- 65535*S, H <- H
  • 32-bit images: H, S, V are left as is

  • RGB <-> CIE L*a*b* (CV_BGR2Lab, CV_RGB2Lab, CV_Lab2BGR, CV_Lab2RGB).

In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and scaled to fit the 0 to 1 range.

[X]    |0.412453 0.357580 0.180423|   [R]
[Y] <- |0.212671 0.715160 0.072169| * [G]
[Z]    |0.019334 0.119193 0.950227|   [B]

X <- X/X_n, where X_n = 0.950456
Z <- Z/Z_n, where Z_n = 1.088754
L <- 116*Y^(1/3) - 16   for Y > 0.008856;   903.3*Y   for Y <= 0.008856
a <- 500*(f(X) - f(Y)) + delta
b <- 200*(f(Y) - f(Z)) + delta

where

f(t) = t^(1/3)   for t > 0.008856;   7.787*t + 16/116   for t <= 0.008856

and

delta = 128 for 8-bit images; 0 for floating-point images

This outputs 0 <= L <= 100, -127 <= a <= 127, -127 <= b <= 127. The values are then converted to the destination data type:

  • 8-bit images: L <- L*255/100, a <- a + 128, b <- b + 128
  • 16-bit images (currently not supported)
  • 32-bit images: L, a, and b are left as is

  • RGB <-> CIE L*u*v* (CV_BGR2Luv, CV_RGB2Luv, CV_Luv2BGR, CV_Luv2RGB).

In case of 8-bit and 16-bit images, R, G, and B are converted to the floating-point format and scaled to fit the 0 to 1 range.

[X]    |0.412453 0.357580 0.180423|   [R]
[Y] <- |0.212671 0.715160 0.072169| * [G]
[Z]    |0.019334 0.119193 0.950227|   [B]

L <- 116*Y^(1/3) - 16   for Y > 0.008856;   903.3*Y   for Y <= 0.008856
u' <- 4*X/(X + 15*Y + 3*Z)
v' <- 9*Y/(X + 15*Y + 3*Z)
u <- 13*L*(u' - u_n), where u_n = 0.19793943
v <- 13*L*(v' - v_n), where v_n = 0.46831096

This outputs 0 <= L <= 100, -134 <= u <= 220, -140 <= v <= 122.

The values are then converted to the destination data type:

  • 8-bit images: L <- 255/100*L, u <- 255/354*(u + 134), v <- 255/256*(v + 140)
  • 16-bit images (currently not supported)
  • 32-bit images: L, u, and v are left as is

The above formulae for converting RGB to/from various color spaces have been taken from multiple sources on the web, primarily from the Charles Poynton site http://www.poynton.com/ColorFAQ.html

  • Bayer -> RGB (CV_BayerBG2BGR, CV_BayerGB2BGR, CV_BayerRG2BGR, CV_BayerGR2BGR, CV_BayerBG2RGB, CV_BayerGB2RGB, CV_BayerRG2RGB, CV_BayerGR2RGB). The Bayer pattern is widely used in CCD and CMOS cameras. It enables you to get color pictures from a single plane where R, G, and B pixels (sensors of a particular component) are interleaved. The output RGB components of a pixel are interpolated from 1, 2, or 4 neighbors of the pixel having the same color. There are several modifications of the above pattern that can be achieved by shifting the pattern one pixel left and/or one pixel up. The two letters C_1 and C_2 in the conversion constants CV_BayerC_1C_22BGR and CV_BayerC_1C_22RGB indicate the particular pattern type. These are components from the second row, second and third columns, respectively. For example, the above pattern has a very popular "BG" type.

+ * + * @param src input image: 8-bit unsigned, 16-bit unsigned (CV_16UC...), + * or single-precision floating-point. + * @param dst output image of the same size and depth as src. + * @param code color space conversion code (see the description below). + * + * @see org.opencv.imgproc.Imgproc.cvtColor + */ + public static void cvtColor(Mat src, Mat dst, int code) + { + + cvtColor_1(src.nativeObj, dst.nativeObj, code); + + return; + } + + + // + // C++: void dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) + // + +/** + *

Dilates an image by using a specific structuring element.

The function dilates the source image using the specified structuring element that determines the shape of a pixel neighborhood over which the maximum is taken:

dst(x,y) = max over (x',y'): element(x',y') != 0 of src(x+x', y+y')

The function supports the in-place mode. Dilation can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.
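A minimal Java sketch of a typical call, assuming the usual imports and an input Mat named src (the 3x3 rectangular kernel is just an illustrative choice):

// Java code:

Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
Mat dilated = new Mat();
Imgproc.dilate(src, dilated, kernel); // default anchor (kernel center), one iteration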

+ * + * @param src input image; the number of channels can be arbitrary, but the + * depth should be one of CV_8U, CV_16U, + * CV_16S, CV_32F" or CV_64F". + * @param dst output image of the same size and type as src. + * @param kernel a kernel + * @param anchor position of the anchor within the element; default value + * (-1, -1) means that the anchor is at the element center. + * @param iterations number of times dilation is applied. + * @param borderType pixel extrapolation method (see "borderInterpolate" for + * details). + * @param borderValue border value in case of a constant border (see + * "createMorphologyFilter" for details). + * + * @see org.opencv.imgproc.Imgproc.dilate + * @see org.opencv.imgproc.Imgproc#erode + * @see org.opencv.imgproc.Imgproc#morphologyEx + */ + public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) + { + + dilate_0(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); + + return; + } + +/** + *

Dilates an image by using a specific structuring element.

The function dilates the source image using the specified structuring element that determines the shape of a pixel neighborhood over which the maximum is taken:

dst(x,y) = max over (x',y'): element(x',y') != 0 of src(x+x', y+y')

The function supports the in-place mode. Dilation can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.

+ * + * @param src input image; the number of channels can be arbitrary, but the + * depth should be one of CV_8U, CV_16U, + * CV_16S, CV_32F" or CV_64F". + * @param dst output image of the same size and type as src. + * @param kernel a kernel + * @param anchor position of the anchor within the element; default value + * (-1, -1) means that the anchor is at the element center. + * @param iterations number of times dilation is applied. + * + * @see org.opencv.imgproc.Imgproc.dilate + * @see org.opencv.imgproc.Imgproc#erode + * @see org.opencv.imgproc.Imgproc#morphologyEx + */ + public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations) + { + + dilate_1(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations); + + return; + } + +/** + *

Dilates an image by using a specific structuring element.

The function dilates the source image using the specified structuring element that determines the shape of a pixel neighborhood over which the maximum is taken:

dst(x,y) = max over (x',y'): element(x',y') != 0 of src(x+x', y+y')

The function supports the in-place mode. Dilation can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.

+ * + * @param src input image; the number of channels can be arbitrary, but the + * depth should be one of CV_8U, CV_16U, + * CV_16S, CV_32F" or CV_64F". + * @param dst output image of the same size and type as src. + * @param kernel a kernel + * + * @see org.opencv.imgproc.Imgproc.dilate + * @see org.opencv.imgproc.Imgproc#erode + * @see org.opencv.imgproc.Imgproc#morphologyEx + */ + public static void dilate(Mat src, Mat dst, Mat kernel) + { + + dilate_2(src.nativeObj, dst.nativeObj, kernel.nativeObj); + + return; + } + + + // + // C++: void distanceTransform(Mat src, Mat& dst, int distanceType, int maskSize) + // + +/** + *

Calculates the distance to the closest zero pixel for each pixel of the + * source image.

The functions distanceTransform calculate the approximate or precise distance from every binary image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.

When maskSize == CV_DIST_MASK_PRECISE and distanceType == CV_DIST_L2, the function runs the algorithm described in [Felzenszwalb04]. This algorithm is parallelized with the TBB library.

In other cases, the algorithm [Borgefors86] is used. This means that for a pixel the function finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical, diagonal, or knight's move (the latter is available for a 5x5 mask). The overall distance is calculated as a sum of these basic distances. Since the distance function should be symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as a), all the diagonal shifts must have the same cost (denoted as b), and all knight's moves must have the same cost (denoted as c). For the CV_DIST_C and CV_DIST_L1 types, the distance is calculated precisely, whereas for CV_DIST_L2 (Euclidean distance) the distance can be calculated only with a relative error (a 5x5 mask gives more accurate results). For a, b, and c, OpenCV uses the values suggested in the original paper:

CV_DIST_C   (3x3)  a = 1,     b = 1
CV_DIST_L1  (3x3)  a = 1,     b = 2
CV_DIST_L2  (3x3)  a = 0.955, b = 1.3693
CV_DIST_L2  (5x5)  a = 1,     b = 1.4,  c = 2.1969

Typically, for a fast, coarse distance estimation CV_DIST_L2, a 3x3 mask is used. For a more accurate distance estimation CV_DIST_L2, a 5x5 mask or the precise algorithm is used. Note that both the precise and the approximate algorithms are linear in the number of pixels.

The second variant of the function not only computes the minimum distance for each pixel (x, y) but also identifies the nearest connected component consisting of zero pixels (labelType==DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==DIST_LABEL_PIXEL). The index of the component/pixel is stored in labels(x, y). When labelType==DIST_LABEL_CCOMP, the function automatically finds connected components of zero pixels in the input image and marks them with distinct labels. When labelType==DIST_LABEL_PIXEL, the function scans through the input image and marks all the zero pixels with distinct labels.

In this mode, the complexity is still linear. That is, the function provides a very fast way to compute the Voronoi diagram for a binary image. Currently, the second variant can use only the approximate distance transform algorithm, i.e. maskSize=CV_DIST_MASK_PRECISE is not supported yet.
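A minimal Java sketch of a typical call, assuming an 8-bit single-channel Mat named src (the threshold step just ensures a binary input; names are illustrative):

// Java code:

Mat bin = new Mat();
Imgproc.threshold(src, bin, 128, 255, Imgproc.THRESH_BINARY); // make sure the input is binary
Mat dist = new Mat();
Imgproc.distanceTransform(bin, dist, Imgproc.CV_DIST_L2, 5); // 5x5 mask for better accuracy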

+ * + * @param src 8-bit, single-channel (binary) source image. + * @param dst Output image with calculated distances. It is a 32-bit + * floating-point, single-channel image of the same size as src. + * @param distanceType Type of distance. It can be CV_DIST_L1, + * CV_DIST_L2, or CV_DIST_C. + * @param maskSize Size of the distance transform mask. It can be 3, 5, or + * CV_DIST_MASK_PRECISE (the latter option is only supported by the + * first function). In case of the CV_DIST_L1 or CV_DIST_C + * distance type, the parameter is forced to 3 because a 3x 3 mask + * gives the same result as 5x 5 or any larger aperture. + * + * @see org.opencv.imgproc.Imgproc.distanceTransform + */ + public static void distanceTransform(Mat src, Mat dst, int distanceType, int maskSize) + { + + distanceTransform_0(src.nativeObj, dst.nativeObj, distanceType, maskSize); + + return; + } + + + // + // C++: void distanceTransform(Mat src, Mat& dst, Mat& labels, int distanceType, int maskSize, int labelType = DIST_LABEL_CCOMP) + // + +/** + *

Calculates the distance to the closest zero pixel for each pixel of the + * source image.

The functions distanceTransform calculate the approximate or precise distance from every binary image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.

When maskSize == CV_DIST_MASK_PRECISE and distanceType == CV_DIST_L2, the function runs the algorithm described in [Felzenszwalb04]. This algorithm is parallelized with the TBB library.

In other cases, the algorithm [Borgefors86] is used. This means that for a pixel the function finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical, diagonal, or knight's move (the latter is available for a 5x5 mask). The overall distance is calculated as a sum of these basic distances. Since the distance function should be symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as a), all the diagonal shifts must have the same cost (denoted as b), and all knight's moves must have the same cost (denoted as c). For the CV_DIST_C and CV_DIST_L1 types, the distance is calculated precisely, whereas for CV_DIST_L2 (Euclidean distance) the distance can be calculated only with a relative error (a 5x5 mask gives more accurate results). For a, b, and c, OpenCV uses the values suggested in the original paper:

CV_DIST_C   (3x3)  a = 1,     b = 1
CV_DIST_L1  (3x3)  a = 1,     b = 2
CV_DIST_L2  (3x3)  a = 0.955, b = 1.3693
CV_DIST_L2  (5x5)  a = 1,     b = 1.4,  c = 2.1969

Typically, for a fast, coarse distance estimation CV_DIST_L2, a 3x3 mask is used. For a more accurate distance estimation CV_DIST_L2, a 5x5 mask or the precise algorithm is used. Note that both the precise and the approximate algorithms are linear in the number of pixels.

The second variant of the function not only computes the minimum distance for each pixel (x, y) but also identifies the nearest connected component consisting of zero pixels (labelType==DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==DIST_LABEL_PIXEL). The index of the component/pixel is stored in labels(x, y). When labelType==DIST_LABEL_CCOMP, the function automatically finds connected components of zero pixels in the input image and marks them with distinct labels. When labelType==DIST_LABEL_PIXEL, the function scans through the input image and marks all the zero pixels with distinct labels.

In this mode, the complexity is still linear. That is, the function provides a very fast way to compute the Voronoi diagram for a binary image. Currently, the second variant can use only the approximate distance transform algorithm, i.e. maskSize=CV_DIST_MASK_PRECISE is not supported yet.

+ * + * @param src 8-bit, single-channel (binary) source image. + * @param dst Output image with calculated distances. It is a 32-bit + * floating-point, single-channel image of the same size as src. + * @param labels Optional output 2D array of labels (the discrete Voronoi + * diagram). It has the type CV_32SC1 and the same size as + * src. See the details below. + * @param distanceType Type of distance. It can be CV_DIST_L1, + * CV_DIST_L2, or CV_DIST_C. + * @param maskSize Size of the distance transform mask. It can be 3, 5, or + * CV_DIST_MASK_PRECISE (the latter option is only supported by the + * first function). In case of the CV_DIST_L1 or CV_DIST_C + * distance type, the parameter is forced to 3 because a 3x 3 mask + * gives the same result as 5x 5 or any larger aperture. + * @param labelType Type of the label array to build. If labelType==DIST_LABEL_CCOMP + * then each connected component of zeros in src (as well as all + * the non-zero pixels closest to the connected component) will be assigned the + * same label. If labelType==DIST_LABEL_PIXEL then each zero pixel + * (and all the non-zero pixels closest to it) gets its own label. + * + * @see org.opencv.imgproc.Imgproc.distanceTransform + */ + public static void distanceTransformWithLabels(Mat src, Mat dst, Mat labels, int distanceType, int maskSize, int labelType) + { + + distanceTransformWithLabels_0(src.nativeObj, dst.nativeObj, labels.nativeObj, distanceType, maskSize, labelType); + + return; + } + +/** + *

Calculates the distance to the closest zero pixel for each pixel of the + * source image.

The functions distanceTransform calculate the approximate or precise distance from every binary image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.

When maskSize == CV_DIST_MASK_PRECISE and distanceType == CV_DIST_L2, the function runs the algorithm described in [Felzenszwalb04]. This algorithm is parallelized with the TBB library.

In other cases, the algorithm [Borgefors86] is used. This means that for a pixel the function finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical, diagonal, or knight's move (the latter is available for a 5x5 mask). The overall distance is calculated as a sum of these basic distances. Since the distance function should be symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as a), all the diagonal shifts must have the same cost (denoted as b), and all knight's moves must have the same cost (denoted as c). For the CV_DIST_C and CV_DIST_L1 types, the distance is calculated precisely, whereas for CV_DIST_L2 (Euclidean distance) the distance can be calculated only with a relative error (a 5x5 mask gives more accurate results). For a, b, and c, OpenCV uses the values suggested in the original paper:

CV_DIST_C   (3x3)  a = 1,     b = 1
CV_DIST_L1  (3x3)  a = 1,     b = 2
CV_DIST_L2  (3x3)  a = 0.955, b = 1.3693
CV_DIST_L2  (5x5)  a = 1,     b = 1.4,  c = 2.1969

Typically, for a fast, coarse distance estimation CV_DIST_L2, a 3x3 mask is used. For a more accurate distance estimation CV_DIST_L2, a 5x5 mask or the precise algorithm is used. Note that both the precise and the approximate algorithms are linear in the number of pixels.

The second variant of the function not only computes the minimum distance for each pixel (x, y) but also identifies the nearest connected component consisting of zero pixels (labelType==DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==DIST_LABEL_PIXEL). The index of the component/pixel is stored in labels(x, y). When labelType==DIST_LABEL_CCOMP, the function automatically finds connected components of zero pixels in the input image and marks them with distinct labels. When labelType==DIST_LABEL_PIXEL, the function scans through the input image and marks all the zero pixels with distinct labels.

In this mode, the complexity is still linear. That is, the function provides a very fast way to compute the Voronoi diagram for a binary image. Currently, the second variant can use only the approximate distance transform algorithm, i.e. maskSize=CV_DIST_MASK_PRECISE is not supported yet.

+ * + * @param src 8-bit, single-channel (binary) source image. + * @param dst Output image with calculated distances. It is a 32-bit + * floating-point, single-channel image of the same size as src. + * @param labels Optional output 2D array of labels (the discrete Voronoi + * diagram). It has the type CV_32SC1 and the same size as + * src. See the details below. + * @param distanceType Type of distance. It can be CV_DIST_L1, + * CV_DIST_L2, or CV_DIST_C. + * @param maskSize Size of the distance transform mask. It can be 3, 5, or + * CV_DIST_MASK_PRECISE (the latter option is only supported by the + * first function). In case of the CV_DIST_L1 or CV_DIST_C + * distance type, the parameter is forced to 3 because a 3x 3 mask + * gives the same result as 5x 5 or any larger aperture. + * + * @see org.opencv.imgproc.Imgproc.distanceTransform + */ + public static void distanceTransformWithLabels(Mat src, Mat dst, Mat labels, int distanceType, int maskSize) + { + + distanceTransformWithLabels_1(src.nativeObj, dst.nativeObj, labels.nativeObj, distanceType, maskSize); + + return; + } + + + // + // C++: void drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, int lineType = 8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point()) + // + +/** + *

Draws contours outlines or filled contours.

The function draws contour outlines in the image if thickness >= 0 or fills the area bounded by the contours if thickness < 0. The example below shows how to retrieve connected components from the binary image and label them:

// C++ code:

#include "cv.h"
#include "highgui.h"

using namespace cv;

int main(int argc, char** argv)
{
    Mat src;
    // the first command-line parameter must be a filename of the binary
    // (black-n-white) image
    if(argc != 2 || !(src=imread(argv[1], 0)).data)
        return -1;

    Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);

    src = src > 1;
    namedWindow("Source", 1);
    imshow("Source", src);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    findContours(src, contours, hierarchy,
                 CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    // iterate through all the top-level contours,
    // draw each connected component with its own random color
    int idx = 0;
    for(; idx >= 0; idx = hierarchy[idx][0])
    {
        Scalar color(rand()&255, rand()&255, rand()&255);
        drawContours(dst, contours, idx, color, CV_FILLED, 8, hierarchy);
    }

    namedWindow("Components", 1);
    imshow("Components", dst);
    waitKey(0);
}
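A rough Java equivalent of the labeling loop through these wrappers (a sketch, not a drop-in port: src is assumed to be an 8-bit binary Mat, and java.util plus the usual org.opencv imports are assumed):

// Java code:

List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(src, contours, hierarchy, Imgproc.RETR_CCOMP, Imgproc.CHAIN_APPROX_SIMPLE);

Mat dst = Mat.zeros(src.rows(), src.cols(), CvType.CV_8UC3);
java.util.Random rng = new java.util.Random();
// hierarchy is a 1xN CV_32SC4 Mat; element 0 of each entry is the next contour index
for (int idx = 0; idx >= 0 && !hierarchy.empty(); idx = (int) hierarchy.get(0, idx)[0]) {
    Scalar color = new Scalar(rng.nextInt(256), rng.nextInt(256), rng.nextInt(256));
    Imgproc.drawContours(dst, contours, idx, color, -1 /* filled */, 8, hierarchy,
                         Integer.MAX_VALUE, new Point());
}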

+ * + * + * @param image Destination image. + * @param contours All the input contours. Each contour is stored as a point + * vector. + * @param contourIdx Parameter indicating a contour to draw. If it is negative, + * all the contours are drawn. + * @param color Color of the contours. + * @param thickness Thickness of lines the contours are drawn with. If it is + * negative (for example, thickness=CV_FILLED), the contour + * interiors are drawn. + * @param lineType Line connectivity. See "line" for details. + * @param hierarchy Optional information about hierarchy. It is only needed if + * you want to draw only some of the contours (see maxLevel). + * @param maxLevel Maximal level for drawn contours. If it is 0, only the + * specified contour is drawn. If it is 1, the function draws the contour(s) and + * all the nested contours. If it is 2, the function draws the contours, all the + * nested contours, all the nested-to-nested contours, and so on. This parameter + * is only taken into account when there is hierarchy available. + * @param offset Optional contour shift parameter. Shift all the drawn contours + * by the specified offset=(dx,dy). + * + * @see org.opencv.imgproc.Imgproc.drawContours + */ + public static void drawContours(Mat image, List contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy, int maxLevel, Point offset) + { + List contours_tmplm = new ArrayList((contours != null) ? contours.size() : 0); + Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm); + drawContours_0(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, hierarchy.nativeObj, maxLevel, offset.x, offset.y); + + return; + } + +/** + *

Draws contours outlines or filled contours.

The function draws contour outlines in the image if thickness >= 0 or fills the area bounded by the contours if thickness < 0. The example below shows how to retrieve connected components from the binary image and label them:

// C++ code:

#include "cv.h"
#include "highgui.h"

using namespace cv;

int main(int argc, char** argv)
{
    Mat src;
    // the first command-line parameter must be a filename of the binary
    // (black-n-white) image
    if(argc != 2 || !(src=imread(argv[1], 0)).data)
        return -1;

    Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);

    src = src > 1;
    namedWindow("Source", 1);
    imshow("Source", src);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    findContours(src, contours, hierarchy,
                 CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    // iterate through all the top-level contours,
    // draw each connected component with its own random color
    int idx = 0;
    for(; idx >= 0; idx = hierarchy[idx][0])
    {
        Scalar color(rand()&255, rand()&255, rand()&255);
        drawContours(dst, contours, idx, color, CV_FILLED, 8, hierarchy);
    }

    namedWindow("Components", 1);
    imshow("Components", dst);
    waitKey(0);
}

+ * + * + * @param image Destination image. + * @param contours All the input contours. Each contour is stored as a point + * vector. + * @param contourIdx Parameter indicating a contour to draw. If it is negative, + * all the contours are drawn. + * @param color Color of the contours. + * @param thickness Thickness of lines the contours are drawn with. If it is + * negative (for example, thickness=CV_FILLED), the contour + * interiors are drawn. + * + * @see org.opencv.imgproc.Imgproc.drawContours + */ + public static void drawContours(Mat image, List contours, int contourIdx, Scalar color, int thickness) + { + List contours_tmplm = new ArrayList((contours != null) ? contours.size() : 0); + Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm); + drawContours_1(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness); + + return; + } + +/** + *

Draws contours outlines or filled contours.

The function draws contour outlines in the image if thickness >= 0 or fills the area bounded by the contours if thickness < 0. The example below shows how to retrieve connected components from the binary image and label them:

// C++ code:

#include "cv.h"
#include "highgui.h"

using namespace cv;

int main(int argc, char** argv)
{
    Mat src;
    // the first command-line parameter must be a filename of the binary
    // (black-n-white) image
    if(argc != 2 || !(src=imread(argv[1], 0)).data)
        return -1;

    Mat dst = Mat::zeros(src.rows, src.cols, CV_8UC3);

    src = src > 1;
    namedWindow("Source", 1);
    imshow("Source", src);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    findContours(src, contours, hierarchy,
                 CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    // iterate through all the top-level contours,
    // draw each connected component with its own random color
    int idx = 0;
    for(; idx >= 0; idx = hierarchy[idx][0])
    {
        Scalar color(rand()&255, rand()&255, rand()&255);
        drawContours(dst, contours, idx, color, CV_FILLED, 8, hierarchy);
    }

    namedWindow("Components", 1);
    imshow("Components", dst);
    waitKey(0);
}

+ * + * + * @param image Destination image. + * @param contours All the input contours. Each contour is stored as a point + * vector. + * @param contourIdx Parameter indicating a contour to draw. If it is negative, + * all the contours are drawn. + * @param color Color of the contours. + * + * @see org.opencv.imgproc.Imgproc.drawContours + */ + public static void drawContours(Mat image, List contours, int contourIdx, Scalar color) + { + List contours_tmplm = new ArrayList((contours != null) ? contours.size() : 0); + Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm); + drawContours_2(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3]); + + return; + } + + + // + // C++: void equalizeHist(Mat src, Mat& dst) + // + +/** + *

Equalizes the histogram of a grayscale image.

The function equalizes the histogram of the input image using the following algorithm:

  • Calculate the histogram H for src.
  • Normalize the histogram so that the sum of histogram bins is 255.
  • Compute the integral of the histogram:

H'_i = sum over 0 <= j < i of H(j)

  • Transform the image using H' as a look-up table: dst(x,y) = H'(src(x,y))

The algorithm normalizes the brightness and increases the contrast of the image.
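A minimal Java sketch (the function expects an 8-bit single-channel image, so a color input is converted to gray first; src is an illustrative name):

// Java code:

Mat gray = new Mat(), equalized = new Mat();
Imgproc.cvtColor(src, gray, Imgproc.COLOR_BGR2GRAY);
Imgproc.equalizeHist(gray, equalized);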

+ * + * @param src Source 8-bit single channel image. + * @param dst Destination image of the same size and type as src. + * + * @see org.opencv.imgproc.Imgproc.equalizeHist + */ + public static void equalizeHist(Mat src, Mat dst) + { + + equalizeHist_0(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) + // + +/** + *

Erodes an image by using a specific structuring element.

The function erodes the source image using the specified structuring element that determines the shape of a pixel neighborhood over which the minimum is taken:

dst(x,y) = min over (x',y'): element(x',y') != 0 of src(x+x', y+y')

The function supports the in-place mode. Erosion can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.
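A minimal Java sketch mirroring the dilate example above (the 5x5 elliptical kernel is an arbitrary illustrative choice):

// Java code:

Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
Mat eroded = new Mat();
Imgproc.erode(src, eroded, kernel);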

+ * + * @param src input image; the number of channels can be arbitrary, but the + * depth should be one of CV_8U, CV_16U, + * CV_16S, CV_32F" or CV_64F". + * @param dst output image of the same size and type as src. + * @param kernel a kernel + * @param anchor position of the anchor within the element; default value + * (-1, -1) means that the anchor is at the element center. + * @param iterations number of times erosion is applied. + * @param borderType pixel extrapolation method (see "borderInterpolate" for + * details). + * @param borderValue border value in case of a constant border (see + * "createMorphologyFilter" for details). + * + * @see org.opencv.imgproc.Imgproc.erode + * @see org.opencv.imgproc.Imgproc#morphologyEx + * @see org.opencv.imgproc.Imgproc#dilate + */ + public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) + { + + erode_0(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); + + return; + } + +/** + *

Erodes an image by using a specific structuring element.

The function erodes the source image using the specified structuring element that determines the shape of a pixel neighborhood over which the minimum is taken:

dst(x,y) = min over (x',y'): element(x',y') != 0 of src(x+x', y+y')

The function supports the in-place mode. Erosion can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.

+ * + * @param src input image; the number of channels can be arbitrary, but the + * depth should be one of CV_8U, CV_16U, + * CV_16S, CV_32F" or CV_64F". + * @param dst output image of the same size and type as src. + * @param kernel a kernel + * @param anchor position of the anchor within the element; default value + * (-1, -1) means that the anchor is at the element center. + * @param iterations number of times erosion is applied. + * + * @see org.opencv.imgproc.Imgproc.erode + * @see org.opencv.imgproc.Imgproc#morphologyEx + * @see org.opencv.imgproc.Imgproc#dilate + */ + public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations) + { + + erode_1(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations); + + return; + } + +/** + *

Erodes an image by using a specific structuring element.

The function erodes the source image using the specified structuring element that determines the shape of a pixel neighborhood over which the minimum is taken:

dst(x,y) = min over (x',y'): element(x',y') != 0 of src(x+x', y+y')

The function supports the in-place mode. Erosion can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.

+ * + * @param src input image; the number of channels can be arbitrary, but the + * depth should be one of CV_8U, CV_16U, + * CV_16S, CV_32F" or CV_64F". + * @param dst output image of the same size and type as src. + * @param kernel a kernel + * + * @see org.opencv.imgproc.Imgproc.erode + * @see org.opencv.imgproc.Imgproc#morphologyEx + * @see org.opencv.imgproc.Imgproc#dilate + */ + public static void erode(Mat src, Mat dst, Mat kernel) + { + + erode_2(src.nativeObj, dst.nativeObj, kernel.nativeObj); + + return; + } + + + // + // C++: void filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT) + // + +/** + *

Convolves an image with the kernel.

The function applies an arbitrary linear filter to an image. In-place operation is supported. When the aperture is partially outside the image, the function interpolates outlier pixel values according to the specified border mode.

The function actually computes correlation, not convolution:

dst(x,y) = sum over 0 <= x' < kernel.cols, 0 <= y' < kernel.rows of kernel(x',y') * src(x + x' - anchor.x, y + y' - anchor.y)

That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip the kernel using "flip" and set the new anchor to (kernel.cols - anchor.x - 1, kernel.rows - anchor.y - 1).

The function uses the DFT-based algorithm in case of sufficiently large kernels (~11 x 11 or larger) and the direct algorithm (that uses the engine retrieved by "createLinearFilter") for small kernels.
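A minimal Java sketch applying a common 3x3 sharpening kernel (the kernel values are illustrative; src is an assumed input Mat):

// Java code:

Mat kernel = new Mat(3, 3, CvType.CV_32F);
kernel.put(0, 0,
     0, -1,  0,
    -1,  5, -1,
     0, -1,  0);
Mat dst = new Mat();
Imgproc.filter2D(src, dst, -1, kernel); // ddepth = -1 keeps the source depth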

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth desired depth of the destination image; if it is negative, it + * will be the same as src.depth(); the following combinations of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the output image will have the same depth as the + * source.

+ * @param kernel convolution kernel (or rather a correlation kernel), a + * single-channel floating point matrix; if you want to apply different kernels + * to different channels, split the image into separate color planes using + * "split" and process them individually. + * @param anchor anchor of the kernel that indicates the relative position of a + * filtered point within the kernel; the anchor should lie within the kernel; + * default value (-1,-1) means that the anchor is at the kernel center. + * @param delta optional value added to the filtered pixels before storing them + * in dst. + * @param borderType pixel extrapolation method (see "borderInterpolate" for + * details). + * + * @see org.opencv.imgproc.Imgproc.filter2D + * @see org.opencv.imgproc.Imgproc#matchTemplate + * @see org.opencv.core.Core#dft + * @see org.opencv.imgproc.Imgproc#sepFilter2D + */ + public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta, int borderType) + { + + filter2D_0(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y, delta, borderType); + + return; + } + +/** + *

Convolves an image with the kernel.

The function applies an arbitrary linear filter to an image. In-place operation is supported. When the aperture is partially outside the image, the function interpolates outlier pixel values according to the specified border mode.

The function actually computes correlation, not convolution:

dst(x,y) = sum over 0 <= x' < kernel.cols, 0 <= y' < kernel.rows of kernel(x',y') * src(x + x' - anchor.x, y + y' - anchor.y)

That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip the kernel using "flip" and set the new anchor to (kernel.cols - anchor.x - 1, kernel.rows - anchor.y - 1).

The function uses the DFT-based algorithm in case of sufficiently large kernels (~11 x 11 or larger) and the direct algorithm (that uses the engine retrieved by "createLinearFilter") for small kernels.

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth desired depth of the destination image; if it is negative, it + * will be the same as src.depth(); the following combinations of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the output image will have the same depth as the + * source.

+ * @param kernel convolution kernel (or rather a correlation kernel), a + * single-channel floating point matrix; if you want to apply different kernels + * to different channels, split the image into separate color planes using + * "split" and process them individually. + * @param anchor anchor of the kernel that indicates the relative position of a + * filtered point within the kernel; the anchor should lie within the kernel; + * default value (-1,-1) means that the anchor is at the kernel center. + * @param delta optional value added to the filtered pixels before storing them + * in dst. + * + * @see org.opencv.imgproc.Imgproc.filter2D + * @see org.opencv.imgproc.Imgproc#matchTemplate + * @see org.opencv.core.Core#dft + * @see org.opencv.imgproc.Imgproc#sepFilter2D + */ + public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta) + { + + filter2D_1(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y, delta); + + return; + } + +/** + *

Convolves an image with the kernel.

The function applies an arbitrary linear filter to an image. In-place operation is supported. When the aperture is partially outside the image, the function interpolates outlier pixel values according to the specified border mode.

The function actually computes correlation, not convolution:

dst(x,y) = sum over 0 <= x' < kernel.cols, 0 <= y' < kernel.rows of kernel(x',y') * src(x + x' - anchor.x, y + y' - anchor.y)

That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip the kernel using "flip" and set the new anchor to (kernel.cols - anchor.x - 1, kernel.rows - anchor.y - 1).

The function uses the DFT-based algorithm in case of sufficiently large kernels (~11 x 11 or larger) and the direct algorithm (that uses the engine retrieved by "createLinearFilter") for small kernels.

+ * + * @param src input image. + * @param dst output image of the same size and the same number of channels as + * src. + * @param ddepth desired depth of the destination image; if it is negative, it + * will be the same as src.depth(); the following combinations of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the output image will have the same depth as the + * source.

+ * @param kernel convolution kernel (or rather a correlation kernel), a + * single-channel floating point matrix; if you want to apply different kernels + * to different channels, split the image into separate color planes using + * "split" and process them individually. + * + * @see org.opencv.imgproc.Imgproc.filter2D + * @see org.opencv.imgproc.Imgproc#matchTemplate + * @see org.opencv.core.Core#dft + * @see org.opencv.imgproc.Imgproc#sepFilter2D + */ + public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel) + { + + filter2D_2(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj); + + return; + } + + + // + // C++: void findContours(Mat& image, vector_vector_Point& contours, Mat& hierarchy, int mode, int method, Point offset = Point()) + // + +/** + *

Finds contours in a binary image.

The function retrieves contours from the binary image using the algorithm [Suzuki85]. The contours are a useful tool for shape analysis and object detection and recognition. See squares.c in the OpenCV sample directory.

Note: The source image is modified by this function. Also, the function does not take into account the 1-pixel border of the image (it is filled with 0's and used for neighbor analysis in the algorithm), therefore the contours touching the image border will be clipped.

Note: If you use the new Python interface then the CV_ prefix has to be omitted in the contour retrieval mode and contour approximation method parameters (for example, use cv2.RETR_LIST and cv2.CHAIN_APPROX_NONE). If you use the old Python interface then these parameters have the CV_ prefix (for example, use cv.CV_RETR_LIST and cv.CV_CHAIN_APPROX_NONE).
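A minimal Java sketch of a typical call (illustration only; image is an assumed 8-bit binary Mat, and the clone guards against the in-place modification noted above):

// Java code:

List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Mat work = image.clone(); // findContours modifies its input
Imgproc.findContours(work, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
System.out.println("found " + contours.size() + " contours");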

+ * + * @param image Source, an 8-bit single-channel image. Non-zero pixels are + * treated as 1's. Zero pixels remain 0's, so the image is treated as + * binary. You can use "compare", "inRange", "threshold", + * "adaptiveThreshold", "Canny", and others to create a binary image out of a + * grayscale or color one. The function modifies the image while + * extracting the contours. + * @param contours Detected contours. Each contour is stored as a vector of + * points. + * @param hierarchy Optional output vector, containing information about the + * image topology. It has as many elements as the number of contours. For each + * i-th contour contours[i], the elements hierarchy[i][0], + * hiearchy[i][1], hiearchy[i][2], and + * hiearchy[i][3] are set to 0-based indices in contours + * of the next and previous contours at the same hierarchical level, the first + * child contour and the parent contour, respectively. If for the contour + * i there are no next, previous, parent, or nested contours, the + * corresponding elements of hierarchy[i] will be negative. + * @param mode Contour retrieval mode (if you use Python see also a note below). + *
    + *
  • CV_RETR_EXTERNAL retrieves only the extreme outer contours. It sets + * hierarchy[i][2]=hierarchy[i][3]=-1 for all the contours. + *
  • CV_RETR_LIST retrieves all of the contours without establishing any + * hierarchical relationships. + *
  • CV_RETR_CCOMP retrieves all of the contours and organizes them into a + * two-level hierarchy. At the top level, there are external boundaries of the + * components. At the second level, there are boundaries of the holes. If there + * is another contour inside a hole of a connected component, it is still put at + * the top level. + *
  • CV_RETR_TREE retrieves all of the contours and reconstructs a full + * hierarchy of nested contours. This full hierarchy is built and shown in the + * OpenCV contours.c demo. + *
+ * @param method Contour approximation method (if you use Python see also a note + * below). + *
    + *
  • CV_CHAIN_APPROX_NONE stores absolutely all the contour points. That + * is, any 2 subsequent points (x1,y1) and (x2,y2) of + * the contour will be either horizontal, vertical or diagonal neighbors, that + * is, max(abs(x1-x2),abs(y2-y1))==1. + *
  • CV_CHAIN_APPROX_SIMPLE compresses horizontal, vertical, and diagonal + * segments and leaves only their end points. For example, an up-right + * rectangular contour is encoded with 4 points. + *
  • CV_CHAIN_APPROX_TC89_L1,CV_CHAIN_APPROX_TC89_KCOS applies one of the + * flavors of the Teh-Chin chain approximation algorithm. See [TehChin89] for + * details. + *
+ * @param offset Optional offset by which every contour point is shifted. This + * is useful if the contours are extracted from the image ROI and then they + * should be analyzed in the whole image context. + * + * @see org.opencv.imgproc.Imgproc.findContours + */ + public static void findContours(Mat image, List contours, Mat hierarchy, int mode, int method, Point offset) + { + Mat contours_mat = new Mat(); + findContours_0(image.nativeObj, contours_mat.nativeObj, hierarchy.nativeObj, mode, method, offset.x, offset.y); + Converters.Mat_to_vector_vector_Point(contours_mat, contours); + return; + } + +/** + *

Finds contours in a binary image.

The function retrieves contours from the binary image using the algorithm [Suzuki85]. The contours are a useful tool for shape analysis and object detection and recognition. See squares.c in the OpenCV sample directory.

Note: The source image is modified by this function. Also, the function does not take into account the 1-pixel border of the image (it is filled with 0's and used for neighbor analysis in the algorithm), therefore the contours touching the image border will be clipped.

Note: If you use the new Python interface then the CV_ prefix has to be omitted in the contour retrieval mode and contour approximation method parameters (for example, use cv2.RETR_LIST and cv2.CHAIN_APPROX_NONE). If you use the old Python interface then these parameters have the CV_ prefix (for example, use cv.CV_RETR_LIST and cv.CV_CHAIN_APPROX_NONE).

+ * + * @param image Source, an 8-bit single-channel image. Non-zero pixels are + * treated as 1's. Zero pixels remain 0's, so the image is treated as + * binary. You can use "compare", "inRange", "threshold", + * "adaptiveThreshold", "Canny", and others to create a binary image out of a + * grayscale or color one. The function modifies the image while + * extracting the contours. + * @param contours Detected contours. Each contour is stored as a vector of + * points. + * @param hierarchy Optional output vector, containing information about the + * image topology. It has as many elements as the number of contours. For each + * i-th contour contours[i], the elements hierarchy[i][0], + * hiearchy[i][1], hiearchy[i][2], and + * hiearchy[i][3] are set to 0-based indices in contours + * of the next and previous contours at the same hierarchical level, the first + * child contour and the parent contour, respectively. If for the contour + * i there are no next, previous, parent, or nested contours, the + * corresponding elements of hierarchy[i] will be negative. + * @param mode Contour retrieval mode (if you use Python see also a note below). + *
    + *
  • CV_RETR_EXTERNAL retrieves only the extreme outer contours. It sets + * hierarchy[i][2]=hierarchy[i][3]=-1 for all the contours. + *
  • CV_RETR_LIST retrieves all of the contours without establishing any + * hierarchical relationships. + *
  • CV_RETR_CCOMP retrieves all of the contours and organizes them into a + * two-level hierarchy. At the top level, there are external boundaries of the + * components. At the second level, there are boundaries of the holes. If there + * is another contour inside a hole of a connected component, it is still put at + * the top level. + *
  • CV_RETR_TREE retrieves all of the contours and reconstructs a full + * hierarchy of nested contours. This full hierarchy is built and shown in the + * OpenCV contours.c demo. + *
+ * @param method Contour approximation method (if you use Python see also a note + * below). + *
    + *
  • CV_CHAIN_APPROX_NONE stores absolutely all the contour points. That + * is, any 2 subsequent points (x1,y1) and (x2,y2) of + * the contour will be either horizontal, vertical or diagonal neighbors, that + * is, max(abs(x1-x2),abs(y2-y1))==1. + *
  • CV_CHAIN_APPROX_SIMPLE compresses horizontal, vertical, and diagonal + * segments and leaves only their end points. For example, an up-right + * rectangular contour is encoded with 4 points. + *
  • CV_CHAIN_APPROX_TC89_L1,CV_CHAIN_APPROX_TC89_KCOS applies one of the + * flavors of the Teh-Chin chain approximation algorithm. See [TehChin89] for + * details. + *
+ * + * @see org.opencv.imgproc.Imgproc.findContours + */ + public static void findContours(Mat image, List contours, Mat hierarchy, int mode, int method) + { + Mat contours_mat = new Mat(); + findContours_1(image.nativeObj, contours_mat.nativeObj, hierarchy.nativeObj, mode, method); + Converters.Mat_to_vector_vector_Point(contours_mat, contours); + return; + } + + + // + // C++: RotatedRect fitEllipse(vector_Point2f points) + // + +/** + *

Fits an ellipse around a set of 2D points.

The function calculates the ellipse that fits a set of 2D points best (in a least-squares sense). It returns the rotated rectangle in which the ellipse is inscribed. The algorithm [Fitzgibbon95] is used.
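A minimal Java sketch (contour is an assumed MatOfPoint, e.g. from findContours; fitEllipse needs at least 5 points):

// Java code:

MatOfPoint2f pts = new MatOfPoint2f(contour.toArray());
RotatedRect box = Imgproc.fitEllipse(pts);
Core.ellipse(img, box, new Scalar(0, 255, 0)); // draw the fitted ellipse on img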

+ * + * @param points Input 2D point set, stored in: + *
    + *
  • std.vector<> or Mat (C++ interface) + *
  • CvSeq* or CvMat* (C interface) + *
  • Nx2 numpy array (Python interface) + *
+ * + * @see org.opencv.imgproc.Imgproc.fitEllipse + */ + public static RotatedRect fitEllipse(MatOfPoint2f points) + { + Mat points_mat = points; + RotatedRect retVal = new RotatedRect(fitEllipse_0(points_mat.nativeObj)); + + return retVal; + } + + + // + // C++: void fitLine(Mat points, Mat& line, int distType, double param, double reps, double aeps) + // + +/** + *
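A minimal sketch of fitEllipse (illustrative only, not part of the patch); the function needs at least five points:

    // Six hand-picked points roughly on an ellipse (hypothetical data).
    MatOfPoint2f pts = new MatOfPoint2f(
            new Point(10, 20), new Point(30, 5), new Point(55, 18),
            new Point(40, 40), new Point(15, 38), new Point(28, 45));
    RotatedRect box = Imgproc.fitEllipse(pts);
    // The fitted ellipse is inscribed in the returned rotated rectangle.
    System.out.println("center=" + box.center + " size=" + box.size + " angle=" + box.angle);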

Fits a line to a 2D or 3D point set.

+ * + *

The function fitLine fits a line to a 2D or 3D point set by + * minimizing sum_i rho(r_i), where r_i is the distance between + * the i^(th) point and the line, and rho(r) is a distance + * function, one of the following:

+ *
    + *
  • distType=CV_DIST_L2 + *
+ * + *

rho(r) = r^2/2 (the simplest and the fastest least-squares method)

+ * + *
    + *
  • distType=CV_DIST_L1 + *
+ * + *

rho(r) = r

+ * + *
    + *
  • distType=CV_DIST_L12 + *
+ * + *

rho(r) = 2 * (sqrt(1 + r^2/2) - 1)

+ * + *
    + *
  • distType=CV_DIST_FAIR + *
+ * + *

rho(r) = C^2 * (r/C - log(1 + r/C)) where C=1.3998

+ * + *
    + *
  • distType=CV_DIST_WELSCH + *
+ * + *

rho(r) = (C^2)/2 * (1 - exp(-(r/C)^2)) where C=2.9846

+ * + *
    + *
  • distType=CV_DIST_HUBER + *
+ * + *

rho(r) = r^2/2 if r < C; C * (r-C/2) otherwise where C=1.345

+ * + *

The algorithm is based on the M-estimator (http://en.wikipedia.org/wiki/M-estimator) + * technique that iteratively fits the line using the weighted least-squares + * algorithm. After each iteration the weights w_i are adjusted to be + * inversely proportional to rho(r_i).

+ * + * @param points Input vector of 2D or 3D points, stored in std.vector<> + * or Mat. + * @param line Output line parameters. In case of 2D fitting, it should be a + * vector of 4 elements (like Vec4f) - (vx, vy, x0, + * y0), where (vx, vy) is a normalized vector collinear to + * the line and (x0, y0) is a point on the line. In case of 3D + * fitting, it should be a vector of 6 elements (like Vec6f) - + * (vx, vy, vz, x0, y0, z0), where (vx, vy, vz) is a + * normalized vector collinear to the line and (x0, y0, z0) is a + * point on the line. + * @param distType Distance used by the M-estimator (see the discussion below). + * @param param Numerical parameter (C) for some types of + * distances. If it is 0, an optimal value is chosen. + * @param reps Sufficient accuracy for the radius (distance between the + * coordinate origin and the line). + * @param aeps Sufficient accuracy for the angle. 0.01 would be a good default + * value for reps and aeps. + * + * @see org.opencv.imgproc.Imgproc.fitLine + */ + public static void fitLine(Mat points, Mat line, int distType, double param, double reps, double aeps) + { + + fitLine_0(points.nativeObj, line.nativeObj, distType, param, reps, aeps); + + return; + } + + + // + // C++: int floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4) + // + +/** + *
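A minimal 2D sketch of fitLine (illustrative only, not part of the patch); for 2D input the output Mat holds (vx, vy, x0, y0) as four CV_32F rows:

    // Points lying on the line y = 2x + 1 (hypothetical data).
    MatOfPoint2f pts = new MatOfPoint2f(
            new Point(0, 1), new Point(1, 3), new Point(2, 5), new Point(3, 7));
    Mat line = new Mat();
    Imgproc.fitLine(pts, line, Imgproc.CV_DIST_L2, 0, 0.01, 0.01);
    double vx = line.get(0, 0)[0], vy = line.get(1, 0)[0];
    double x0 = line.get(2, 0)[0], y0 = line.get(3, 0)[0];
    System.out.println("direction=(" + vx + ", " + vy + "), point=(" + x0 + ", " + y0 + ")");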

Fills a connected component with the given color.

+ * + *

The functions floodFill fill a connected component starting from + * the seed point with the specified color. The connectivity is determined by + * the color/brightness closeness of the neighbor pixels. The pixel at + * (x,y) is considered to belong to the repainted domain if:

+ *
    + *
  • src(x',y')- loDiff <= src(x,y) <= src(x',y')+ upDiff + *
+ * + *

in case of a grayscale image and floating range

+ *
    + *
  • src(seedPoint.x, seedPoint.y)- loDiff <= src(x,y) <= + * src(seedPoint.x, seedPoint.y)+ upDiff + *
+ * + *

in case of a grayscale image and fixed range

+ *
    + *
  • src(x',y')_r- loDiff _r <= src(x,y)_r <= src(x',y')_r+ upDiff + * _r, + * + * + *
+ * + *

src(x',y')_g- loDiff _g <= src(x,y)_g <= src(x',y')_g+ upDiff _g

+ * + *

and

+ * + *

src(x',y')_b- loDiff _b <= src(x,y)_b <= src(x',y')_b+ upDiff _b

+ * + *

in case of a color image and floating range

+ *
    + *
  • src(seedPoint.x, seedPoint.y)_r- loDiff _r <= src(x,y)_r <= + * src(seedPoint.x, seedPoint.y)_r+ upDiff _r, + * + * + *
+ * + *

src(seedPoint.x, seedPoint.y)_g- loDiff _g <= src(x,y)_g <= + * src(seedPoint.x, seedPoint.y)_g+ upDiff _g

+ * + *

and

+ * + *

src(seedPoint.x, seedPoint.y)_b- loDiff _b <= src(x,y)_b <= + * src(seedPoint.x, seedPoint.y)_b+ upDiff _b

+ * + *

in case of a color image and fixed range

+ * + *

where src(x',y') is the value of one of pixel neighbors that is + * already known to belong to the component. That is, to be added to the + * connected component, a color/brightness of the pixel should be close enough + * to:

+ *
    + *
  • Color/brightness of one of its neighbors that already belong to the + * connected component in case of a floating range. + *
  • Color/brightness of the seed point in case of a fixed range. + *
+ * + *

Use these functions to either mark a connected component with the specified + * color in-place, or build a mask and then extract the contour, or copy the + * region to another image, and so on. Various modes of the function are + * demonstrated in the floodfill.cpp sample.

+ * + * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It + * is modified by the function unless the FLOODFILL_MASK_ONLY flag + * is set in the second variant of the function. See the details below. + * @param mask (For the second function only) Operation mask that should be a + * single-channel 8-bit image, 2 pixels wider and 2 pixels taller. The function + * uses and updates the mask, so you take responsibility of initializing the + * mask content. Flood-filling cannot go across non-zero pixels in + * the mask. For example, an edge detector output can be used as a mask to stop + * filling at edges. It is possible to use the same mask in multiple calls to + * the function to make sure the filled area does not overlap. + * + *

Note: Since the mask is larger than the filled image, a pixel (x, y) + * in image corresponds to the pixel (x+1, y+1) in the + * mask.

+ * @param seedPoint Starting point. + * @param newVal New value of the repainted domain pixels. + * @param rect Optional output parameter set by the function to the minimum + * bounding rectangle of the repainted domain. + * @param loDiff Maximal lower brightness/color difference between the currently + * observed pixel and one of its neighbors belonging to the component, or a seed + * pixel being added to the component. + * @param upDiff Maximal upper brightness/color difference between the currently + * observed pixel and one of its neighbors belonging to the component, or a seed + * pixel being added to the component. + * @param flags Operation flags. Lower bits contain a connectivity value, 4 + * (default) or 8, used within the function. Connectivity determines which + * neighbors of a pixel are considered. Upper bits can be 0 or a combination of + * the following flags: + *
    + *
  • FLOODFILL_FIXED_RANGE If set, the difference between the current pixel + * and seed pixel is considered. Otherwise, the difference between neighbor + * pixels is considered (that is, the range is floating). + *
  • FLOODFILL_MASK_ONLY If set, the function does not change the image + * (newVal is ignored), but fills the mask. The flag can be used + * for the second variant only. + *
+ * + * @see org.opencv.imgproc.Imgproc.floodFill + * @see org.opencv.imgproc.Imgproc#findContours + */ + public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect, Scalar loDiff, Scalar upDiff, int flags) + { + double[] rect_out = new double[4]; + int retVal = floodFill_0(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out, loDiff.val[0], loDiff.val[1], loDiff.val[2], loDiff.val[3], upDiff.val[0], upDiff.val[1], upDiff.val[2], upDiff.val[3], flags); + if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } + return retVal; + } + +/** + *
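A minimal sketch of the full floodFill variant (illustrative only, not part of the patch; "photo.jpg" is hypothetical). The mask must be two pixels wider and taller than the image:

    Mat img = Highgui.imread("photo.jpg");
    Mat mask = Mat.zeros(img.rows() + 2, img.cols() + 2, CvType.CV_8UC1);
    Rect filled = new Rect();
    int flags = 4 | Imgproc.FLOODFILL_FIXED_RANGE;  // 4-connectivity, compare against the seed
    Imgproc.floodFill(img, mask, new Point(img.cols() / 2, img.rows() / 2),
            new Scalar(0, 0, 255), filled,
            new Scalar(20, 20, 20), new Scalar(20, 20, 20), flags);
    System.out.println("repainted region: " + filled);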

Fills a connected component with the given color.

+ * + *

The functions floodFill fill a connected component starting from + * the seed point with the specified color. The connectivity is determined by + * the color/brightness closeness of the neighbor pixels. The pixel at + * (x,y) is considered to belong to the repainted domain if:

+ *
    + *
  • src(x',y')- loDiff <= src(x,y) <= src(x',y')+ upDiff + *
+ * + *

in case of a grayscale image and floating range

+ *
    + *
  • src(seedPoint.x, seedPoint.y)- loDiff <= src(x,y) <= + * src(seedPoint.x, seedPoint.y)+ upDiff + *
+ * + *

in case of a grayscale image and fixed range

+ *
    + *
  • src(x',y')_r- loDiff _r <= src(x,y)_r <= src(x',y')_r+ upDiff + * _r, + * + * + *
+ * + *

src(x',y')_g- loDiff _g <= src(x,y)_g <= src(x',y')_g+ upDiff _g

+ * + *

and

+ * + *

src(x',y')_b- loDiff _b <= src(x,y)_b <= src(x',y')_b+ upDiff _b

+ * + *

in case of a color image and floating range

+ *
    + *
  • src(seedPoint.x, seedPoint.y)_r- loDiff _r <= src(x,y)_r <= + * src(seedPoint.x, seedPoint.y)_r+ upDiff _r, + * + * + *
+ * + *

src(seedPoint.x, seedPoint.y)_g- loDiff _g <= src(x,y)_g <= + * src(seedPoint.x, seedPoint.y)_g+ upDiff _g

+ * + *

and

+ * + *

src(seedPoint.x, seedPoint.y)_b- loDiff _b <= src(x,y)_b <= + * src(seedPoint.x, seedPoint.y)_b+ upDiff _b

+ * + *

in case of a color image and fixed range

+ * + *

where src(x',y') is the value of one of pixel neighbors that is + * already known to belong to the component. That is, to be added to the + * connected component, a color/brightness of the pixel should be close enough + * to:

+ *
    + *
  • Color/brightness of one of its neighbors that already belong to the + * connected component in case of a floating range. + *
  • Color/brightness of the seed point in case of a fixed range. + *
+ * + *

Use these functions to either mark a connected component with the specified + * color in-place, or build a mask and then extract the contour, or copy the + * region to another image, and so on. Various modes of the function are + * demonstrated in the floodfill.cpp sample.

+ * + * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It + * is modified by the function unless the FLOODFILL_MASK_ONLY flag + * is set in the second variant of the function. See the details below. + * @param mask (For the second function only) Operation mask that should be a + * single-channel 8-bit image, 2 pixels wider and 2 pixels taller. The function + * uses and updates the mask, so you take responsibility of initializing the + * mask content. Flood-filling cannot go across non-zero pixels in + * the mask. For example, an edge detector output can be used as a mask to stop + * filling at edges. It is possible to use the same mask in multiple calls to + * the function to make sure the filled area does not overlap. + * + *

Note: Since the mask is larger than the filled image, a pixel (x, y) + * in image corresponds to the pixel (x+1, y+1) in the + * mask.

+ * @param seedPoint Starting point. + * @param newVal New value of the repainted domain pixels. + * + * @see org.opencv.imgproc.Imgproc.floodFill + * @see org.opencv.imgproc.Imgproc#findContours + */ + public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal) + { + + int retVal = floodFill_1(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3]); + + return retVal; + } + + + // + // C++: Mat getAffineTransform(vector_Point2f src, vector_Point2f dst) + // + +/** + *

Calculates an affine transform from three pairs of the corresponding points.

+ * + *

The function calculates the 2 x 3 matrix of an affine transform so + * that:

+ * + *

[x'_i; y'_i] = map_matrix * [x_i; y_i; 1]

+ * + *

where

+ * + *

dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2

+ * + * @param src Coordinates of triangle vertices in the source image. + * @param dst Coordinates of the corresponding triangle vertices in the + * destination image. + * + * @see org.opencv.imgproc.Imgproc.getAffineTransform + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.core.Core#transform + */ + public static Mat getAffineTransform(MatOfPoint2f src, MatOfPoint2f dst) + { + Mat src_mat = src; + Mat dst_mat = dst; + Mat retVal = new Mat(getAffineTransform_0(src_mat.nativeObj, dst_mat.nativeObj)); + + return retVal; + } + + + // + // C++: Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false) + // + +/** + *
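A minimal sketch pairing getAffineTransform with warpAffine (illustrative only, not part of the patch; src is a previously loaded Mat and the point pairs are hypothetical):

    // Three point pairs fully determine the 2x3 affine matrix.
    MatOfPoint2f srcTri = new MatOfPoint2f(new Point(0, 0), new Point(100, 0), new Point(0, 100));
    MatOfPoint2f dstTri = new MatOfPoint2f(new Point(10, 10), new Point(110, 20), new Point(5, 115));
    Mat M = Imgproc.getAffineTransform(srcTri, dstTri);
    Mat warped = new Mat();
    Imgproc.warpAffine(src, warped, M, src.size());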

Returns the default new camera matrix.

+ * + *

The function returns the camera matrix that is either an exact copy of the + * input cameraMatrix (when centerPrincipalPoint=false), + * or the modified one (when centerPrincipalPoint=true).

+ * + *

In the latter case, the new camera matrix will be:

+ * + *

|f_x 0 (imgSize.width-1)*0.5| + * |0 f_y (imgSize.height-1)*0.5| + * |0 0 1|

+ * + *

where f_x and f_y are (0,0) and (1,1) + * elements of cameraMatrix, respectively.

+ * + *

By default, the undistortion functions in OpenCV (see "initUndistortRectifyMap", + * "undistort") do not move the principal point. However, when you work with + * stereo, it is important to move the principal points in both views to the + * same y-coordinate (which is required by most stereo correspondence + * algorithms), and maybe to the same x-coordinate too. So, you can form the + * new camera matrix for each view where the principal points are located at the + * center.

+ * + * @param cameraMatrix Input camera matrix. + * @param imgsize Camera view image size in pixels. + * @param centerPrincipalPoint Location of the principal point in the new camera + * matrix. The parameter indicates whether this location should be at the image + * center or not. + * + * @see org.opencv.imgproc.Imgproc.getDefaultNewCameraMatrix + */ + public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize, boolean centerPrincipalPoint) + { + + Mat retVal = new Mat(getDefaultNewCameraMatrix_0(cameraMatrix.nativeObj, imgsize.width, imgsize.height, centerPrincipalPoint)); + + return retVal; + } + +/** + *

Returns the default new camera matrix.

+ * + *

The function returns the camera matrix that is either an exact copy of the + * input cameraMatrix (when centerPrincipalPoint=false), + * or the modified one (when centerPrincipalPoint=true).

+ * + *

In the latter case, the new camera matrix will be:

+ * + *

|f_x 0 (imgSize.width-1)*0.5| + * |0 f_y (imgSize.height-1)*0.5| + * |0 0 1|

+ * + *

where f_x and f_y are (0,0) and (1,1) + * elements of cameraMatrix, respectively.

+ * + *

By default, the undistortion functions in OpenCV (see "initUndistortRectifyMap", + * "undistort") do not move the principal point. However, when you work with + * stereo, it is important to move the principal points in both views to the + * same y-coordinate (which is required by most stereo correspondence + * algorithms), and maybe to the same x-coordinate too. So, you can form the + * new camera matrix for each view where the principal points are located at the + * center.

+ * + * @param cameraMatrix Input camera matrix. + * + * @see org.opencv.imgproc.Imgproc.getDefaultNewCameraMatrix + */ + public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix) + { + + Mat retVal = new Mat(getDefaultNewCameraMatrix_1(cameraMatrix.nativeObj)); + + return retVal; + } + + + // + // C++: void getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F) + // + +/** + *

Returns filter coefficients for computing spatial image derivatives.

+ * + *

The function computes and returns the filter coefficients for spatial image + * derivatives. When ksize=CV_SCHARR, the Scharr 3 x 3 + * kernels are generated (see "Scharr"). Otherwise, Sobel kernels are generated + * (see "Sobel"). The filters are normally passed to "sepFilter2D" or to + * "createSeparableLinearFilter".

+ * + * @param kx Output matrix of row filter coefficients. It has the type + * ktype. + * @param ky Output matrix of column filter coefficients. It has the type + * ktype. + * @param dx Derivative order with respect to x. + * @param dy Derivative order with respect to y. + * @param ksize Aperture size. It can be CV_SCHARR, 1, 3, 5, or 7. + * @param normalize Flag indicating whether to normalize (scale down) the filter + * coefficients or not. Theoretically, the coefficients should have the + * denominator =2^(ksize*2-dx-dy-2). If you are going to filter + * floating-point images, you are likely to use the normalized kernels. But if + * you compute derivatives of an 8-bit image, store the results in a 16-bit + * image, and wish to preserve all the fractional bits, you may want to set + * normalize=false. + * @param ktype Type of filter coefficients. It can be CV_32F or + * CV_64F. + * + * @see org.opencv.imgproc.Imgproc.getDerivKernels + */ + public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize, boolean normalize, int ktype) + { + + getDerivKernels_0(kx.nativeObj, ky.nativeObj, dx, dy, ksize, normalize, ktype); + + return; + } + +/** + *

Returns filter coefficients for computing spatial image derivatives.

+ * + *

The function computes and returns the filter coefficients for spatial image + * derivatives. When ksize=CV_SCHARR, the Scharr 3 x 3 + * kernels are generated (see "Scharr"). Otherwise, Sobel kernels are generated + * (see "Sobel"). The filters are normally passed to "sepFilter2D" or to + * "createSeparableLinearFilter".

+ * + * @param kx Output matrix of row filter coefficients. It has the type + * ktype. + * @param ky Output matrix of column filter coefficients. It has the type + * ktype. + * @param dx Derivative order in respect of x. + * @param dy Derivative order in respect of y. + * @param ksize Aperture size. It can be CV_SCHARR, 1, 3, 5, or 7. + * + * @see org.opencv.imgproc.Imgproc.getDerivKernels + */ + public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize) + { + + getDerivKernels_1(kx.nativeObj, ky.nativeObj, dx, dy, ksize); + + return; + } + + + // + // C++: Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F) + // + + public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi, int ktype) + { + + Mat retVal = new Mat(getGaborKernel_0(ksize.width, ksize.height, sigma, theta, lambd, gamma, psi, ktype)); + + return retVal; + } + + public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma) + { + + Mat retVal = new Mat(getGaborKernel_1(ksize.width, ksize.height, sigma, theta, lambd, gamma)); + + return retVal; + } + + + // + // C++: Mat getGaussianKernel(int ksize, double sigma, int ktype = CV_64F) + // + +/** + *

Returns Gaussian filter coefficients.

+ * + *

The function computes and returns the ksize x 1 matrix of Gaussian + * filter coefficients:

+ * + *

G_i = alpha * e^(-(i-(ksize-1)/2)^2/(2*sigma^2)),

+ * + *

where i=0..ksize-1 and alpha is the scale factor chosen so + * that sum_i G_i=1.

+ * + *

Two such generated kernels can be passed to "sepFilter2D" or to + * "createSeparableLinearFilter". Those functions automatically recognize + * smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and + * handle them accordingly. You may also use the higher-level "GaussianBlur".

+ * + * @param ksize Aperture size. It should be odd (ksize mod 2 = 1) and + * positive. + * @param sigma Gaussian standard deviation. If it is non-positive, it is + * computed from ksize as sigma = 0.3*((ksize-1)*0.5 - 1) + + * 0.8. + * @param ktype Type of filter coefficients. It can be CV_32F or + * CV_64F. + * + * @see org.opencv.imgproc.Imgproc.getGaussianKernel + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#getStructuringElement + * @see org.opencv.imgproc.Imgproc#getDerivKernels + */ + public static Mat getGaussianKernel(int ksize, double sigma, int ktype) + { + + Mat retVal = new Mat(getGaussianKernel_0(ksize, sigma, ktype)); + + return retVal; + } + +/** + *

Returns Gaussian filter coefficients.

+ * + *

The function computes and returns the ksize x 1 matrix of Gaussian + * filter coefficients:

+ * + *

G_i = alpha * e^(-(i-(ksize-1)/2)^2/(2*sigma^2)),

+ * + *

where i=0..ksize-1 and alpha is the scale factor chosen so + * that sum_i G_i=1.

+ * + *

Two such generated kernels can be passed to "sepFilter2D" or to + * "createSeparableLinearFilter". Those functions automatically recognize + * smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and + * handle them accordingly. You may also use the higher-level "GaussianBlur".

+ * + * @param ksize Aperture size. It should be odd (ksize mod 2 = 1) and + * positive. + * @param sigma Gaussian standard deviation. If it is non-positive, it is + * computed from ksize as sigma = 0.3*((ksize-1)*0.5 - 1) + + * 0.8. + * + * @see org.opencv.imgproc.Imgproc.getGaussianKernel + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#sepFilter2D + * @see org.opencv.imgproc.Imgproc#getStructuringElement + * @see org.opencv.imgproc.Imgproc#getDerivKernels + */ + public static Mat getGaussianKernel(int ksize, double sigma) + { + + Mat retVal = new Mat(getGaussianKernel_1(ksize, sigma)); + + return retVal; + } + + + // + // C++: Mat getPerspectiveTransform(Mat src, Mat dst) + // + +/** + *
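A minimal sketch pairing getGaussianKernel with sepFilter2D (illustrative only, not part of the patch; src is a previously loaded Mat):

    // 7-tap Gaussian, sigma 1.5, applied separably along rows and columns;
    // roughly equivalent to GaussianBlur(src, dst, new Size(7, 7), 1.5).
    Mat kernel = Imgproc.getGaussianKernel(7, 1.5);
    Mat smoothed = new Mat();
    Imgproc.sepFilter2D(src, smoothed, -1, kernel, kernel);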

Calculates a perspective transform from four pairs of the corresponding + * points.

+ * + *

The function calculates the 3 x 3 matrix of a perspective transform + * so that:

+ * + *

[t_i*x'_i; t_i*y'_i; t_i] = map_matrix * [x_i; y_i; 1]

+ * + *

where

+ * + *

dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3

+ * + * @param src Coordinates of quadrangle vertices in the source image. + * @param dst Coordinates of the corresponding quadrangle vertices in the + * destination image. + * + * @see org.opencv.imgproc.Imgproc.getPerspectiveTransform + * @see org.opencv.calib3d.Calib3d#findHomography + * @see org.opencv.core.Core#perspectiveTransform + * @see org.opencv.imgproc.Imgproc#warpPerspective + */ + public static Mat getPerspectiveTransform(Mat src, Mat dst) + { + + Mat retVal = new Mat(getPerspectiveTransform_0(src.nativeObj, dst.nativeObj)); + + return retVal; + } + + + // + // C++: void getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1) + // + +/** + *
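A minimal sketch of getPerspectiveTransform with warpPerspective (illustrative only, not part of the patch; src is a previously loaded Mat and the corner coordinates are hypothetical):

    // Four point pairs determine the 3x3 perspective matrix.
    MatOfPoint2f srcQuad = new MatOfPoint2f(
            new Point(0, 0), new Point(300, 0), new Point(300, 400), new Point(0, 400));
    MatOfPoint2f dstQuad = new MatOfPoint2f(
            new Point(20, 10), new Point(280, 25), new Point(295, 390), new Point(5, 380));
    Mat H = Imgproc.getPerspectiveTransform(srcQuad, dstQuad);
    Mat out = new Mat();
    Imgproc.warpPerspective(src, out, H, src.size());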

Retrieves a pixel rectangle from an image with sub-pixel accuracy.

+ * + *

The function getRectSubPix extracts pixels from src

+ * + *

dst(x, y) = src(x + center.x - (dst.cols -1)*0.5, y + center.y - + * (dst.rows -1)*0.5)

+ * + *

where the values of the pixels at non-integer coordinates are retrieved using + * bilinear interpolation. Every channel of multi-channel images is processed + * independently. While the center of the rectangle must be inside the image, + * parts of the rectangle may be outside. In this case, the replication border + * mode (see "borderInterpolate") is used to extrapolate the pixel values + * outside of the image.

+ * + * @param image Source image. + * @param patchSize Size of the extracted patch. + * @param center Floating point coordinates of the center of the extracted + * rectangle within the source image. The center must be inside the image. + * @param patch Extracted patch (output array). + * @param patchType Depth of the extracted pixels. By default, they have the + * same depth as src. + * + * @see org.opencv.imgproc.Imgproc.getRectSubPix + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#warpPerspective + */ + public static void getRectSubPix(Mat image, Size patchSize, Point center, Mat patch, int patchType) + { + + getRectSubPix_0(image.nativeObj, patchSize.width, patchSize.height, center.x, center.y, patch.nativeObj, patchType); + + return; + } + +/** + *

Retrieves a pixel rectangle from an image with sub-pixel accuracy.

+ * + *

The function getRectSubPix extracts pixels from src

+ * + *

dst(x, y) = src(x + center.x - (dst.cols -1)*0.5, y + center.y - + * (dst.rows -1)*0.5)

+ * + *

where the values of the pixels at non-integer coordinates are retrieved using + * bilinear interpolation. Every channel of multi-channel images is processed + * independently. While the center of the rectangle must be inside the image, + * parts of the rectangle may be outside. In this case, the replication border + * mode (see "borderInterpolate") is used to extrapolate the pixel values + * outside of the image.

+ * + * @param image Source image. + * @param patchSize Size of the extracted patch. + * @param center Floating point coordinates of the center of the extracted + * rectangle within the source image. The center must be inside the image. + * @param patch Extracted patch (output array). + * + * @see org.opencv.imgproc.Imgproc.getRectSubPix + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#warpPerspective + */ + public static void getRectSubPix(Mat image, Size patchSize, Point center, Mat patch) + { + + getRectSubPix_1(image.nativeObj, patchSize.width, patchSize.height, center.x, center.y, patch.nativeObj); + + return; + } + + + // + // C++: Mat getRotationMatrix2D(Point2f center, double angle, double scale) + // + +/** + *

Calculates an affine matrix of 2D rotation.

+ * + *

The function calculates the following matrix:

+ * + *

|alpha beta (1-alpha)*center.x - beta*center.y| + * |-beta alpha beta*center.x + (1-alpha)*center.y|

+ * + *

where

+ * + *

alpha = scale * cos angle, + * beta = scale * sin angle

+ * + *

The transformation maps the rotation center to itself. If this is not the + * target, adjust the shift.

+ * + * @param center Center of the rotation in the source image. + * @param angle Rotation angle in degrees. Positive values mean + * counter-clockwise rotation (the coordinate origin is assumed to be the + * top-left corner). + * @param scale Isotropic scale factor. + * + * @see org.opencv.imgproc.Imgproc.getRotationMatrix2D + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#getAffineTransform + * @see org.opencv.core.Core#transform + */ + public static Mat getRotationMatrix2D(Point center, double angle, double scale) + { + + Mat retVal = new Mat(getRotationMatrix2D_0(center.x, center.y, angle, scale)); + + return retVal; + } + + + // + // C++: Mat getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1)) + // + +/** + *
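A minimal sketch of getRotationMatrix2D (illustrative only, not part of the patch; src is a previously loaded Mat):

    // Rotate 30 degrees counter-clockwise around the image center, no scaling.
    Point center = new Point(src.cols() / 2.0, src.rows() / 2.0);
    Mat R = Imgproc.getRotationMatrix2D(center, 30, 1.0);
    Mat rotated = new Mat();
    Imgproc.warpAffine(src, rotated, R, src.size());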

Returns a structuring element of the specified size and shape for + * morphological operations.

+ * + *

The function constructs and returns the structuring element that can be + * further passed to "createMorphologyFilter", "erode", "dilate" or + * "morphologyEx". But you can also construct an arbitrary binary mask yourself + * and use it as the structuring element.

+ * + *

Note: When using OpenCV 1.x C API, the created structuring element + * IplConvKernel* element must be released in the end using + * cvReleaseStructuringElement(&element).

+ * + * @param shape Element shape that could be one of the following: + *
    + *
  • MORPH_RECT - a rectangular structuring element: + *
+ * + *

E_(ij)=1

+ * + *
    + *
  • MORPH_ELLIPSE - an elliptic structuring element, that is, a filled + * ellipse inscribed into the rectangle Rect(0, 0, esize.width, + * esize.height) + *
  • MORPH_CROSS - a cross-shaped structuring element: + *
+ * + *

E_(ij) = 1 if i=anchor.y or j=anchor.x; 0 otherwise

+ * + *
    + *
  • CV_SHAPE_CUSTOM - custom structuring element (OpenCV 1.x API) + *
+ * @param ksize Size of the structuring element. + * @param anchor Anchor position within the element. The default value (-1, + * -1) means that the anchor is at the center. Note that only the shape of + * a cross-shaped element depends on the anchor position. In other cases the + * anchor just regulates how much the result of the morphological operation is + * shifted. + * + * @see org.opencv.imgproc.Imgproc.getStructuringElement + */ + public static Mat getStructuringElement(int shape, Size ksize, Point anchor) + { + + Mat retVal = new Mat(getStructuringElement_0(shape, ksize.width, ksize.height, anchor.x, anchor.y)); + + return retVal; + } + +/** + *

Returns a structuring element of the specified size and shape for + * morphological operations.

+ * + *

The function constructs and returns the structuring element that can be + * further passed to "createMorphologyFilter", "erode", "dilate" or + * "morphologyEx". But you can also construct an arbitrary binary mask yourself + * and use it as the structuring element.

+ * + *

Note: When using OpenCV 1.x C API, the created structuring element + * IplConvKernel* element must be released in the end using + * cvReleaseStructuringElement(&element).

+ * + * @param shape Element shape that could be one of the following: + *
    + *
  • MORPH_RECT - a rectangular structuring element: + *
+ * + *

E_(ij)=1

+ * + *
    + *
  • MORPH_ELLIPSE - an elliptic structuring element, that is, a filled + * ellipse inscribed into the rectangle Rect(0, 0, esize.width, + * esize.height) + *
  • MORPH_CROSS - a cross-shaped structuring element: + *
+ * + *

E_(ij) = 1 if i=anchor.y or j=anchor.x; 0 otherwise

+ * + *
    + *
  • CV_SHAPE_CUSTOM - custom structuring element (OpenCV 1.x API) + *
+ * @param ksize Size of the structuring element. + * + * @see org.opencv.imgproc.Imgproc.getStructuringElement + */ + public static Mat getStructuringElement(int shape, Size ksize) + { + + Mat retVal = new Mat(getStructuringElement_1(shape, ksize.width, ksize.height)); + + return retVal; + } + + + // + // C++: void goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask = Mat(), int blockSize = 3, bool useHarrisDetector = false, double k = 0.04) + // + +/** + *
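A minimal sketch of getStructuringElement feeding morphologyEx (illustrative only, not part of the patch; bin is a previously computed binary Mat):

    // 5x5 elliptic element; opening removes small speckles (erode, then dilate).
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
    Mat opened = new Mat();
    Imgproc.morphologyEx(bin, opened, Imgproc.MORPH_OPEN, kernel);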

Determines strong corners on an image.

+ * + *

The function finds the most prominent corners in the image or in the + * specified image region, as described in [Shi94]:

+ *
    + *
  • Function calculates the corner quality measure at every source image + * pixel using the "cornerMinEigenVal" or "cornerHarris". + *
  • Function performs a non-maximum suppression (the local maximums in *3 + * x 3* neighborhood are retained). + *
  • The corners with the minimal eigenvalue less than qualityLevel * + * max_(x,y) qualityMeasureMap(x,y) are rejected. + *
  • The remaining corners are sorted by the quality measure in the + * descending order. + *
  • The function throws away each corner for which there is a stronger corner + * at a distance less than minDistance. + *
+ * + *

The function can be used to initialize a point-based tracker of an object.

+ * + *

Note: If the function is called with different values A and + * B of the parameter qualityLevel, and A + * > B, the vector of returned corners with qualityLevel=A will + * be the prefix of the output vector with qualityLevel=B.

+ * + * @param image Input 8-bit or floating-point 32-bit, single-channel image. + * @param corners Output vector of detected corners. + * @param maxCorners Maximum number of corners to return. If there are more + * corners than are found, the strongest of them is returned. + * @param qualityLevel Parameter characterizing the minimal accepted quality of + * image corners. The parameter value is multiplied by the best corner quality + * measure, which is the minimal eigenvalue (see "cornerMinEigenVal") or the + * Harris function response (see "cornerHarris"). The corners with the quality + * measure less than the product are rejected. For example, if the best corner + * has the quality measure = 1500, and the qualityLevel=0.01, then + * all the corners with the quality measure less than 15 are rejected. + * @param minDistance Minimum possible Euclidean distance between the returned + * corners. + * @param mask Optional region of interest. If the image is not empty (it needs + * to have the type CV_8UC1 and the same size as image), + * it specifies the region in which the corners are detected. + * @param blockSize Size of an average block for computing a derivative + * covariation matrix over each pixel neighborhood. See "cornerEigenValsAndVecs". + * @param useHarrisDetector Parameter indicating whether to use a Harris + * detector (see "cornerHarris") or "cornerMinEigenVal". + * @param k Free parameter of the Harris detector. + * + * @see org.opencv.imgproc.Imgproc.goodFeaturesToTrack + * @see org.opencv.imgproc.Imgproc#cornerHarris + * @see org.opencv.video.Video#estimateRigidTransform + * @see org.opencv.imgproc.Imgproc#cornerMinEigenVal + * @see org.opencv.video.Video#calcOpticalFlowPyrLK + */ + public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, boolean useHarrisDetector, double k) + { + Mat corners_mat = corners; + goodFeaturesToTrack_0(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, useHarrisDetector, k); + + return; + } + +/** + *

Determines strong corners on an image.

+ * + *

The function finds the most prominent corners in the image or in the + * specified image region, as described in [Shi94]:

+ *
    + *
  • Function calculates the corner quality measure at every source image + * pixel using the "cornerMinEigenVal" or "cornerHarris". + *
  • Function performs a non-maximum suppression (the local maximums in *3 + * x 3* neighborhood are retained). + *
  • The corners with the minimal eigenvalue less than qualityLevel * + * max_(x,y) qualityMeasureMap(x,y) are rejected. + *
  • The remaining corners are sorted by the quality measure in the + * descending order. + *
  • The function throws away each corner for which there is a stronger corner + * at a distance less than minDistance. + *
+ * + *

The function can be used to initialize a point-based tracker of an object.

+ * + *

Note: If the function is called with different values A and + * B of the parameter qualityLevel, and A + * > B, the vector of returned corners with qualityLevel=A will + * be the prefix of the output vector with qualityLevel=B.

+ * + * @param image Input 8-bit or floating-point 32-bit, single-channel image. + * @param corners Output vector of detected corners. + * @param maxCorners Maximum number of corners to return. If there are more + * corners than are found, the strongest of them is returned. + * @param qualityLevel Parameter characterizing the minimal accepted quality of + * image corners. The parameter value is multiplied by the best corner quality + * measure, which is the minimal eigenvalue (see "cornerMinEigenVal") or the + * Harris function response (see "cornerHarris"). The corners with the quality + * measure less than the product are rejected. For example, if the best corner + * has the quality measure = 1500, and the qualityLevel=0.01, then + * all the corners with the quality measure less than 15 are rejected. + * @param minDistance Minimum possible Euclidean distance between the returned + * corners. + * + * @see org.opencv.imgproc.Imgproc.goodFeaturesToTrack + * @see org.opencv.imgproc.Imgproc#cornerHarris + * @see org.opencv.video.Video#estimateRigidTransform + * @see org.opencv.imgproc.Imgproc#cornerMinEigenVal + * @see org.opencv.video.Video#calcOpticalFlowPyrLK + */ + public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance) + { + Mat corners_mat = corners; + goodFeaturesToTrack_1(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance); + + return; + } + + + // + // C++: void grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL) + // + +/** + *
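A minimal sketch of goodFeaturesToTrack (illustrative only, not part of the patch; gray is a previously loaded 8-bit grayscale Mat):

    // Up to 100 Shi-Tomasi corners, quality 1% of the best, at least 10 px apart.
    MatOfPoint corners = new MatOfPoint();
    Imgproc.goodFeaturesToTrack(gray, corners, 100, 0.01, 10);
    for (Point p : corners.toArray()) System.out.println(p);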

Runs the GrabCut algorithm.

+ * + *

The function implements the GrabCut image segmentation algorithm + * (http://en.wikipedia.org/wiki/GrabCut). + * See the sample grabcut.cpp to learn how to use the function.

+ * + * @param img Input 8-bit 3-channel image. + * @param mask Input/output 8-bit single-channel mask. The mask is initialized + * by the function when mode is set to GC_INIT_WITH_RECT. + * Its elements may have one of following values: + *
    + *
  • GC_BGD defines an obvious background pixel. + *
  • GC_FGD defines an obvious foreground (object) pixel. + *
  • GC_PR_BGD defines a possible background pixel. + *
  • GC_PR_FGD defines a possible foreground pixel. + *
+ * @param rect ROI containing a segmented object. The pixels outside of the ROI + * are marked as "obvious background". The parameter is only used when + * mode==GC_INIT_WITH_RECT. + * @param bgdModel Temporary array for the background model. Do not modify it + * while you are processing the same image. + * @param fgdModel Temporary arrays for the foreground model. Do not modify it + * while you are processing the same image. + * @param iterCount Number of iterations the algorithm should make before + * returning the result. Note that the result can be refined with further calls + * with mode==GC_INIT_WITH_MASK or mode==GC_EVAL. + * @param mode Operation mode that could be one of the following: + *
    + *
  • GC_INIT_WITH_RECT The function initializes the state and the mask + * using the provided rectangle. After that it runs iterCount + * iterations of the algorithm. + *
  • GC_INIT_WITH_MASK The function initializes the state using the + * provided mask. Note that GC_INIT_WITH_RECT and GC_INIT_WITH_MASK + * can be combined. Then, all the pixels outside of the ROI are automatically + * initialized with GC_BGD. + *
  • GC_EVAL The value means that the algorithm should just resume. + *
+ * + * @see org.opencv.imgproc.Imgproc.grabCut + */ + public static void grabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount, int mode) + { + + grabCut_0(img.nativeObj, mask.nativeObj, rect.x, rect.y, rect.width, rect.height, bgdModel.nativeObj, fgdModel.nativeObj, iterCount, mode); + + return; + } + +/** + *

Runs the GrabCut algorithm.

+ * + *

The function implements the GrabCut image segmentation algorithm + * (http://en.wikipedia.org/wiki/GrabCut). + * See the sample grabcut.cpp to learn how to use the function.

+ * + * @param img Input 8-bit 3-channel image. + * @param mask Input/output 8-bit single-channel mask. The mask is initialized + * by the function when mode is set to GC_INIT_WITH_RECT. + * Its elements may have one of following values: + *
    + *
  • GC_BGD defines an obvious background pixel. + *
  • GC_FGD defines an obvious foreground (object) pixel. + *
  • GC_PR_BGD defines a possible background pixel. + *
  • GC_PR_FGD defines a possible foreground pixel. + *
+ * @param rect ROI containing a segmented object. The pixels outside of the ROI + * are marked as "obvious background". The parameter is only used when + * mode==GC_INIT_WITH_RECT. + * @param bgdModel Temporary array for the background model. Do not modify it + * while you are processing the same image. + * @param fgdModel Temporary arrays for the foreground model. Do not modify it + * while you are processing the same image. + * @param iterCount Number of iterations the algorithm should make before + * returning the result. Note that the result can be refined with further calls + * with mode==GC_INIT_WITH_MASK or mode==GC_EVAL. + * + * @see org.opencv.imgproc.Imgproc.grabCut + */ + public static void grabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount) + { + + grabCut_1(img.nativeObj, mask.nativeObj, rect.x, rect.y, rect.width, rect.height, bgdModel.nativeObj, fgdModel.nativeObj, iterCount); + + return; + } + + + // + // C++: void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2) + // + +/** + *
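A minimal sketch of grabCut in rectangle mode (illustrative only, not part of the patch; img is a previously loaded 8-bit 3-channel Mat and the ROI is hypothetical):

    Mat mask = new Mat(), bgdModel = new Mat(), fgdModel = new Mat();
    Rect roi = new Rect(50, 50, 200, 150);  // rough bounds of the object
    Imgproc.grabCut(img, mask, roi, bgdModel, fgdModel, 5, Imgproc.GC_INIT_WITH_RECT);
    // mask now labels each pixel GC_BGD, GC_FGD, GC_PR_BGD, or GC_PR_FGD.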

Computes the undistortion and rectification transformation map.

+ * + *

The function computes the joint undistortion and rectification transformation + * and represents the result in the form of maps for "remap". The undistorted + * image looks like original, as if it is captured with a camera using the + * camera matrix =newCameraMatrix and zero distortion. In case of a + * monocular camera, newCameraMatrix is usually equal to + * cameraMatrix, or it can be computed by "getOptimalNewCameraMatrix" + * for a better control over scaling. In case of a stereo camera, + * newCameraMatrix is normally set to P1 or + * P2 computed by "stereoRectify".

+ * + *

Also, this new camera is oriented differently in the coordinate space, + * according to R. That, for example, helps to align two heads of a + * stereo camera so that the epipolar lines on both images become horizontal and + * have the same y- coordinate (in case of a horizontally aligned stereo + * camera).

+ * + *

The function actually builds the maps for the inverse mapping algorithm that + * is used by "remap". That is, for each pixel (u, v) in the + * destination (corrected and rectified) image, the function computes the + * corresponding coordinates in the source image (that is, in the original image + * from camera). The following process is applied:

+ * + *

x <- (u - (c')_x)/(f')_x + * y <- (v - (c')_y)/(f')_y + * ([X Y W]) ^T <- R^(-1)*[x y 1]^T + * x' <- X/W + * y' <- Y/W + * x" <- x' (1 + k_1 r^2 + k_2 r^4 + k_3 r^6) + 2p_1 x' y' + p_2(r^2 + 2 x'^2) + * y" <- y' (1 + k_1 r^2 + k_2 r^4 + k_3 r^6) + p_1(r^2 + 2 y'^2) + 2 p_2 x' y' + * map_x(u,v) <- x" f_x + c_x + * map_y(u,v) <- y" f_y + c_y

+ * + *

where (k_1, k_2, p_1, p_2[, k_3]) are the distortion coefficients.

+ * + *

In case of a stereo camera, this function is called twice: once for each + * camera head, after "stereoRectify", which in its turn is called after + * "stereoCalibrate". But if the stereo camera was not calibrated, it is still + * possible to compute the rectification transformations directly from the + * fundamental matrix using "stereoRectifyUncalibrated". For each camera, the + * function computes homography H as the rectification + * transformation in a pixel domain, not a rotation matrix R in 3D + * space. R can be computed from H as

+ * + *

R = cameraMatrix ^(-1) * H * cameraMatrix

+ * + *

where cameraMatrix can be chosen arbitrarily.

+ * + * @param cameraMatrix Input camera matrix A= + *

|f_x 0 c_x| + * |0 f_y c_y| + * |0 0 1| + * .

+ * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, + * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is + * NULL/empty, the zero distortion coefficients are assumed. + * @param R Optional rectification transformation in the object space (3x3 + * matrix). R1 or R2, computed by "stereoRectify" can + * be passed here. If the matrix is empty, the identity transformation is + * assumed. In cvInitUndistortMap R assumed to be an identity + * matrix. + * @param newCameraMatrix New camera matrix A'= + *

|f_x' 0 c_x'| + * |0 f_y' c_y'| + * |0 0 1| + * .

+ * @param size Undistorted image size. + * @param m1type Type of the first output map that can be CV_32FC1 + * or CV_16SC2. See "convertMaps" for details. + * @param map1 The first output map. + * @param map2 The second output map. + * + * @see org.opencv.imgproc.Imgproc.initUndistortRectifyMap + */ + public static void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat map1, Mat map2) + { + + initUndistortRectifyMap_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, newCameraMatrix.nativeObj, size.width, size.height, m1type, map1.nativeObj, map2.nativeObj); + + return; + } + + + // + // C++: float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat& map1, Mat& map2, int projType = PROJ_SPHERICAL_EQRECT, double alpha = 0) + // + + public static float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat map1, Mat map2, int projType, double alpha) + { + + float retVal = initWideAngleProjMap_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, destImageWidth, m1type, map1.nativeObj, map2.nativeObj, projType, alpha); + + return retVal; + } + + public static float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat map1, Mat map2) + { + + float retVal = initWideAngleProjMap_1(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, destImageWidth, m1type, map1.nativeObj, map2.nativeObj); + + return retVal; + } + + + // + // C++: void integral(Mat src, Mat& sum, int sdepth = -1) + // + +/** + *

Calculates the integral of an image.

+ * + *

The functions calculate one or more integral images for the source image as + * follows:

+ * + *

sum(X,Y) = sum(by: x<X,y<Y) image(x,y)

+ * + * + * + *

sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2

+ * + * + * + *

tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)

+ * + *

Using these integral images, you can calculate the sum, mean, and standard + * deviation over a specific up-right or rotated rectangular region of the image + * in constant time, for example:

+ * + *

sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- + * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)

+ * + *

This makes it possible to do fast blurring or fast block correlation with a + * variable window size, for example. In case of multi-channel images, sums for + * each channel are accumulated independently.

+ * + *

As a practical example, the next figure shows the calculation of the integral + * of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle + * Rect(5,1,2,3). The selected pixels in the original + * image are shown, as well as the relative pixels in the integral + * images sum and tilted.

+ * + * @param src a src + * @param sum integral image as (W+1)x(H+1), 32-bit integer or + * floating-point (32f or 64f). + * @param sdepth desired depth of the integral and the tilted integral images, + * CV_32S, CV_32F, or CV_64F. + * + * @see org.opencv.imgproc.Imgproc.integral + */ + public static void integral(Mat src, Mat sum, int sdepth) + { + + integral_0(src.nativeObj, sum.nativeObj, sdepth); + + return; + } + +/** + *

Calculates the integral of an image.

+ * + *

The functions calculate one or more integral images for the source image as + * follows:

+ * + *

sum(X,Y) = sum(by: x<X,y<Y) image(x,y)

+ * + * + * + *

sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2

+ * + * + * + *

tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)

+ * + *

Using these integral images, you can calculate the sum, mean, and standard + * deviation over a specific up-right or rotated rectangular region of the image + * in constant time, for example:

+ * + *

sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- + * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)

+ * + *

This makes it possible to do fast blurring or fast block correlation with a + * variable window size, for example. In case of multi-channel images, sums for + * each channel are accumulated independently.

+ * + *

As a practical example, the next figure shows the calculation of the integral + * of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle + * Rect(5,1,2,3). The selected pixels in the original + * image are shown, as well as the relative pixels in the integral + * images sum and tilted.

+ * + * @param src a src + * @param sum integral image as (W+1)x(H+1), 32-bit integer or + * floating-point (32f or 64f). + * + * @see org.opencv.imgproc.Imgproc.integral + */ + public static void integral(Mat src, Mat sum) + { + + integral_1(src.nativeObj, sum.nativeObj); + + return; + } + + + // + // C++: void integral(Mat src, Mat& sum, Mat& sqsum, int sdepth = -1) + // + +/** + *

Calculates the integral of an image.

+ * + *

The functions calculate one or more integral images for the source image as + * follows:

+ * + *

sum(X,Y) = sum(by: x<X,y<Y) image(x,y)

+ * + * + * + *

sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2

+ * + * + * + *

tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)

+ * + *

Using these integral images, you can calculate the sum, mean, and standard + * deviation over a specific up-right or rotated rectangular region of the image + * in constant time, for example:

+ * + *

sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- + * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)

+ * + *

This makes it possible to do fast blurring or fast block correlation with a + * variable window size, for example. In case of multi-channel images, sums for + * each channel are accumulated independently.

+ * + *

As a practical example, the next figure shows the calculation of the integral + * of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle + * Rect(5,1,2,3). The selected pixels in the original + * image are shown, as well as the relative pixels in the integral + * images sum and tilted.

+ * + * @param src a src + * @param sum integral image as (W+1)x(H+1), 32-bit integer or + * floating-point (32f or 64f). + * @param sqsum integral image for squared pixel values; it is (W+1)x(H+1), + * double-precision floating-point (64f) array. + * @param sdepth desired depth of the integral and the tilted integral images, + * CV_32S, CV_32F, or CV_64F. + * + * @see org.opencv.imgproc.Imgproc.integral + */ + public static void integral2(Mat src, Mat sum, Mat sqsum, int sdepth) + { + + integral2_0(src.nativeObj, sum.nativeObj, sqsum.nativeObj, sdepth); + + return; + } + +/** + *

Calculates the integral of an image.

+ * + *

The functions calculate one or more integral images for the source image as + * follows:

+ * + *

sum(X,Y) = sum(by: x<X,y<Y) image(x,y)

+ * + * + * + *

sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2

+ * + * + * + *

tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)

+ * + *

Using these integral images, you can calculate the sum, mean, and standard + * deviation over a specific up-right or rotated rectangular region of the image + * in constant time, for example:

+ * + *

sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- + * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)

+ * + *

This makes it possible to do fast blurring or fast block correlation with a + * variable window size, for example. In case of multi-channel images, sums for + * each channel are accumulated independently.

+ * + *

As a practical example, the next figure shows the calculation of the integral + * of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle + * Rect(5,1,2,3). The selected pixels in the original + * image are shown, as well as the relative pixels in the integral + * images sum and tilted.

+ * + * @param src a src + * @param sum integral image as (W+1)x(H+1), 32-bit integer or + * floating-point (32f or 64f). + * @param sqsum integral image for squared pixel values; it is (W+1)x(H+1), + * double-precision floating-point (64f) array. + * + * @see org.opencv.imgproc.Imgproc.integral + */ + public static void integral2(Mat src, Mat sum, Mat sqsum) + { + + integral2_1(src.nativeObj, sum.nativeObj, sqsum.nativeObj); + + return; + } + + + // + // C++: void integral(Mat src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth = -1) + // + +/** + *

Calculates the integral of an image.

+ * + *

The functions calculate one or more integral images for the source image as + * follows:

+ * + *

sum(X,Y) = sum(by: x<X,y<Y) image(x,y)

+ * + * + * + *

sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2

+ * + * + * + *

tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)

+ * + *

Using these integral images, you can calculate the sum, mean, and standard + * deviation over a specific up-right or rotated rectangular region of the image + * in constant time, for example:

+ * + *

sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- + * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)

+ * + *

This makes it possible to do fast blurring or fast block correlation with a + * variable window size, for example. In case of multi-channel images, sums for + * each channel are accumulated independently.

+ * + *

As a practical example, the next figure shows the calculation of the integral + * of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle + * Rect(5,1,2,3). The selected pixels in the original + * image are shown, as well as the relative pixels in the integral + * images sum and tilted.

+ * + * @param src a src + * @param sum integral image as (W+1)x(H+1), 32-bit integer or + * floating-point (32f or 64f). + * @param sqsum integral image for squared pixel values; it is (W+1)x(H+1), + * double-precision floating-point (64f) array. + * @param tilted integral for the image rotated by 45 degrees; it is + * (W+1)x(H+1) array with the same data type as sum. + * @param sdepth desired depth of the integral and the tilted integral images, + * CV_32S, CV_32F, or CV_64F. + * + * @see org.opencv.imgproc.Imgproc.integral + */ + public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted, int sdepth) + { + + integral3_0(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj, sdepth); + + return; + } + +/** + *

Calculates the integral of an image.

+ * + *

The functions calculate one or more integral images for the source image as + * follows:

+ * + *

sum(X,Y) = sum(by: x<X,y<Y) image(x,y)

+ * + * + * + *

sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2

+ * + * + * + *

tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)

+ * + *

Using these integral images, you can calculate the sum, mean, and standard + * deviation over a specific up-right or rotated rectangular region of the image + * in constant time, for example:

+ * + *

sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- + * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)

+ * + *

This makes it possible to do fast blurring or fast block correlation with a + * variable window size, for example. In case of multi-channel images, sums for + * each channel are accumulated independently.

+ * + *

As a practical example, the next figure shows the calculation of the integral + * of a straight rectangle Rect(3,3,3,2) and of a tilted rectangle + * Rect(5,1,2,3). The selected pixels in the original + * image are shown, as well as the relative pixels in the integral + * images sum and tilted.

+ * + * @param src a src + * @param sum integral image as (W+1)x(H+1), 32-bit integer or + * floating-point (32f or 64f). + * @param sqsum integral image for squared pixel values; it is (W+1)x(H+1), + * double-precision floating-point (64f) array. + * @param tilted integral for the image rotated by 45 degrees; it is + * (W+1)x(H+1) array with the same data type as sum. + * + * @see org.opencv.imgproc.Imgproc.integral + */ + public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted) + { + + integral3_1(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj); + + return; + } + + + // + // C++: float intersectConvexConvex(Mat _p1, Mat _p2, Mat& _p12, bool handleNested = true) + // + + public static float intersectConvexConvex(Mat _p1, Mat _p2, Mat _p12, boolean handleNested) + { + + float retVal = intersectConvexConvex_0(_p1.nativeObj, _p2.nativeObj, _p12.nativeObj, handleNested); + + return retVal; + } + + public static float intersectConvexConvex(Mat _p1, Mat _p2, Mat _p12) + { + + float retVal = intersectConvexConvex_1(_p1.nativeObj, _p2.nativeObj, _p12.nativeObj); + + return retVal; + } + + + // + // C++: void invertAffineTransform(Mat M, Mat& iM) + // + +/** + *
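A minimal sketch of the constant-time block sum described above (illustrative only, not part of the patch; gray is a previously loaded 8-bit grayscale Mat):

    Mat sum = new Mat();
    Imgproc.integral(gray, sum);  // (W+1)x(H+1), CV_32S for 8-bit input
    // Sum over x1 <= x < x2, y1 <= y < y2, per the formula above.
    int x1 = 3, y1 = 3, x2 = 6, y2 = 5;
    double s = sum.get(y2, x2)[0] - sum.get(y1, x2)[0]
             - sum.get(y2, x1)[0] + sum.get(y1, x1)[0];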

Inverts an affine transformation.

+ * + *

The function computes an inverse affine transformation represented by 2 x + * 3 matrix M :

+ * + *

a_11 a_12 b_1 + * a_21 a_22 b_2

+ * + *

The result is also a 2 x 3 matrix of the same type as + * M.

+ * + * @param M Original affine transformation. + * @param iM Output reverse affine transformation. + * + * @see org.opencv.imgproc.Imgproc.invertAffineTransform + */ + public static void invertAffineTransform(Mat M, Mat iM) + { + + invertAffineTransform_0(M.nativeObj, iM.nativeObj); + + return; + } + + + // + // C++: bool isContourConvex(vector_Point contour) + // + +/** + *

Tests a contour convexity.

+ * + *

The function tests whether the input contour is convex or not. The contour + * must be simple, that is, without self-intersections. Otherwise, the function + * output is undefined.

+ * + * @param contour Input vector of 2D points, stored in: + *
    + *
  • std.vector<> or Mat (C++ interface) + *
  • CvSeq* or CvMat* (C interface) + *
  • Nx2 numpy array (Python interface) + *
+ * + * @see org.opencv.imgproc.Imgproc.isContourConvex + */ + public static boolean isContourConvex(MatOfPoint contour) + { + Mat contour_mat = contour; + boolean retVal = isContourConvex_0(contour_mat.nativeObj); + + return retVal; + } + + + // + // C++: double matchShapes(Mat contour1, Mat contour2, int method, double parameter) + // + +/** + *

Compares two shapes.

+ * + *

The function compares two shapes. All three implemented methods use the Hu + * invariants (see "HuMoments") as follows (A denotes object1, B + * denotes object2):

+ *
    + *
  • method=CV_CONTOURS_MATCH_I1 + *
+ * + *

I_1(A,B) = sum(by: i=1...7) |1/(m^A_i) - 1/(m^B_i)|

+ * + *
    + *
  • method=CV_CONTOURS_MATCH_I2 + *
+ * + *

I_2(A,B) = sum(by: i=1...7) |m^A_i - m^B_i|

+ * + *
    + *
  • method=CV_CONTOURS_MATCH_I3 + *
+ * + *

I_3(A,B) = max_(i=1...7) |m^A_i - m^B_i| / |m^A_i|

+ * + *

where

+ * + *

m^A_i = sign(h^A_i) * log(h^A_i) + * m^B_i = sign(h^B_i) * log(h^B_i)

+ * + *

and h^A_i, h^B_i are the Hu moments of A and B, + * respectively.

+ * + * @param contour1 First contour or grayscale image. + * @param contour2 Second contour or grayscale image. + * @param method Comparison method: CV_CONTOURS_MATCH_I1, + * CV_CONTOURS_MATCH_I2, + *

or CV_CONTOURS_MATCH_I3 (see the details below).

+ * @param parameter Method-specific parameter (not supported now). + * + * @see org.opencv.imgproc.Imgproc.matchShapes + */ + public static double matchShapes(Mat contour1, Mat contour2, int method, double parameter) + { + + double retVal = matchShapes_0(contour1.nativeObj, contour2.nativeObj, method, parameter); + + return retVal; + } + + + // + // C++: void matchTemplate(Mat image, Mat templ, Mat& result, int method) + // + +/** + *
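Because the comparison is built on Hu invariants, similar shapes at different scales should score near zero. A hedged sketch with two hand-built square contours (CV_CONTOURS_MATCH_I1 is the Imgproc constant for the first method above):

MatOfPoint2f small = new MatOfPoint2f(
    new Point(0, 0), new Point(0, 10), new Point(10, 10), new Point(10, 0));
MatOfPoint2f large = new MatOfPoint2f(
    new Point(0, 0), new Point(0, 40), new Point(40, 40), new Point(40, 0));
double d = Imgproc.matchShapes(small, large, Imgproc.CV_CONTOURS_MATCH_I1, 0);
// d is close to 0: the Hu invariants of the two squares nearly coincide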

Compares a template against overlapped image regions.

+ * + *

The function slides through image, compares the overlapped + * patches of size w x h against templ using the specified + * method and stores the comparison results in result. Here are the + * formulae for the available comparison methods (I denotes + * image, T template, R + * result). The summation is done over template and/or the image + * patch: x' = 0...w-1, y' = 0...h-1

+ *
    + *
  • method=CV_TM_SQDIFF + *
+ * + *

R(x,y)= sum(by: x',y')(T(x',y')-I(x+x',y+y'))^2

+ * + *
    + *
  • method=CV_TM_SQDIFF_NORMED + *
+ * + *

R(x,y) = (sum_(x',y')(T(x',y')-I(x+x',y+y'))^2) / sqrt(sum_(x',y') T(x',y')^2 * sum_(x',y') I(x+x',y+y')^2)

+ * + *
    + *
  • method=CV_TM_CCORR + *
+ * + *

R(x,y)= sum(by: x',y')(T(x',y') * I(x+x',y+y'))

+ * + *
    + *
  • method=CV_TM_CCORR_NORMED + *
+ * + *

R(x,y) = (sum_(x',y')(T(x',y') * I(x+x',y+y'))) / sqrt(sum_(x',y') T(x',y')^2 * sum_(x',y') I(x+x',y+y')^2)

+ * + *
    + *
  • method=CV_TM_CCOEFF + *
+ * + *

R(x,y)= sum(by: x',y')(T'(x',y') * I'(x+x',y+y'))

+ * + *

where

+ * + *

T'(x',y') = T(x',y') - 1/(w*h) * sum(by: x'',y'') T(x'',y'')
I'(x+x',y+y') = I(x+x',y+y') - 1/(w*h) * sum(by: x'',y'') I(x+x'',y+y'')

+ * + *
    + *
  • method=CV_TM_CCOEFF_NORMED + *
+ * + *

R(x,y) = (sum_(x',y')(T'(x',y') * I'(x+x',y+y'))) / sqrt(sum_(x',y') T'(x',y')^2 * sum_(x',y') I'(x+x',y+y')^2)

+ * + *

After the function finishes the comparison, the best matches can be found as + * global minimums (when CV_TM_SQDIFF was used) or maximums (when + * CV_TM_CCORR or CV_TM_CCOEFF was used) using the + * "minMaxLoc" function. In case of a color image, template summation in the + * numerator and each sum in the denominator is done over all of the channels + * and separate mean values are used for each channel. That is, the function can + * take a color template and a color image. The result will still be a + * single-channel image, which is easier to analyze.

+ * + * @param image Image where the search is running. It must be 8-bit or 32-bit + * floating-point. + * @param templ Searched template. It must be not greater than the source image + * and have the same data type. + * @param result Map of comparison results. It must be single-channel 32-bit + * floating-point. If image is W x H and templ + * is w x h, then result is (W-w+1) x(H-h+1). + * @param method Parameter specifying the comparison method (see below). + * + * @see org.opencv.imgproc.Imgproc.matchTemplate + */ + public static void matchTemplate(Mat image, Mat templ, Mat result, int method) + { + + matchTemplate_0(image.nativeObj, templ.nativeObj, result.nativeObj, method); + + return; + } + + + // + // C++: void medianBlur(Mat src, Mat& dst, int ksize) + // + +/** + *
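As the paragraph above says, the best match is read off with "minMaxLoc". A sketch (the file names are placeholders; Highgui.imread is the 2.4-era loader from org.opencv.highgui):

Mat image  = Highgui.imread("scene.png", 0);   // hypothetical files, loaded grayscale
Mat templ  = Highgui.imread("patch.png", 0);
Mat result = new Mat();
Imgproc.matchTemplate(image, templ, result, Imgproc.TM_CCOEFF_NORMED);
Core.MinMaxLocResult mm = Core.minMaxLoc(result);
Point topLeft = mm.maxLoc;  // for the SQDIFF methods take mm.minLoc instead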

Blurs an image using the median filter.

+ * + *

The function smoothes an image using the median filter with the ksize x + * ksize aperture. Each channel of a multi-channel image is processed + * independently. In-place operation is supported.

+ * + * @param src input 1-, 3-, or 4-channel image; when ksize is 3 or + * 5, the image depth should be CV_8U, CV_16U, or + * CV_32F, for larger aperture sizes, it can only be + * CV_8U. + * @param dst destination array of the same size and type as src. + * @param ksize aperture linear size; it must be odd and greater than 1, for + * example: 3, 5, 7... + * + * @see org.opencv.imgproc.Imgproc.medianBlur + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#bilateralFilter + * @see org.opencv.imgproc.Imgproc#blur + */ + public static void medianBlur(Mat src, Mat dst, int ksize) + { + + medianBlur_0(src.nativeObj, dst.nativeObj, ksize); + + return; + } + + + // + // C++: RotatedRect minAreaRect(vector_Point2f points) + // + +/** + *

Finds a rotated rectangle of the minimum area enclosing the input 2D point + * set.

+ * + *

The function calculates and returns the minimum-area bounding rectangle + * (possibly rotated) for a specified point set. See the OpenCV sample + * minarea.cpp.

+ * + * @param points Input vector of 2D points, stored in: + *
    + *
  • std.vector<> or Mat (C++ interface) + *
  • CvSeq* or CvMat* (C interface) + *
  • Nx2 numpy array (Python interface) + *
+ * + * @see org.opencv.imgproc.Imgproc.minAreaRect + */ + public static RotatedRect minAreaRect(MatOfPoint2f points) + { + Mat points_mat = points; + RotatedRect retVal = new RotatedRect(minAreaRect_0(points_mat.nativeObj)); + + return retVal; + } + + + // + // C++: void minEnclosingCircle(vector_Point2f points, Point2f& center, float& radius) + // + +/** + *

Finds a circle of the minimum area enclosing a 2D point set.

+ * + *

The function finds the minimal enclosing circle of a 2D point set using an + * iterative algorithm. See the OpenCV sample minarea.cpp.

+ * + * @param points Input vector of 2D points, stored in: + *
    + *
  • std.vector<> or Mat (C++ interface) + *
  • CvSeq* or CvMat* (C interface) + *
  • Nx2 numpy array (Python interface) + *
+ * @param center Output center of the circle. + * @param radius Output radius of the circle. + * + * @see org.opencv.imgproc.Imgproc.minEnclosingCircle + */ + public static void minEnclosingCircle(MatOfPoint2f points, Point center, float[] radius) + { + Mat points_mat = points; + double[] center_out = new double[2]; + double[] radius_out = new double[1]; + minEnclosingCircle_0(points_mat.nativeObj, center_out, radius_out); + if(center!=null){ center.x = center_out[0]; center.y = center_out[1]; } + if(radius!=null) radius[0] = (float)radius_out[0]; + return; + } + + + // + // C++: Moments moments(Mat array, bool binaryImage = false) + // + +/** + *
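Usage is straightforward; note how this wrapper returns the circle through its out-parameters rather than a return value (a sketch, with arbitrary sample points):

MatOfPoint2f pts = new MatOfPoint2f(
    new Point(10, 10), new Point(60, 20), new Point(35, 70));
Point center   = new Point();
float[] radius = new float[1];
Imgproc.minEnclosingCircle(pts, center, radius);
// center and radius[0] now describe the smallest circle containing all three points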

Calculates all of the moments up to the third order of a polygon or + * rasterized shape.

+ * + *

The function computes moments, up to the 3rd order, of a vector shape or a + * rasterized shape. The results are returned in the structure Moments + * defined as:

+ * + *

// C++ code:

+ * + *

class Moments
{
public:
    Moments();
    Moments(double m00, double m10, double m01, double m20, double m11,
            double m02, double m30, double m21, double m12, double m03);
    Moments(const CvMoments& moments);
    operator CvMoments() const;

    // spatial moments
    double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
    // central moments
    double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
    // central normalized moments
    double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
};

+ * + * + *

In case of a raster image, the spatial moments Moments.m_(ji) are + * computed as:

+ * + *

m_(ji) = sum(by: x,y)(array(x,y) * x^j * y^i)

+ * + *

The central moments Moments.mu_(ji) are computed as:

+ * + *

mu_(ji) = sum(by: x,y)(array(x,y) * (x - x")^j * (y - y")^i)

+ * + *

where (x", y") is the mass center:

+ * + *

x" = (m_10)/(m_(00)), y" = (m_01)/(m_(00))

+ * + *

The normalized central moments Moments.nu_(ij) are computed as:

+ * + *

nu_(ji) = mu_(ji) / m_(00)^((i+j)/2+1).

+ * + *

Note:

+ * + *

mu_00 = m_00, nu_00 = 1, nu_10 = mu_10 = nu_01 = mu_01 = 0, + * hence the values are not stored.

+ * + *

The moments of a contour are defined in the same way but computed using the + * Green's formula (see http://en.wikipedia.org/wiki/Green_theorem). So, due to + * a limited raster resolution, the moments computed for a contour are slightly + * different from the moments computed for the same rasterized contour.

+ * + *

Note:

+ * + *

Since the contour moments are computed using Green formula, you may get + * seemingly odd results for contours with self-intersections, e.g. a zero area + * (m00) for butterfly-shaped contours.

+ * + * @param array Raster image (single-channel, 8-bit or floating-point 2D array) + * or an array (1 x N or N x 1) of 2D points (Point + * or Point2f). + * @param binaryImage If it is true, all non-zero image pixels are treated as + * 1's. The parameter is used for images only. + * + * @see org.opencv.imgproc.Imgproc.moments + * @see org.opencv.imgproc.Imgproc#contourArea + * @see org.opencv.imgproc.Imgproc#arcLength + */ + public static Moments moments(Mat array, boolean binaryImage) + { + + Moments retVal = new Moments(moments_0(array.nativeObj, binaryImage)); + + return retVal; + } + +/** + *
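The mass-center formula above translates directly into the bindings. A sketch with a synthetic mask (Core.circle is the 2.4-era drawing call):

Mat mask = Mat.zeros(100, 100, CvType.CV_8UC1);
Core.circle(mask, new Point(40, 60), 20, new Scalar(255), -1);  // filled disc
Moments m = Imgproc.moments(mask, true);
double cx = m.get_m10() / m.get_m00();  // ~40, per the mass-center formula above
double cy = m.get_m01() / m.get_m00();  // ~60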

Calculates all of the moments up to the third order of a polygon or + * rasterized shape.

+ * + *

The function computes moments, up to the 3rd order, of a vector shape or a + * rasterized shape. The results are returned in the structure Moments + * defined as:

+ * + *

// C++ code:

+ * + *

class Moments
{
public:
    Moments();
    Moments(double m00, double m10, double m01, double m20, double m11,
            double m02, double m30, double m21, double m12, double m03);
    Moments(const CvMoments& moments);
    operator CvMoments() const;

    // spatial moments
    double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
    // central moments
    double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
    // central normalized moments
    double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
};

+ * + * + *

In case of a raster image, the spatial moments Moments.m_(ji) are + * computed as:

+ * + *

m_(ji) = sum(by: x,y)(array(x,y) * x^j * y^i)

+ * + *

The central moments Moments.mu_(ji) are computed as:

+ * + *

mu_(ji) = sum(by: x,y)(array(x,y) * (x - x")^j * (y - y")^i)

+ * + *

where (x", y") is the mass center:

+ * + *

x" = (m_10)/(m_(00)), y" = (m_01)/(m_(00))

+ * + *

The normalized central moments Moments.nu_(ij) are computed as:

+ * + *

nu_(ji) = mu_(ji) / m_(00)^((i+j)/2+1).

+ * + *

Note:

+ * + *

mu_00 = m_00, nu_00 = 1, nu_10 = mu_10 = nu_01 = mu_01 = 0, + * hence the values are not stored.

+ * + *

The moments of a contour are defined in the same way but computed using the + * Green's formula (see http://en.wikipedia.org/wiki/Green_theorem). So, due to + * a limited raster resolution, the moments computed for a contour are slightly + * different from the moments computed for the same rasterized contour.

+ * + *

Note:

+ * + *

Since the contour moments are computed using Green formula, you may get + * seemingly odd results for contours with self-intersections, e.g. a zero area + * (m00) for butterfly-shaped contours.

+ * + * @param array Raster image (single-channel, 8-bit or floating-point 2D array) + * or an array (1 x N or N x 1) of 2D points (Point + * or Point2f). + * + * @see org.opencv.imgproc.Imgproc.moments + * @see org.opencv.imgproc.Imgproc#contourArea + * @see org.opencv.imgproc.Imgproc#arcLength + */ + public static Moments moments(Mat array) + { + + Moments retVal = new Moments(moments_1(array.nativeObj)); + + return retVal; + } + + + // + // C++: void morphologyEx(Mat src, Mat& dst, int op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) + // + +/** + *

Performs advanced morphological transformations.

+ * + *

The function can perform advanced morphological transformations using an + * erosion and dilation as basic operations.

+ * + *

Opening operation:

+ * + *

dst = open(src, element)= dilate(erode(src, element))

+ * + *

Closing operation:

+ * + *

dst = close(src, element)= erode(dilate(src, element))

+ * + *

Morphological gradient:

+ * + *

dst = morph_grad(src, element)= dilate(src, element)- erode(src, + * element)

+ * + *

"Top hat":

+ * + *

dst = tophat(src, element)= src - open(src, element)

+ * + *

"Black hat":

+ * + *

dst = blackhat(src, element)= close(src, element)- src

+ * + *

Any of the operations can be done in-place. In case of multi-channel images, + * each channel is processed independently.

+ * + * @param src Source image. The number of channels can be arbitrary. The depth + * should be one of CV_8U, CV_16U, CV_16S, + * CV_32F or CV_64F. + * @param dst Destination image of the same size and type as src. + * @param op Type of a morphological operation that can be one of the following: + *
    + *
  • MORPH_OPEN - an opening operation + *
  • MORPH_CLOSE - a closing operation + *
  • MORPH_GRADIENT - a morphological gradient + *
  • MORPH_TOPHAT - "top hat" + *
  • MORPH_BLACKHAT - "black hat" + *
+ * @param kernel Structuring element used for the operation. + * @param anchor Anchor position within the kernel; the default value (-1, -1) + * means that the anchor is at the kernel center. + * @param iterations Number of times erosion and dilation are applied. + * @param borderType Pixel extrapolation method. See "borderInterpolate" for + * details. + * @param borderValue Border value in case of a constant border. The default + * value has a special meaning. See "createMorphologyFilter" for details. + * + * @see org.opencv.imgproc.Imgproc.morphologyEx + * @see org.opencv.imgproc.Imgproc#erode + * @see org.opencv.imgproc.Imgproc#dilate + */ + public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) + { + + morphologyEx_0(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); + + return; + } + +/** + *
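A sketch of the opening operation defined above, using getStructuringElement to build the kernel (gray is a hypothetical 8-bit single-channel input):

Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(5, 5));
Mat opened = new Mat();
Imgproc.morphologyEx(gray, opened, Imgproc.MORPH_OPEN, kernel);  // erode, then dilate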

Performs advanced morphological transformations.

+ * + *

The function can perform advanced morphological transformations using an + * erosion and dilation as basic operations.

+ * + *

Opening operation:

+ * + *

dst = open(src, element)= dilate(erode(src, element))

+ * + *

Closing operation:

+ * + *

dst = close(src, element)= erode(dilate(src, element))

+ * + *

Morphological gradient:

+ * + *

dst = morph_grad(src, element)= dilate(src, element)- erode(src, + * element)

+ * + *

"Top hat":

+ * + *

dst = tophat(src, element)= src - open(src, element)

+ * + *

"Black hat":

+ * + *

dst = blackhat(src, element)= close(src, element)- src

+ * + *

Any of the operations can be done in-place. In case of multi-channel images, + * each channel is processed independently.

+ * + * @param src Source image. The number of channels can be arbitrary. The depth + * should be one of CV_8U, CV_16U, CV_16S, + * CV_32F or CV_64F. + * @param dst Destination image of the same size and type as src. + * @param op Type of a morphological operation that can be one of the following: + *
    + *
  • MORPH_OPEN - an opening operation + *
  • MORPH_CLOSE - a closing operation + *
  • MORPH_GRADIENT - a morphological gradient + *
  • MORPH_TOPHAT - "top hat" + *
  • MORPH_BLACKHAT - "black hat" + *
+ * @param kernel Structuring element used for the operation. + * @param anchor Anchor position within the kernel; the default value (-1, -1) + * means that the anchor is at the kernel center. + * @param iterations Number of times erosion and dilation are applied. + * + * @see org.opencv.imgproc.Imgproc.morphologyEx + * @see org.opencv.imgproc.Imgproc#erode + * @see org.opencv.imgproc.Imgproc#dilate + */ + public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations) + { + + morphologyEx_1(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations); + + return; + } + +/** + *

Performs advanced morphological transformations.

+ * + *

The function can perform advanced morphological transformations using an + * erosion and dilation as basic operations.

+ * + *

Opening operation:

+ * + *

dst = open(src, element)= dilate(erode(src, element))

+ * + *

Closing operation:

+ * + *

dst = close(src, element)= erode(dilate(src, element))

+ * + *

Morphological gradient:

+ * + *

dst = morph_grad(src, element)= dilate(src, element)- erode(src, + * element)

+ * + *

"Top hat":

+ * + *

dst = tophat(src, element)= src - open(src, element)

+ * + *

"Black hat":

+ * + *

dst = blackhat(src, element)= close(src, element)- src

+ * + *

Any of the operations can be done in-place. In case of multi-channel images, + * each channel is processed independently.

+ * + * @param src Source image. The number of channels can be arbitrary. The depth + * should be one of CV_8U, CV_16U, CV_16S, + * CV_32F or CV_64F. + * @param dst Destination image of the same size and type as src. + * @param op Type of a morphological operation that can be one of the following: + *
    + *
  • MORPH_OPEN - an opening operation + *
  • MORPH_CLOSE - a closing operation + *
  • MORPH_GRADIENT - a morphological gradient + *
  • MORPH_TOPHAT - "top hat" + *
  • MORPH_BLACKHAT - "black hat" + *
+ * @param kernel Structuring element used for the operation. + * + * @see org.opencv.imgproc.Imgproc.morphologyEx + * @see org.opencv.imgproc.Imgproc#erode + * @see org.opencv.imgproc.Imgproc#dilate + */ + public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel) + { + + morphologyEx_2(src.nativeObj, dst.nativeObj, op, kernel.nativeObj); + + return; + } + + + // + // C++: Point2d phaseCorrelate(Mat src1, Mat src2, Mat window = Mat()) + // + +/** + *

The function is used to detect translational shifts that occur between two + * images. The operation takes advantage of the Fourier shift theorem for + * detecting the translational shift in the frequency domain. It can be used for + * fast image registration as well as motion estimation. For more information + * please see http://en.wikipedia.org/wiki/Phase_correlation.

+ * + *

Calculates the cross-power spectrum of two supplied source arrays. The arrays + * are padded if needed with "getOptimalDFTSize".

+ * + *

Return value: detected phase shift (sub-pixel) between the two arrays.

+ * + *

The function performs the following steps:

+ *
    + *
  • First it applies a Hanning window (see http://en.wikipedia.org/wiki/Hann_function) + * to each image to remove possible edge effects. This window is cached until + * the array size changes to speed up processing time. + *
  • Next it computes the forward DFTs of each source array: + *
+ * + *

G_a = F(src_1), G_b = F(src_2)

+ * + *

where F is the forward DFT.

+ *
    + *
  • It then computes the cross-power spectrum of each frequency domain + * array: + *
+ * + *

R = (G_a G_b^*) / (|G_a G_b^*|)

+ * + *
    + *
  • Next the cross-correlation is converted back into the time domain via + * the inverse DFT: + *
+ * + *

r = F^(-1)(R)

+ * + *
    + *
  • Finally, it computes the peak location and computes a 5x5 weighted + * centroid around the peak to achieve sub-pixel accuracy. + *
+ * + *

(Delta x, Delta y) = weightedCentroid(argmax_((x, y))(r))

+ * + *
    + *
  • If non-zero, the response parameter is computed as the sum of the + * elements of r within the 5x5 centroid around the peak location. It is + * normalized to a maximum of 1 (meaning there is a single peak) and will be + * smaller when there are multiple peaks. + *
+ * + * @param src1 Source floating point array (CV_32FC1 or CV_64FC1) + * @param src2 Source floating point array (CV_32FC1 or CV_64FC1) + * @param window Floating point array with windowing coefficients to reduce edge + * effects (optional). + * + * @see org.opencv.imgproc.Imgproc.phaseCorrelate + * @see org.opencv.imgproc.Imgproc#createHanningWindow + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#mulSpectrums + * @see org.opencv.core.Core#getOptimalDFTSize + * @see org.opencv.core.Core#idft + */ + public static Point phaseCorrelate(Mat src1, Mat src2, Mat window) + { + + Point retVal = new Point(phaseCorrelate_0(src1.nativeObj, src2.nativeObj, window.nativeObj)); + + return retVal; + } + +/** + *
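A sketch pairing this with "createHanningWindow", as the @see list suggests (frameA and frameB are hypothetical CV_32FC1 frames of equal size):

Mat window = new Mat();
Imgproc.createHanningWindow(window, frameA.size(), CvType.CV_32F);  // tapers the edges
Point shift = Imgproc.phaseCorrelate(frameA, frameB, window);
// shift.x, shift.y: sub-pixel translation of frameB relative to frameA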

The function is used to detect translational shifts that occur between two + * images. The operation takes advantage of the Fourier shift theorem for + * detecting the translational shift in the frequency domain. It can be used for + * fast image registration as well as motion estimation. For more information + * please see http://en.wikipedia.org/wiki/Phase_correlation.

+ * + *

Calculates the cross-power spectrum of two supplied source arrays. The arrays + * are padded if needed with "getOptimalDFTSize".

+ * + *

Return value: detected phase shift (sub-pixel) between the two arrays.

+ * + *

The function performs the following steps:

+ *
    + *
  • First it applies a Hanning window (see http://en.wikipedia.org/wiki/Hann_function) + * to each image to remove possible edge effects. This window is cached until + * the array size changes to speed up processing time. + *
  • Next it computes the forward DFTs of each source array: + *
+ * + *

G_a = F(src_1), G_b = F(src_2)

+ * + *

where F is the forward DFT.

+ *
    + *
  • It then computes the cross-power spectrum of each frequency domain + * array: + *
+ * + *

R = (G_a G_b^*) / (|G_a G_b^*|)

+ * + *
    + *
  • Next the cross-correlation is converted back into the time domain via + * the inverse DFT: + *
+ * + *

r = F^(-1)(R)

+ * + *
    + *
  • Finally, it computes the peak location and computes a 5x5 weighted + * centroid around the peak to achieve sub-pixel accuracy. + *
+ * + *

(Delta x, Delta y) = weightedCentroid(argmax_((x, y))(r))

+ * + *
    + *
  • If non-zero, the response parameter is computed as the sum of the + * elements of r within the 5x5 centroid around the peak location. It is + * normalized to a maximum of 1 (meaning there is a single peak) and will be + * smaller when there are multiple peaks. + *
+ * + * @param src1 Source floating point array (CV_32FC1 or CV_64FC1) + * @param src2 Source floating point array (CV_32FC1 or CV_64FC1) + * + * @see org.opencv.imgproc.Imgproc.phaseCorrelate + * @see org.opencv.imgproc.Imgproc#createHanningWindow + * @see org.opencv.core.Core#dft + * @see org.opencv.core.Core#mulSpectrums + * @see org.opencv.core.Core#getOptimalDFTSize + * @see org.opencv.core.Core#idft + */ + public static Point phaseCorrelate(Mat src1, Mat src2) + { + + Point retVal = new Point(phaseCorrelate_1(src1.nativeObj, src2.nativeObj)); + + return retVal; + } + + + // + // C++: Point2d phaseCorrelateRes(Mat src1, Mat src2, Mat window, double* response = 0) + // + + public static Point phaseCorrelateRes(Mat src1, Mat src2, Mat window, double[] response) + { + double[] response_out = new double[1]; + Point retVal = new Point(phaseCorrelateRes_0(src1.nativeObj, src2.nativeObj, window.nativeObj, response_out)); + if(response!=null) response[0] = (double)response_out[0]; + return retVal; + } + + public static Point phaseCorrelateRes(Mat src1, Mat src2, Mat window) + { + + Point retVal = new Point(phaseCorrelateRes_1(src1.nativeObj, src2.nativeObj, window.nativeObj)); + + return retVal; + } + + + // + // C++: double pointPolygonTest(vector_Point2f contour, Point2f pt, bool measureDist) + // + +/** + *

Performs a point-in-contour test.

+ * + *

The function determines whether the point is inside a contour, outside, or + * lies on an edge (or coincides with a vertex). It returns positive (inside), + * negative (outside), or zero (on an edge) value, correspondingly. When + * measureDist=false, the return value is +1, -1, and 0, + * respectively. Otherwise, the return value is a signed distance between the + * point and the nearest contour edge.

+ * + *

See below a sample output of the function where each image pixel is tested + * against the contour.

+ * + * @param contour Input contour. + * @param pt Point tested against the contour. + * @param measureDist If true, the function estimates the signed distance from + * the point to the nearest contour edge. Otherwise, the function only checks if + * the point is inside a contour or not. + * + * @see org.opencv.imgproc.Imgproc.pointPolygonTest + */ + public static double pointPolygonTest(MatOfPoint2f contour, Point pt, boolean measureDist) + { + Mat contour_mat = contour; + double retVal = pointPolygonTest_0(contour_mat.nativeObj, pt.x, pt.y, measureDist); + + return retVal; + } + + + // + // C++: void preCornerDetect(Mat src, Mat& dst, int ksize, int borderType = BORDER_DEFAULT) + // + +/** + *
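A sketch of both modes on a hand-built triangle:

MatOfPoint2f tri = new MatOfPoint2f(
    new Point(0, 0), new Point(100, 0), new Point(50, 80));
double side = Imgproc.pointPolygonTest(tri, new Point(50, 20), false);  // +1: inside
double dist = Imgproc.pointPolygonTest(tri, new Point(50, 20), true);   // signed distance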

Calculates a feature map for corner detection.

+ * + *

The function calculates the complex spatial derivative-based function of the + * source image

+ * + *

dst = (D_x src)^2 * D_(yy) src + (D_y src)^2 * D_(xx) src - 2 * D_x src * D_y src * D_(xy) src

+ * + *

where D_x, D_y are the first image derivatives, + * D_(xx), D_(yy) are the second image derivatives, and + * D_(xy) is the mixed derivative. + * The corners can be found as local maxima of this function, as shown below: + *

+ * + *

// C++ code:

+ * + *

Mat corners, dilated_corners;
preCornerDetect(image, corners, 3);
// dilation with 3x3 rectangular structuring element
dilate(corners, dilated_corners, Mat(), 1);
Mat corner_mask = corners == dilated_corners;

+ * + *

+ * + * @param src Source single-channel 8-bit or floating-point image. + * @param dst Output image that has the type CV_32F and the same + * size as src. + * @param ksize Aperture size of the "Sobel" operator. + * @param borderType Pixel extrapolation method. See "borderInterpolate". + * + * @see org.opencv.imgproc.Imgproc.preCornerDetect + */ + public static void preCornerDetect(Mat src, Mat dst, int ksize, int borderType) + { + + preCornerDetect_0(src.nativeObj, dst.nativeObj, ksize, borderType); + + return; + } + +/** + *

Calculates a feature map for corner detection.

+ * + *

The function calculates the complex spatial derivative-based function of the + * source image

+ * + *

dst = (D_x src)^2 * D_(yy) src + (D_y src)^2 * D_(xx) src - 2 * D_x src * D_y src * D_(xy) src

+ * + *

where D_x, D_y are the first image derivatives, + * D_(xx), D_(yy) are the second image derivatives, and + * D_(xy) is the mixed derivative. + * The corners can be found as local maxima of this function, as shown below: + *

+ * + *

// C++ code:

+ * + *

Mat corners, dilated_corners;
preCornerDetect(image, corners, 3);
// dilation with 3x3 rectangular structuring element
dilate(corners, dilated_corners, Mat(), 1);
Mat corner_mask = corners == dilated_corners;

+ * + *

+ * + * @param src Source single-channel 8-bit or floating-point image. + * @param dst Output image that has the type CV_32F and the same + * size as src. + * @param ksize Aperture size of the "Sobel" operator. + * + * @see org.opencv.imgproc.Imgproc.preCornerDetect + */ + public static void preCornerDetect(Mat src, Mat dst, int ksize) + { + + preCornerDetect_1(src.nativeObj, dst.nativeObj, ksize); + + return; + } + + + // + // C++: void pyrDown(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT) + // + +/** + *

Blurs an image and downsamples it.

+ * + *

The function performs the downsampling step of the Gaussian pyramid + * construction. First, it convolves the source image with the kernel:

+ * + *

        1  4  6  4  1
        4 16 24 16  4
1/256 * 6 24 36 24  6
        4 16 24 16  4
        1  4  6  4  1

+ * + *

Then, it downsamples the image by rejecting even rows and columns.

+ * + * @param src input image. + * @param dst output image; it has the specified size and the same type as + * src. + * @param dstsize size of the output image; by default, it is computed as + * Size((src.cols+1)/2, (src.rows+1)/2), but in any case, the + * following conditions should be satisfied: + * + *

|dstsize.width*2 - src.cols| <= 2, |dstsize.height*2 - src.rows| <= 2

+ * @param borderType a borderType + * + * @see org.opencv.imgproc.Imgproc.pyrDown + */ + public static void pyrDown(Mat src, Mat dst, Size dstsize, int borderType) + { + + pyrDown_0(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height, borderType); + + return; + } + +/** + *

Blurs an image and downsamples it.

+ * + *

The function performs the downsampling step of the Gaussian pyramid + * construction. First, it convolves the source image with the kernel:

+ * + *

        1  4  6  4  1
        4 16 24 16  4
1/256 * 6 24 36 24  6
        4 16 24 16  4
        1  4  6  4  1

+ * + *

Then, it downsamples the image by rejecting even rows and columns.

+ * + * @param src input image. + * @param dst output image; it has the specified size and the same type as + * src. + * @param dstsize size of the output image; by default, it is computed as + * Size((src.cols+1)/2, (src.rows+1)/2), but in any case, the + * following conditions should be satisfied: + * + *

|dstsize.width*2 - src.cols| <= 2, |dstsize.height*2 - src.rows| <= 2

+ * + * @see org.opencv.imgproc.Imgproc.pyrDown + */ + public static void pyrDown(Mat src, Mat dst, Size dstsize) + { + + pyrDown_1(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height); + + return; + } + +/** + *

Blurs an image and downsamples it.

+ * + *

The function performs the downsampling step of the Gaussian pyramid + * construction. First, it convolves the source image with the kernel:

+ * + *

        1  4  6  4  1
        4 16 24 16  4
1/256 * 6 24 36 24  6
        4 16 24 16  4
        1  4  6  4  1

+ * + *

Then, it downsamples the image by rejecting even rows and columns.

+ * + * @param src input image. + * @param dst output image; it has the specified size and the same type as + * src. + * + * @see org.opencv.imgproc.Imgproc.pyrDown + */ + public static void pyrDown(Mat src, Mat dst) + { + + pyrDown_2(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS,5,1)) + // + +/** + *
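Chaining calls walks down the Gaussian pyramid one level at a time (a sketch; src is a hypothetical input image):

Mat half = new Mat(), quarter = new Mat();
Imgproc.pyrDown(src, half);      // default dstsize: ((cols+1)/2, (rows+1)/2)
Imgproc.pyrDown(half, quarter);  // two pyramid levels down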

Performs initial step of meanshift segmentation of an image.

+ * + *

The function implements the filtering stage of meanshift segmentation, that + * is, the output of the function is the filtered "posterized" image with color + * gradients and fine-grain texture flattened. At every pixel (X,Y) + * of the input image (or down-sized input image, see below) the function + * executes meanshift iterations, that is, the pixel (X,Y) + * neighborhood in the joint space-color hyperspace is considered:

+ * + *

(x,y): X - sp <= x <= X + sp, Y - sp <= y <= Y + sp, ||(R,G,B) - (r,g,b)|| <= sr

+ * + *

where (R,G,B) and (r,g,b) are the vectors of color + * components at (X,Y) and (x,y), respectively + * (though, the algorithm does not depend on the color space used, so any + * 3-component color space can be used instead). Over the neighborhood the + * average spatial value (X',Y') and average color vector + * (R',G',B') are found and they act as the neighborhood center on + * the next iteration:

+ * + *

(X,Y)~(X',Y'), (R,G,B)~(R',G',B').

+ * + *

After the iterations are over, the color components of the initial pixel (that + * is, the pixel from where the iterations started) are set to the final value + * (average color at the last iteration):

+ * + *

I(X,Y) <- (R*,G*,B*)

+ * + *

When maxLevel > 0, the Gaussian pyramid of maxLevel+1 + * levels is built, and the above procedure is run on the smallest layer first. + * After that, the results are propagated to the larger layer and the iterations + * are run again only on those pixels where the layer colors differ by more than + * sr from the lower-resolution layer of the pyramid. That makes + * boundaries of color regions sharper. Note that the results will actually be + * different from the ones obtained by running the meanshift procedure on the + * whole original image (i.e. when maxLevel==0).

+ * + * @param src The source 8-bit, 3-channel image. + * @param dst The destination image of the same format and the same size as the + * source. + * @param sp The spatial window radius. + * @param sr The color window radius. + * @param maxLevel Maximum level of the pyramid for the segmentation. + * @param termcrit Termination criteria: when to stop meanshift iterations. + * + * @see org.opencv.imgproc.Imgproc.pyrMeanShiftFiltering + */ + public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr, int maxLevel, TermCriteria termcrit) + { + + pyrMeanShiftFiltering_0(src.nativeObj, dst.nativeObj, sp, sr, maxLevel, termcrit.type, termcrit.maxCount, termcrit.epsilon); + + return; + } + +/** + *

Performs initial step of meanshift segmentation of an image.

+ * + *

The function implements the filtering stage of meanshift segmentation, that + * is, the output of the function is the filtered "posterized" image with color + * gradients and fine-grain texture flattened. At every pixel (X,Y) + * of the input image (or down-sized input image, see below) the function + * executes meanshift iterations, that is, the pixel (X,Y) + * neighborhood in the joint space-color hyperspace is considered:

+ * + *

(x,y): X - sp <= x <= X + sp, Y - sp <= y <= Y + sp, ||(R,G,B) - (r,g,b)|| <= sr

+ * + *

where (R,G,B) and (r,g,b) are the vectors of color + * components at (X,Y) and (x,y), respectively + * (though, the algorithm does not depend on the color space used, so any + * 3-component color space can be used instead). Over the neighborhood the + * average spatial value (X',Y') and average color vector + * (R',G',B') are found and they act as the neighborhood center on + * the next iteration:

+ * + *

(X,Y)~(X',Y'), (R,G,B)~(R',G',B').

+ * + *

After the iterations are over, the color components of the initial pixel (that + * is, the pixel from where the iterations started) are set to the final value + * (average color at the last iteration):

+ * + *

I(X,Y) <- (R*,G*,B*)

+ * + *

When maxLevel > 0, the Gaussian pyramid of maxLevel+1 + * levels is built, and the above procedure is run on the smallest layer first. + * After that, the results are propagated to the larger layer and the iterations + * are run again only on those pixels where the layer colors differ by more than + * sr from the lower-resolution layer of the pyramid. That makes + * boundaries of color regions sharper. Note that the results will actually be + * different from the ones obtained by running the meanshift procedure on the + * whole original image (i.e. when maxLevel==0).

+ * + * @param src The source 8-bit, 3-channel image. + * @param dst The destination image of the same format and the same size as the + * source. + * @param sp The spatial window radius. + * @param sr The color window radius. + * + * @see org.opencv.imgproc.Imgproc.pyrMeanShiftFiltering + */ + public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr) + { + + pyrMeanShiftFiltering_1(src.nativeObj, dst.nativeObj, sp, sr); + + return; + } + + + // + // C++: void pyrUp(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT) + // + +/** + *

Upsamples an image and then blurs it.

+ * + *

The function performs the upsampling step of the Gaussian pyramid + * construction, though it can actually be used to construct the Laplacian + * pyramid. First, it upsamples the source image by injecting even zero rows and + * columns and then convolves the result with the same kernel as in "pyrDown" + * multiplied by 4.

+ * + * @param src input image. + * @param dst output image. It has the specified size and the same type as + * src. + * @param dstsize size of the output image; by default, it is computed as + * Size(src.cols*2, src.rows*2), but in any case, the following + * conditions should be satisfied: + * + *

|dstsize.width - src.cols*2| <= (dstsize.width mod 2), |dstsize.height - src.rows*2| <= (dstsize.height mod 2)

+ * @param borderType a borderType + * + * @see org.opencv.imgproc.Imgproc.pyrUp + */ + public static void pyrUp(Mat src, Mat dst, Size dstsize, int borderType) + { + + pyrUp_0(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height, borderType); + + return; + } + +/** + *

Upsamples an image and then blurs it.

+ * + *

The function performs the upsampling step of the Gaussian pyramid + * construction, though it can actually be used to construct the Laplacian + * pyramid. First, it upsamples the source image by injecting even zero rows and + * columns and then convolves the result with the same kernel as in "pyrDown" + * multiplied by 4.

+ * + * @param src input image. + * @param dst output image. It has the specified size and the same type as + * src. + * @param dstsize size of the output image; by default, it is computed as + * Size(src.cols*2, src.rows*2), but in any case, the following + * conditions should be satisfied: + * + *

|dstsize.width - src.cols*2| <= (dstsize.width mod 2), |dstsize.height - src.rows*2| <= (dstsize.height mod 2)

+ * + * @see org.opencv.imgproc.Imgproc.pyrUp + */ + public static void pyrUp(Mat src, Mat dst, Size dstsize) + { + + pyrUp_1(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height); + + return; + } + +/** + *

Upsamples an image and then blurs it.

+ * + *

The function performs the upsampling step of the Gaussian pyramid + * construction, though it can actually be used to construct the Laplacian + * pyramid. First, it upsamples the source image by injecting even zero rows and + * columns and then convolves the result with the same kernel as in "pyrDown" + * multiplied by 4.

+ * + * @param src input image. + * @param dst output image. It has the specified size and the same type as + * src. + * + * @see org.opencv.imgproc.Imgproc.pyrUp + */ + public static void pyrUp(Mat src, Mat dst) + { + + pyrUp_2(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) + // + +/** + *

Applies a generic geometrical transformation to an image.

+ * + *

The function remap transforms the source image using the + * specified map:

+ * + *

dst(x,y) = src(map_x(x,y),map_y(x,y))

+ * + *

where values of pixels with non-integer coordinates are computed using one of + * the available interpolation methods. + * map_x and map_y can be encoded as separate floating-point + * maps in map_1 and map_2 respectively, or interleaved + * floating-point maps of (x,y) in map_1, or fixed-point maps + * created by using "convertMaps". The reason you might want to convert from + * floating to fixed-point representations of a map is that they can yield much + * faster (~2x) remapping operations. In the converted case, map_1 + * contains pairs (cvFloor(x), cvFloor(y)) and map_2 + * contains indices in a table of interpolation coefficients.

+ * + *

This function cannot operate in-place.

+ * + * @param src Source image. + * @param dst Destination image. It has the same size as map1 and + * the same type as src. + * @param map1 The first map of either (x,y) points or just + * x values having the type CV_16SC2, + * CV_32FC1, or CV_32FC2. See "convertMaps" for + * details on converting a floating point representation to fixed-point for + * speed. + * @param map2 The second map of y values having the type + * CV_16UC1, CV_32FC1, or none (empty map if + * map1 is (x,y) points), respectively. + * @param interpolation Interpolation method (see "resize"). The method + * INTER_AREA is not supported by this function. + * @param borderMode Pixel extrapolation method (see "borderInterpolate"). When + * borderMode=BORDER_TRANSPARENT, it means that the pixels in the + * destination image that corresponds to the "outliers" in the source image are + * not modified by the function. + * @param borderValue Value used in case of a constant border. By default, it is + * 0. + * + * @see org.opencv.imgproc.Imgproc.remap + */ + public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode, Scalar borderValue) + { + + remap_0(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); + + return; + } + +/** + *

Applies a generic geometrical transformation to an image.

+ * + *

The function remap transforms the source image using the + * specified map:

+ * + *

dst(x,y) = src(map_x(x,y),map_y(x,y))

+ * + *

where values of pixels with non-integer coordinates are computed using one of + * the available interpolation methods. + * map_x and map_y can be encoded as separate floating-point + * maps in map_1 and map_2 respectively, or interleaved + * floating-point maps of (x,y) in map_1, or fixed-point maps + * created by using "convertMaps". The reason you might want to convert from + * floating to fixed-point representations of a map is that they can yield much + * faster (~2x) remapping operations. In the converted case, map_1 + * contains pairs (cvFloor(x), cvFloor(y)) and map_2 + * contains indices in a table of interpolation coefficients.

+ * + *

This function cannot operate in-place.

+ * + * @param src Source image. + * @param dst Destination image. It has the same size as map1 and + * the same type as src. + * @param map1 The first map of either (x,y) points or just + * x values having the type CV_16SC2, + * CV_32FC1, or CV_32FC2. See "convertMaps" for + * details on converting a floating point representation to fixed-point for + * speed. + * @param map2 The second map of y values having the type + * CV_16UC1, CV_32FC1, or none (empty map if + * map1 is (x,y) points), respectively. + * @param interpolation Interpolation method (see "resize"). The method + * INTER_AREA is not supported by this function. + * + * @see org.opencv.imgproc.Imgproc.remap + */ + public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation) + { + + remap_1(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation); + + return; + } + + + // + // C++: void resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR) + // + +/** + *

Resizes an image.

+ * + *

The function resize resizes the image src down to + * or up to the specified size. Note that the initial dst type or + * size are not taken into account. Instead, the size and type are derived from + * src, dsize, fx, and fy. + * If you want to resize src so that it fits the pre-created + * dst, you may call the function as follows:

+ * + *

// C++ code:

+ * + *

// explicitly specify dsize=dst.size(); fx and fy will be computed from that.

+ * + *

resize(src, dst, dst.size(), 0, 0, interpolation);

+ * + *

If you want to decimate the image by factor of 2 in each direction, you can + * call the function this way:

+ * + *

// specify fx and fy and let the function compute the destination image size.

+ * + *

resize(src, dst, Size(), 0.5, 0.5, interpolation);

+ * + *

To shrink an image, it will generally look best with CV_INTER_AREA + * interpolation, whereas to enlarge an image, it will generally look best with + * CV_INTER_CUBIC (slow) or CV_INTER_LINEAR (faster but still looks OK). + *

+ * + * @param src input image. + * @param dst output image; it has the size dsize (when it is + * non-zero) or the size computed from src.size(), fx, + * and fy; the type of dst is the same as of + * src. + * @param dsize output image size; if it equals zero, it is computed as: + * + *

dsize = Size(round(fx*src.cols), round(fy*src.rows))

+ * + *

Either dsize or both fx and fy must be + * non-zero.

+ * @param fx scale factor along the horizontal axis; when it equals 0, it is + * computed as + * + *

(double)dsize.width/src.cols

+ * @param fy scale factor along the vertical axis; when it equals 0, it is + * computed as + * + *

(double)dsize.height/src.rows

+ * @param interpolation interpolation method: + *
    + *
  • INTER_NEAREST - a nearest-neighbor interpolation + *
  • INTER_LINEAR - a bilinear interpolation (used by default) + *
  • INTER_AREA - resampling using pixel area relation. It may be a + * preferred method for image decimation, as it gives moire'-free results. But + * when the image is zoomed, it is similar to the INTER_NEAREST + * method. + *
  • INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood + *
  • INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood + *
+ * + * @see org.opencv.imgproc.Imgproc.resize + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.imgproc.Imgproc#warpPerspective + */ + public static void resize(Mat src, Mat dst, Size dsize, double fx, double fy, int interpolation) + { + + resize_0(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, fx, fy, interpolation); + + return; + } + +/** + *
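Following the advice above on interpolation choices, a sketch of shrinking and enlarging by scale factors (src is a hypothetical input image):

Mat half  = new Mat(), twice = new Mat();
Imgproc.resize(src, half,  new Size(), 0.5, 0.5, Imgproc.INTER_AREA);   // shrink
Imgproc.resize(src, twice, new Size(), 2.0, 2.0, Imgproc.INTER_CUBIC);  // enlarge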

Resizes an image.

+ * + *

The function resize resizes the image src down to + * or up to the specified size. Note that the initial dst type or + * size are not taken into account. Instead, the size and type are derived from + * src, dsize, fx, and fy. + * If you want to resize src so that it fits the pre-created + * dst, you may call the function as follows:

+ * + *

// C++ code:

+ * + *

// explicitly specify dsize=dst.size(); fx and fy will be computed from that.

+ * + *

resize(src, dst, dst.size(), 0, 0, interpolation);

+ * + *

If you want to decimate the image by factor of 2 in each direction, you can + * call the function this way:

+ * + *

// specify fx and fy and let the function compute the destination image size.

+ * + *

resize(src, dst, Size(), 0.5, 0.5, interpolation);

+ * + *

To shrink an image, it will generally look best with CV_INTER_AREA + * interpolation, whereas to enlarge an image, it will generally look best with + * CV_INTER_CUBIC (slow) or CV_INTER_LINEAR (faster but still looks OK). + *

+ * + * @param src input image. + * @param dst output image; it has the size dsize (when it is + * non-zero) or the size computed from src.size(), fx, + * and fy; the type of dst is the same as of + * src. + * @param dsize output image size; if it equals zero, it is computed as: + * + *

dsize = Size(round(fx*src.cols), round(fy*src.rows))

+ * + *

Either dsize or both fx and fy must be + * non-zero.

+ * + * @see org.opencv.imgproc.Imgproc.resize + * @see org.opencv.imgproc.Imgproc#warpAffine + * @see org.opencv.imgproc.Imgproc#remap + * @see org.opencv.imgproc.Imgproc#warpPerspective + */ + public static void resize(Mat src, Mat dst, Size dsize) + { + + resize_1(src.nativeObj, dst.nativeObj, dsize.width, dsize.height); + + return; + } + + + // + // C++: void sepFilter2D(Mat src, Mat& dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT) + // + +/** + *

Applies a separable linear filter to an image.

+ * + *

The function applies a separable linear filter to the image. That is, first, + * every row of src is filtered with the 1D kernel + * kernelX. Then, every column of the result is filtered with the + * 1D kernel kernelY. The final result shifted by delta + * is stored in dst.

+ * + * @param src Source image. + * @param dst Destination image of the same size and the same number of channels + * as src. + * @param ddepth Destination image depth. The following combination of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the destination image will have the same depth + * as the source.

+ * @param kernelX Coefficients for filtering each row. + * @param kernelY Coefficients for filtering each column. + * @param anchor Anchor position within the kernel. The default value (-1, + * -1) means that the anchor is at the kernel center. + * @param delta Value added to the filtered results before storing them. + * @param borderType Pixel extrapolation method. See "borderInterpolate" for + * details. + * + * @see org.opencv.imgproc.Imgproc.sepFilter2D + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#Sobel + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#blur + * @see org.opencv.imgproc.Imgproc#filter2D + */ + public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta, int borderType) + { + + sepFilter2D_0(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y, delta, borderType); + + return; + } + +/** + *

Applies a separable linear filter to an image.

+ * + *

The function applies a separable linear filter to the image. That is, first, + * every row of src is filtered with the 1D kernel + * kernelX. Then, every column of the result is filtered with the + * 1D kernel kernelY. The final result shifted by delta + * is stored in dst.

+ * + * @param src Source image. + * @param dst Destination image of the same size and the same number of channels + * as src. + * @param ddepth Destination image depth. The following combination of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the destination image will have the same depth + * as the source.

+ * @param kernelX Coefficients for filtering each row. + * @param kernelY Coefficients for filtering each column. + * @param anchor Anchor position within the kernel. The default value (-1, + * -1) means that the anchor is at the kernel center. + * @param delta Value added to the filtered results before storing them. + * + * @see org.opencv.imgproc.Imgproc.sepFilter2D + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#Sobel + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#blur + * @see org.opencv.imgproc.Imgproc#filter2D + */ + public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta) + { + + sepFilter2D_1(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y, delta); + + return; + } + +/** + *

Applies a separable linear filter to an image.

+ * + *

The function applies a separable linear filter to the image. That is, first, + * every row of src is filtered with the 1D kernel + * kernelX. Then, every column of the result is filtered with the + * 1D kernel kernelY. The final result shifted by delta + * is stored in dst.

+ * + * @param src Source image. + * @param dst Destination image of the same size and the same number of channels + * as src. + * @param ddepth Destination image depth. The following combination of + * src.depth() and ddepth are supported: + *
    + *
  • src.depth() = CV_8U, ddepth = + * -1/CV_16S/CV_32F/CV_64F + *
  • src.depth() = CV_16U/CV_16S, + * ddepth = -1/CV_32F/CV_64F + *
  • src.depth() = CV_32F, ddepth = + * -1/CV_32F/CV_64F + *
  • src.depth() = CV_64F, ddepth = + * -1/CV_64F + *
+ * + *

when ddepth=-1, the destination image will have the same depth + * as the source.

+ * @param kernelX Coefficients for filtering each row. + * @param kernelY Coefficients for filtering each column. + * + * @see org.opencv.imgproc.Imgproc.sepFilter2D + * @see org.opencv.imgproc.Imgproc#GaussianBlur + * @see org.opencv.imgproc.Imgproc#Sobel + * @see org.opencv.imgproc.Imgproc#boxFilter + * @see org.opencv.imgproc.Imgproc#blur + * @see org.opencv.imgproc.Imgproc#filter2D + */ + public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY) + { + + sepFilter2D_2(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj); + + return; + } + + + // + // C++: double threshold(Mat src, Mat& dst, double thresh, double maxval, int type) + // + +/** + *

Applies a fixed-level threshold to each array element.

+ * + *

The function applies fixed-level thresholding to a single-channel array. The + * function is typically used to get a bi-level (binary) image out of a + * grayscale image ("compare" could also be used for this purpose) or for + * removing noise, that is, filtering out pixels with too small or too large + * values. There are several types of thresholding supported by the function. + * They are determined by type:

+ *
    + *
  • THRESH_BINARY + *
+ * + *

dst(x,y) = maxval if src(x,y) > thresh; 0 otherwise

+ * + *
    + *
  • THRESH_BINARY_INV + *
+ * + *

dst(x,y) = 0 if src(x,y) > thresh; maxval otherwise

+ * + *
    + *
  • THRESH_TRUNC + *
+ * + *

dst(x,y) = threshold if src(x,y) > thresh; src(x,y) otherwise

+ * + *
    + *
  • THRESH_TOZERO + *
+ * + *

dst(x,y) = src(x,y) if src(x,y) > thresh; 0 otherwise

+ * + *
    + *
  • THRESH_TOZERO_INV + *
+ * + *

dst(x,y) = 0 if src(x,y) > thresh; src(x,y) otherwise

+ * + *

Also, the special value THRESH_OTSU may be combined with one of + * the above values. In this case, the function determines the optimal threshold + * value using Otsu's algorithm and uses it instead of the specified + * thresh. + * The function returns the computed threshold value. + * Currently, Otsu's method is implemented only for 8-bit images.

+ * + * @param src input array (single-channel, 8-bit or 32-bit floating point). + * @param dst output array of the same size and type as src. + * @param thresh threshold value. + * @param maxval maximum value to use with the THRESH_BINARY and + * THRESH_BINARY_INV thresholding types. + * @param type thresholding type (see the details below). + * + * @see org.opencv.imgproc.Imgproc.threshold + * @see org.opencv.imgproc.Imgproc#findContours + * @see org.opencv.core.Core#max + * @see org.opencv.imgproc.Imgproc#adaptiveThreshold + * @see org.opencv.core.Core#compare + * @see org.opencv.core.Core#min + */ + public static double threshold(Mat src, Mat dst, double thresh, double maxval, int type) + { + + double retVal = threshold_0(src.nativeObj, dst.nativeObj, thresh, maxval, type); + + return retVal; + } + + + // + // C++: void undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat()) + // + +/** + *
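A sketch of the THRESH_OTSU combination described above (gray is a hypothetical 8-bit single-channel image; the thresh argument is ignored when Otsu is requested):

Mat bw = new Mat();
double otsu = Imgproc.threshold(gray, bw, 0, 255,
        Imgproc.THRESH_BINARY + Imgproc.THRESH_OTSU);
// otsu holds the threshold value that Otsu's method computed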

Transforms an image to compensate for lens distortion.

+ * + *

The function transforms an image to compensate for radial and tangential lens + * distortion.

+ * + *

The function is simply a combination of "initUndistortRectifyMap" (with unity + * R) and "remap" (with bilinear interpolation). See the former + * function for details of the transformation being performed.

+ * + *

Those pixels in the destination image, for which there are no corresponding + * pixels in the source image, are filled with zeros (black color).

+ * + *

A particular subset of the source image that will be visible in the corrected + * image can be regulated by newCameraMatrix. You can use + * "getOptimalNewCameraMatrix" to compute the appropriate newCameraMatrix + * depending on your requirements.

+ * + *

The camera matrix and the distortion parameters can be determined using + * "calibrateCamera". If the resolution of images is different from the + * resolution used at the calibration stage, f_x, f_y, c_x and + * c_y need to be scaled accordingly, while the distortion coefficients + * remain the same.

+ * + * @param src Input (distorted) image. + * @param dst Output (corrected) image that has the same size and type as + * src. + * @param cameraMatrix Input camera matrix A = + *

    | f_x  0   c_x |
    |  0  f_y  c_y |
    |  0   0    1  |

@param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1,
  p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is
  NULL/empty, zero distortion coefficients are assumed.
@param newCameraMatrix Camera matrix of the distorted image. By default, it
  is the same as cameraMatrix, but you may additionally scale and shift the
  result by using a different matrix.

@see org.opencv.imgproc.Imgproc#undistort

public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix)
{
    undistort_0(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, newCameraMatrix.nativeObj);
}
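
A short usage sketch for the overload above, not part of the patch; the intrinsics and distortion coefficients below are placeholders, since in practice they come from calibrateCamera:

    import org.opencv.core.*;
    import org.opencv.imgproc.Imgproc;

    public class UndistortSketch {
        public static Mat undistortFrame(Mat frame) {
            // 3x3 camera matrix; focal lengths and principal point are placeholders.
            Mat cameraMatrix = new Mat(3, 3, CvType.CV_64F);
            cameraMatrix.put(0, 0,
                    500.0, 0.0, 320.0,
                    0.0, 500.0, 240.0,
                    0.0, 0.0, 1.0);
            // Five distortion coefficients (k1, k2, p1, p2, k3), also placeholders.
            MatOfDouble distCoeffs = new MatOfDouble(-0.2, 0.05, 0.0, 0.0, 0.0);
            Mat corrected = new Mat();
            Imgproc.undistort(frame, corrected, cameraMatrix, distCoeffs);
            // 'corrected' has the same size/type as 'frame'; pixels with no
            // source correspondence are filled with black.
            return corrected;
        }
    }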

Transforms an image to compensate for lens distortion.

The function transforms an image to compensate for radial and tangential
lens distortion.

The function is simply a combination of "initUndistortRectifyMap" (with
unity R) and "remap" (with bilinear interpolation). See the former function
for details of the transformation being performed.

Pixels in the destination image for which there are no corresponding pixels
in the source image are filled with zeros (black).

The particular subset of the source image that will be visible in the
corrected image can be regulated by newCameraMatrix. You can use
"getOptimalNewCameraMatrix" to compute an appropriate newCameraMatrix for
your requirements.

The camera matrix and the distortion parameters can be determined using
"calibrateCamera". If the resolution of the images differs from the
resolution used at the calibration stage, f_x, f_y, c_x and c_y need to be
scaled accordingly, while the distortion coefficients remain the same.

@param src Input (distorted) image.
@param dst Output (corrected) image that has the same size and type as src.
@param cameraMatrix Input camera matrix A =

    | f_x  0   c_x |
    |  0  f_y  c_y |
    |  0   0    1  |

@param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1,
  p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is
  NULL/empty, zero distortion coefficients are assumed.

@see org.opencv.imgproc.Imgproc#undistort

public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs)
{
    undistort_1(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj);
}


// C++: void undistortPoints(vector_Point2f src, vector_Point2f& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat())

Computes the ideal point coordinates from the observed point coordinates.

The function is similar to "undistort" and "initUndistortRectifyMap" but it
operates on a sparse set of points instead of a raster image. The function
also performs a transformation inverse to "projectPoints". In case of a 3D
object, it does not reconstruct its 3D coordinates, but for a planar object
it does, up to a translation vector, if the proper R is specified.

    // C++ code:
    // (u,v) is the input point, (u',v') is the output point
    // camera_matrix = [fx 0 cx; 0 fy cy; 0 0 1]
    // P = [fx' 0 cx' tx; 0 fy' cy' ty; 0 0 1 tz]
    x" = (u - cx)/fx
    y" = (v - cy)/fy
    (x',y') = undistort(x",y",dist_coeffs)
    [X,Y,W]^T = R*[x' y' 1]^T
    x = X/W, y = Y/W
    // performed only if P = [fx' 0 cx' [tx]; 0 fy' cy' [ty]; 0 0 1 [tz]] is specified:
    u' = x*fx' + cx'
    v' = y*fy' + cy'

where undistort() is an approximate iterative algorithm that estimates the
normalized original point coordinates from the normalized distorted point
coordinates ("normalized" means that the coordinates do not depend on the
camera matrix).

The function can be used both for a stereo camera head and for a monocular
camera (when R is empty).

@param src Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or
  CV_64FC2).
@param dst Output ideal point coordinates after undistortion and reverse
  perspective transformation. If matrix P is identity or omitted, dst will
  contain normalized point coordinates.
@param cameraMatrix Camera matrix

    | f_x  0   c_x |
    |  0  f_y  c_y |
    |  0   0    1  |

@param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1,
  p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is
  NULL/empty, zero distortion coefficients are assumed.
@param R Rectification transformation in the object space (3x3 matrix).
  R1 or R2 computed by "stereoRectify" can be passed here. If the matrix is
  empty, the identity transformation is used.
@param P New camera matrix (3x3) or new projection matrix (3x4). P1 or P2
  computed by "stereoRectify" can be passed here. If the matrix is empty,
  the identity new camera matrix is used.

@see org.opencv.imgproc.Imgproc#undistortPoints

public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P)
{
    undistortPoints_0(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, P.nativeObj);
}
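
As a hedged sketch of the sparse-point variant (helper name is invented; cameraMatrix and distCoeffs are assumed to come from calibration as above):

    import org.opencv.core.*;
    import org.opencv.imgproc.Imgproc;

    public class UndistortPointsSketch {
        public static MatOfPoint2f toNormalized(MatOfPoint2f pixelPoints,
                                                Mat cameraMatrix, MatOfDouble distCoeffs) {
            MatOfPoint2f ideal = new MatOfPoint2f();
            // With R and P omitted, the output contains normalized coordinates:
            // x = (u - cx)/fx and y = (v - cy)/fy, corrected for distortion.
            Imgproc.undistortPoints(pixelPoints, ideal, cameraMatrix, distCoeffs);
            return ideal;
        }
    }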

Computes the ideal point coordinates from the observed point coordinates.

The function is similar to "undistort" and "initUndistortRectifyMap" but it
operates on a sparse set of points instead of a raster image. The function
also performs a transformation inverse to "projectPoints". In case of a 3D
object, it does not reconstruct its 3D coordinates, but for a planar object
it does, up to a translation vector, if the proper R is specified.

    // C++ code:
    // (u,v) is the input point, (u',v') is the output point
    // camera_matrix = [fx 0 cx; 0 fy cy; 0 0 1]
    // P = [fx' 0 cx' tx; 0 fy' cy' ty; 0 0 1 tz]
    x" = (u - cx)/fx
    y" = (v - cy)/fy
    (x',y') = undistort(x",y",dist_coeffs)
    [X,Y,W]^T = R*[x' y' 1]^T
    x = X/W, y = Y/W
    // performed only if P = [fx' 0 cx' [tx]; 0 fy' cy' [ty]; 0 0 1 [tz]] is specified:
    u' = x*fx' + cx'
    v' = y*fy' + cy'

where undistort() is an approximate iterative algorithm that estimates the
normalized original point coordinates from the normalized distorted point
coordinates ("normalized" means that the coordinates do not depend on the
camera matrix).

The function can be used both for a stereo camera head and for a monocular
camera (when R is empty).

@param src Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or
  CV_64FC2).
@param dst Output ideal point coordinates after undistortion and reverse
  perspective transformation. If matrix P is identity or omitted, dst will
  contain normalized point coordinates.
@param cameraMatrix Camera matrix

    | f_x  0   c_x |
    |  0  f_y  c_y |
    |  0   0    1  |

@param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1,
  p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is
  NULL/empty, zero distortion coefficients are assumed.

@see org.opencv.imgproc.Imgproc#undistortPoints

public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs)
{
    undistortPoints_1(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj);
}


// C++: void warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())

Applies an affine transformation to an image.

The function warpAffine transforms the source image using the specified
matrix:

    dst(x,y) = src(M_11*x + M_12*y + M_13, M_21*x + M_22*y + M_23)

when the flag WARP_INVERSE_MAP is set. Otherwise, the transformation is
first inverted with "invertAffineTransform" and then put in the formula
above instead of M. The function cannot operate in-place.

Note: cvGetQuadrangleSubPix is similar to cvWarpAffine, but the outliers
are extrapolated using the replication border mode.

@param src input image.
@param dst output image that has the size dsize and the same type as src.
@param M 2x3 transformation matrix.
@param dsize size of the output image.
@param flags combination of interpolation methods (see "resize") and the
  optional flag WARP_INVERSE_MAP, which means that M is the inverse
  transformation (dst -> src).
@param borderMode pixel extrapolation method (see "borderInterpolate"); when
  borderMode = BORDER_TRANSPARENT, the pixels in the destination image that
  correspond to the "outliers" in the source image are not modified by the
  function.
@param borderValue value used in case of a constant border; by default, it
  is 0.

@see org.opencv.imgproc.Imgproc#warpAffine
@see org.opencv.imgproc.Imgproc#remap
@see org.opencv.imgproc.Imgproc#warpPerspective
@see org.opencv.imgproc.Imgproc#getRectSubPix
@see org.opencv.imgproc.Imgproc#resize
@see org.opencv.core.Core#transform

public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode, Scalar borderValue)
{
    warpAffine_0(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
}
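
A minimal sketch of a common use, rotation about the image center, built with getRotationMatrix2D (which this binding also exposes); the class and method names are invented:

    import org.opencv.core.*;
    import org.opencv.imgproc.Imgproc;

    public class WarpAffineSketch {
        public static Mat rotate(Mat src, double degrees) {
            // Build a 2x3 rotation matrix about the image center (scale = 1).
            Point center = new Point(src.cols() / 2.0, src.rows() / 2.0);
            Mat M = Imgproc.getRotationMatrix2D(center, degrees, 1.0);
            Mat dst = new Mat();
            // Bilinear interpolation; output keeps the input size.
            Imgproc.warpAffine(src, dst, M, src.size(), Imgproc.INTER_LINEAR);
            return dst;
        }
    }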

Applies an affine transformation to an image.

The function warpAffine transforms the source image using the specified
matrix:

    dst(x,y) = src(M_11*x + M_12*y + M_13, M_21*x + M_22*y + M_23)

when the flag WARP_INVERSE_MAP is set. Otherwise, the transformation is
first inverted with "invertAffineTransform" and then put in the formula
above instead of M. The function cannot operate in-place.

Note: cvGetQuadrangleSubPix is similar to cvWarpAffine, but the outliers
are extrapolated using the replication border mode.

@param src input image.
@param dst output image that has the size dsize and the same type as src.
@param M 2x3 transformation matrix.
@param dsize size of the output image.
@param flags combination of interpolation methods (see "resize") and the
  optional flag WARP_INVERSE_MAP, which means that M is the inverse
  transformation (dst -> src).

@see org.opencv.imgproc.Imgproc#warpAffine
@see org.opencv.imgproc.Imgproc#remap
@see org.opencv.imgproc.Imgproc#warpPerspective
@see org.opencv.imgproc.Imgproc#getRectSubPix
@see org.opencv.imgproc.Imgproc#resize
@see org.opencv.core.Core#transform

public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags)
{
    warpAffine_1(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags);
}

Applies an affine transformation to an image.

The function warpAffine transforms the source image using the specified
matrix:

    dst(x,y) = src(M_11*x + M_12*y + M_13, M_21*x + M_22*y + M_23)

when the flag WARP_INVERSE_MAP is set. Otherwise, the transformation is
first inverted with "invertAffineTransform" and then put in the formula
above instead of M. The function cannot operate in-place.

Note: cvGetQuadrangleSubPix is similar to cvWarpAffine, but the outliers
are extrapolated using the replication border mode.

@param src input image.
@param dst output image that has the size dsize and the same type as src.
@param M 2x3 transformation matrix.
@param dsize size of the output image.

@see org.opencv.imgproc.Imgproc#warpAffine
@see org.opencv.imgproc.Imgproc#remap
@see org.opencv.imgproc.Imgproc#warpPerspective
@see org.opencv.imgproc.Imgproc#getRectSubPix
@see org.opencv.imgproc.Imgproc#resize
@see org.opencv.core.Core#transform

public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize)
{
    warpAffine_2(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height);
}


// C++: void warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())

Applies a perspective transformation to an image.

The function warpPerspective transforms the source image using the
specified matrix:

    dst(x,y) = src( (M_11*x + M_12*y + M_13)/(M_31*x + M_32*y + M_33),
                    (M_21*x + M_22*y + M_23)/(M_31*x + M_32*y + M_33) )

when the flag WARP_INVERSE_MAP is set. Otherwise, the transformation is
first inverted with "invert" and then put in the formula above instead of
M. The function cannot operate in-place.

@param src input image.
@param dst output image that has the size dsize and the same type as src.
@param M 3x3 transformation matrix.
@param dsize size of the output image.
@param flags combination of interpolation methods (INTER_LINEAR or
  INTER_NEAREST) and the optional flag WARP_INVERSE_MAP, which sets M as
  the inverse transformation (dst -> src).
@param borderMode pixel extrapolation method (BORDER_CONSTANT or
  BORDER_REPLICATE).
@param borderValue value used in case of a constant border; by default, it
  equals 0.

@see org.opencv.imgproc.Imgproc#warpPerspective
@see org.opencv.imgproc.Imgproc#warpAffine
@see org.opencv.imgproc.Imgproc#remap
@see org.opencv.core.Core#perspectiveTransform
@see org.opencv.imgproc.Imgproc#getRectSubPix
@see org.opencv.imgproc.Imgproc#resize

public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode, Scalar borderValue)
{
    warpPerspective_0(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
}
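
A short sketch of the usual pairing with getPerspectiveTransform (also bound in this file): rectifying a quadrilateral into an upright rectangle. Not from the patch; the 300x400 output size and corner layout are invented:

    import org.opencv.core.*;
    import org.opencv.imgproc.Imgproc;

    public class WarpPerspectiveSketch {
        public static Mat rectify(Mat src, MatOfPoint2f observedCorners) {
            // Map four observed corners onto an upright 300x400 rectangle.
            MatOfPoint2f target = new MatOfPoint2f(
                    new Point(0, 0), new Point(300, 0),
                    new Point(300, 400), new Point(0, 400));
            Mat M = Imgproc.getPerspectiveTransform(observedCorners, target);
            Mat dst = new Mat();
            Imgproc.warpPerspective(src, dst, M, new Size(300, 400));
            return dst;
        }
    }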

Applies a perspective transformation to an image.

The function warpPerspective transforms the source image using the
specified matrix:

    dst(x,y) = src( (M_11*x + M_12*y + M_13)/(M_31*x + M_32*y + M_33),
                    (M_21*x + M_22*y + M_23)/(M_31*x + M_32*y + M_33) )

when the flag WARP_INVERSE_MAP is set. Otherwise, the transformation is
first inverted with "invert" and then put in the formula above instead of
M. The function cannot operate in-place.

@param src input image.
@param dst output image that has the size dsize and the same type as src.
@param M 3x3 transformation matrix.
@param dsize size of the output image.
@param flags combination of interpolation methods (INTER_LINEAR or
  INTER_NEAREST) and the optional flag WARP_INVERSE_MAP, which sets M as
  the inverse transformation (dst -> src).

@see org.opencv.imgproc.Imgproc#warpPerspective
@see org.opencv.imgproc.Imgproc#warpAffine
@see org.opencv.imgproc.Imgproc#remap
@see org.opencv.core.Core#perspectiveTransform
@see org.opencv.imgproc.Imgproc#getRectSubPix
@see org.opencv.imgproc.Imgproc#resize

public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags)
{
    warpPerspective_1(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags);
}

Applies a perspective transformation to an image.

The function warpPerspective transforms the source image using the
specified matrix:

    dst(x,y) = src( (M_11*x + M_12*y + M_13)/(M_31*x + M_32*y + M_33),
                    (M_21*x + M_22*y + M_23)/(M_31*x + M_32*y + M_33) )

when the flag WARP_INVERSE_MAP is set. Otherwise, the transformation is
first inverted with "invert" and then put in the formula above instead of
M. The function cannot operate in-place.

@param src input image.
@param dst output image that has the size dsize and the same type as src.
@param M 3x3 transformation matrix.
@param dsize size of the output image.

@see org.opencv.imgproc.Imgproc#warpPerspective
@see org.opencv.imgproc.Imgproc#warpAffine
@see org.opencv.imgproc.Imgproc#remap
@see org.opencv.core.Core#perspectiveTransform
@see org.opencv.imgproc.Imgproc#getRectSubPix
@see org.opencv.imgproc.Imgproc#resize

public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize)
{
    warpPerspective_2(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height);
}


// C++: void watershed(Mat image, Mat& markers)

Performs a marker-based image segmentation using the watershed algorithm.

The function implements one of the variants of the watershed non-parametric
marker-based segmentation algorithm described in [Meyer92].

Before passing the image to the function, you have to roughly outline the
desired regions in the image markers with positive (>0) indices. So, every
region is represented as one or more connected components with the pixel
values 1, 2, 3, and so on. Such markers can be retrieved from a binary mask
using "findContours" and "drawContours" (see the watershed.cpp demo). The
markers are "seeds" of the future image regions. All the other pixels in
markers, whose relation to the outlined regions is not known and should be
defined by the algorithm, should be set to 0. In the function output, each
pixel in markers is set to the value of the "seed" component it belongs to,
or to -1 at boundaries between the regions.

A visual demonstration and usage example of the function can be found in
the OpenCV samples directory (see the watershed.cpp demo).

Note: two neighboring connected components are not necessarily separated by
a watershed boundary (-1 pixels); for example, they can touch each other in
the initial marker image passed to the function.
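
Since preparing the markers is the subtle part, a minimal sketch before the parameter listing; seed positions and labels are purely illustrative:

    import org.opencv.core.*;
    import org.opencv.imgproc.Imgproc;

    public class WatershedSketch {
        public static Mat segment(Mat bgr) {
            // Markers: 32-bit single-channel, same size as the image, all zeros
            // except for a few hand-placed seed discs.
            Mat markers = Mat.zeros(bgr.size(), CvType.CV_32SC1);
            Core.circle(markers, new Point(50, 50), 5, new Scalar(1), -1);   // seed for region 1
            Core.circle(markers, new Point(200, 150), 5, new Scalar(2), -1); // seed for region 2
            Imgproc.watershed(bgr, markers);
            // markers now holds 1, 2, ... per region and -1 on boundaries.
            return markers;
        }
    }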

+ * + * @param image Input 8-bit 3-channel image. + * @param markers Input/output 32-bit single-channel image (map) of markers. It + * should have the same size as image. + * + * @see org.opencv.imgproc.Imgproc.watershed + * @see org.opencv.imgproc.Imgproc#findContours + */ + public static void watershed(Mat image, Mat markers) + { + + watershed_0(image.nativeObj, markers.nativeObj); + + return; + } + + + + + // C++: void Canny(Mat image, Mat& edges, double threshold1, double threshold2, int apertureSize = 3, bool L2gradient = false) + private static native void Canny_0(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2, int apertureSize, boolean L2gradient); + private static native void Canny_1(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2); + + // C++: void GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, int borderType = BORDER_DEFAULT) + private static native void GaussianBlur_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX, double sigmaY, int borderType); + private static native void GaussianBlur_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX, double sigmaY); + private static native void GaussianBlur_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX); + + // C++: void HoughCircles(Mat image, Mat& circles, int method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0) + private static native void HoughCircles_0(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1, double param2, int minRadius, int maxRadius); + private static native void HoughCircles_1(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist); + + // C++: void HoughLines(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0) + private static native void HoughLines_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn); + private static native void HoughLines_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold); + + // C++: void HoughLinesP(Mat image, Mat& lines, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0) + private static native void HoughLinesP_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double minLineLength, double maxLineGap); + private static native void HoughLinesP_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold); + + // C++: void HuMoments(Moments m, Mat& hu) + private static native void HuMoments_0(long m_nativeObj, long hu_nativeObj); + + // C++: void Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) + private static native void Laplacian_0(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale, double delta, int borderType); + private static native void Laplacian_1(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale, double delta); + private static native void Laplacian_2(long src_nativeObj, long dst_nativeObj, int ddepth); + + // C++: double PSNR(Mat src1, Mat src2) + private static native double PSNR_0(long src1_nativeObj, long src2_nativeObj); + + // C++: void Scharr(Mat 
src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) + private static native void Scharr_0(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale, double delta, int borderType); + private static native void Scharr_1(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale, double delta); + private static native void Scharr_2(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy); + + // C++: void Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) + private static native void Sobel_0(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType); + private static native void Sobel_1(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale, double delta); + private static native void Sobel_2(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy); + + // C++: void accumulate(Mat src, Mat& dst, Mat mask = Mat()) + private static native void accumulate_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void accumulate_1(long src_nativeObj, long dst_nativeObj); + + // C++: void accumulateProduct(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) + private static native void accumulateProduct_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void accumulateProduct_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); + + // C++: void accumulateSquare(Mat src, Mat& dst, Mat mask = Mat()) + private static native void accumulateSquare_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj); + private static native void accumulateSquare_1(long src_nativeObj, long dst_nativeObj); + + // C++: void accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat()) + private static native void accumulateWeighted_0(long src_nativeObj, long dst_nativeObj, double alpha, long mask_nativeObj); + private static native void accumulateWeighted_1(long src_nativeObj, long dst_nativeObj, double alpha); + + // C++: void adaptiveThreshold(Mat src, Mat& dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C) + private static native void adaptiveThreshold_0(long src_nativeObj, long dst_nativeObj, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C); + + // C++: void approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed) + private static native void approxPolyDP_0(long curve_mat_nativeObj, long approxCurve_mat_nativeObj, double epsilon, boolean closed); + + // C++: double arcLength(vector_Point2f curve, bool closed) + private static native double arcLength_0(long curve_mat_nativeObj, boolean closed); + + // C++: void bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT) + private static native void bilateralFilter_0(long src_nativeObj, long dst_nativeObj, int d, double sigmaColor, double sigmaSpace, int borderType); + private static native void bilateralFilter_1(long src_nativeObj, long dst_nativeObj, int d, double sigmaColor, double sigmaSpace); + + // C++: void blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT) + private static native void blur_0(long src_nativeObj, long dst_nativeObj, double 
ksize_width, double ksize_height, double anchor_x, double anchor_y, int borderType); + private static native void blur_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double anchor_x, double anchor_y); + private static native void blur_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height); + + // C++: int borderInterpolate(int p, int len, int borderType) + private static native int borderInterpolate_0(int p, int len, int borderType); + + // C++: Rect boundingRect(vector_Point points) + private static native double[] boundingRect_0(long points_mat_nativeObj); + + // C++: void boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, int borderType = BORDER_DEFAULT) + private static native void boxFilter_0(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize, int borderType); + private static native void boxFilter_1(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize); + private static native void boxFilter_2(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height); + + // C++: void calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale) + private static native void calcBackProject_0(long images_mat_nativeObj, long channels_mat_nativeObj, long hist_nativeObj, long dst_nativeObj, long ranges_mat_nativeObj, double scale); + + // C++: void calcHist(vector_Mat images, vector_int channels, Mat mask, Mat& hist, vector_int histSize, vector_float ranges, bool accumulate = false) + private static native void calcHist_0(long images_mat_nativeObj, long channels_mat_nativeObj, long mask_nativeObj, long hist_nativeObj, long histSize_mat_nativeObj, long ranges_mat_nativeObj, boolean accumulate); + private static native void calcHist_1(long images_mat_nativeObj, long channels_mat_nativeObj, long mask_nativeObj, long hist_nativeObj, long histSize_mat_nativeObj, long ranges_mat_nativeObj); + + // C++: double compareHist(Mat H1, Mat H2, int method) + private static native double compareHist_0(long H1_nativeObj, long H2_nativeObj, int method); + + // C++: double contourArea(Mat contour, bool oriented = false) + private static native double contourArea_0(long contour_nativeObj, boolean oriented); + private static native double contourArea_1(long contour_nativeObj); + + // C++: void convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false) + private static native void convertMaps_0(long map1_nativeObj, long map2_nativeObj, long dstmap1_nativeObj, long dstmap2_nativeObj, int dstmap1type, boolean nninterpolation); + private static native void convertMaps_1(long map1_nativeObj, long map2_nativeObj, long dstmap1_nativeObj, long dstmap2_nativeObj, int dstmap1type); + + // C++: void convexHull(vector_Point points, vector_int& hull, bool clockwise = false, _hidden_ returnPoints = true) + private static native void convexHull_0(long points_mat_nativeObj, long hull_mat_nativeObj, boolean clockwise); + private static native void convexHull_1(long points_mat_nativeObj, long hull_mat_nativeObj); + + // C++: void convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects) + private static native void convexityDefects_0(long contour_mat_nativeObj, long 
convexhull_mat_nativeObj, long convexityDefects_mat_nativeObj); + + // C++: void copyMakeBorder(Mat src, Mat& dst, int top, int bottom, int left, int right, int borderType, Scalar value = Scalar()) + private static native void copyMakeBorder_0(long src_nativeObj, long dst_nativeObj, int top, int bottom, int left, int right, int borderType, double value_val0, double value_val1, double value_val2, double value_val3); + private static native void copyMakeBorder_1(long src_nativeObj, long dst_nativeObj, int top, int bottom, int left, int right, int borderType); + + // C++: void cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, int borderType = BORDER_DEFAULT) + private static native void cornerEigenValsAndVecs_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, int borderType); + private static native void cornerEigenValsAndVecs_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize); + + // C++: void cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, int borderType = BORDER_DEFAULT) + private static native void cornerHarris_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, double k, int borderType); + private static native void cornerHarris_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, double k); + + // C++: void cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, int borderType = BORDER_DEFAULT) + private static native void cornerMinEigenVal_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, int borderType); + private static native void cornerMinEigenVal_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize); + private static native void cornerMinEigenVal_2(long src_nativeObj, long dst_nativeObj, int blockSize); + + // C++: void cornerSubPix(Mat image, vector_Point2f& corners, Size winSize, Size zeroZone, TermCriteria criteria) + private static native void cornerSubPix_0(long image_nativeObj, long corners_mat_nativeObj, double winSize_width, double winSize_height, double zeroZone_width, double zeroZone_height, int criteria_type, int criteria_maxCount, double criteria_epsilon); + + // C++: void createHanningWindow(Mat& dst, Size winSize, int type) + private static native void createHanningWindow_0(long dst_nativeObj, double winSize_width, double winSize_height, int type); + + // C++: void cvtColor(Mat src, Mat& dst, int code, int dstCn = 0) + private static native void cvtColor_0(long src_nativeObj, long dst_nativeObj, int code, int dstCn); + private static native void cvtColor_1(long src_nativeObj, long dst_nativeObj, int code); + + // C++: void dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) + private static native void dilate_0(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); + private static native void dilate_1(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations); + private static native void dilate_2(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj); + + // C++: void distanceTransform(Mat src, Mat& dst, int distanceType, int maskSize) + private static native void distanceTransform_0(long src_nativeObj, long dst_nativeObj, int distanceType, int maskSize); + + // C++: 
void distanceTransform(Mat src, Mat& dst, Mat& labels, int distanceType, int maskSize, int labelType = DIST_LABEL_CCOMP) + private static native void distanceTransformWithLabels_0(long src_nativeObj, long dst_nativeObj, long labels_nativeObj, int distanceType, int maskSize, int labelType); + private static native void distanceTransformWithLabels_1(long src_nativeObj, long dst_nativeObj, long labels_nativeObj, int distanceType, int maskSize); + + // C++: void drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, int lineType = 8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point()) + private static native void drawContours_0(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, long hierarchy_nativeObj, int maxLevel, double offset_x, double offset_y); + private static native void drawContours_1(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); + private static native void drawContours_2(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3); + + // C++: void equalizeHist(Mat src, Mat& dst) + private static native void equalizeHist_0(long src_nativeObj, long dst_nativeObj); + + // C++: void erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) + private static native void erode_0(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); + private static native void erode_1(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations); + private static native void erode_2(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj); + + // C++: void filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT) + private static native void filter2D_0(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y, double delta, int borderType); + private static native void filter2D_1(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y, double delta); + private static native void filter2D_2(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj); + + // C++: void findContours(Mat& image, vector_vector_Point& contours, Mat& hierarchy, int mode, int method, Point offset = Point()) + private static native void findContours_0(long image_nativeObj, long contours_mat_nativeObj, long hierarchy_nativeObj, int mode, int method, double offset_x, double offset_y); + private static native void findContours_1(long image_nativeObj, long contours_mat_nativeObj, long hierarchy_nativeObj, int mode, int method); + + // C++: RotatedRect fitEllipse(vector_Point2f points) + private static native double[] fitEllipse_0(long points_mat_nativeObj); + + // C++: void fitLine(Mat points, Mat& line, int distType, double param, double reps, double aeps) + private static native void fitLine_0(long points_nativeObj, long 
line_nativeObj, int distType, double param, double reps, double aeps); + + // C++: int floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4) + private static native int floodFill_0(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out, double loDiff_val0, double loDiff_val1, double loDiff_val2, double loDiff_val3, double upDiff_val0, double upDiff_val1, double upDiff_val2, double upDiff_val3, int flags); + private static native int floodFill_1(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3); + + // C++: Mat getAffineTransform(vector_Point2f src, vector_Point2f dst) + private static native long getAffineTransform_0(long src_mat_nativeObj, long dst_mat_nativeObj); + + // C++: Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false) + private static native long getDefaultNewCameraMatrix_0(long cameraMatrix_nativeObj, double imgsize_width, double imgsize_height, boolean centerPrincipalPoint); + private static native long getDefaultNewCameraMatrix_1(long cameraMatrix_nativeObj); + + // C++: void getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F) + private static native void getDerivKernels_0(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize, boolean normalize, int ktype); + private static native void getDerivKernels_1(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize); + + // C++: Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F) + private static native long getGaborKernel_0(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma, double psi, int ktype); + private static native long getGaborKernel_1(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma); + + // C++: Mat getGaussianKernel(int ksize, double sigma, int ktype = CV_64F) + private static native long getGaussianKernel_0(int ksize, double sigma, int ktype); + private static native long getGaussianKernel_1(int ksize, double sigma); + + // C++: Mat getPerspectiveTransform(Mat src, Mat dst) + private static native long getPerspectiveTransform_0(long src_nativeObj, long dst_nativeObj); + + // C++: void getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1) + private static native void getRectSubPix_0(long image_nativeObj, double patchSize_width, double patchSize_height, double center_x, double center_y, long patch_nativeObj, int patchType); + private static native void getRectSubPix_1(long image_nativeObj, double patchSize_width, double patchSize_height, double center_x, double center_y, long patch_nativeObj); + + // C++: Mat getRotationMatrix2D(Point2f center, double angle, double scale) + private static native long getRotationMatrix2D_0(double center_x, double center_y, double angle, double scale); + + // C++: Mat getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1)) + private static native long getStructuringElement_0(int shape, double ksize_width, double ksize_height, double anchor_x, double anchor_y); + private static native long getStructuringElement_1(int shape, 
double ksize_width, double ksize_height); + + // C++: void goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask = Mat(), int blockSize = 3, bool useHarrisDetector = false, double k = 0.04) + private static native void goodFeaturesToTrack_0(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, boolean useHarrisDetector, double k); + private static native void goodFeaturesToTrack_1(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance); + + // C++: void grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL) + private static native void grabCut_0(long img_nativeObj, long mask_nativeObj, int rect_x, int rect_y, int rect_width, int rect_height, long bgdModel_nativeObj, long fgdModel_nativeObj, int iterCount, int mode); + private static native void grabCut_1(long img_nativeObj, long mask_nativeObj, int rect_x, int rect_y, int rect_width, int rect_height, long bgdModel_nativeObj, long fgdModel_nativeObj, int iterCount); + + // C++: void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2) + private static native void initUndistortRectifyMap_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long newCameraMatrix_nativeObj, double size_width, double size_height, int m1type, long map1_nativeObj, long map2_nativeObj); + + // C++: float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat& map1, Mat& map2, int projType = PROJ_SPHERICAL_EQRECT, double alpha = 0) + private static native float initWideAngleProjMap_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, int destImageWidth, int m1type, long map1_nativeObj, long map2_nativeObj, int projType, double alpha); + private static native float initWideAngleProjMap_1(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, int destImageWidth, int m1type, long map1_nativeObj, long map2_nativeObj); + + // C++: void integral(Mat src, Mat& sum, int sdepth = -1) + private static native void integral_0(long src_nativeObj, long sum_nativeObj, int sdepth); + private static native void integral_1(long src_nativeObj, long sum_nativeObj); + + // C++: void integral(Mat src, Mat& sum, Mat& sqsum, int sdepth = -1) + private static native void integral2_0(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, int sdepth); + private static native void integral2_1(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj); + + // C++: void integral(Mat src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth = -1) + private static native void integral3_0(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj, int sdepth); + private static native void integral3_1(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj); + + // C++: float intersectConvexConvex(Mat _p1, Mat _p2, Mat& _p12, bool handleNested = true) + private static native float intersectConvexConvex_0(long _p1_nativeObj, long _p2_nativeObj, long _p12_nativeObj, boolean handleNested); + private static native float intersectConvexConvex_1(long _p1_nativeObj, long _p2_nativeObj, long _p12_nativeObj); + + // C++: void invertAffineTransform(Mat 
M, Mat& iM) + private static native void invertAffineTransform_0(long M_nativeObj, long iM_nativeObj); + + // C++: bool isContourConvex(vector_Point contour) + private static native boolean isContourConvex_0(long contour_mat_nativeObj); + + // C++: double matchShapes(Mat contour1, Mat contour2, int method, double parameter) + private static native double matchShapes_0(long contour1_nativeObj, long contour2_nativeObj, int method, double parameter); + + // C++: void matchTemplate(Mat image, Mat templ, Mat& result, int method) + private static native void matchTemplate_0(long image_nativeObj, long templ_nativeObj, long result_nativeObj, int method); + + // C++: void medianBlur(Mat src, Mat& dst, int ksize) + private static native void medianBlur_0(long src_nativeObj, long dst_nativeObj, int ksize); + + // C++: RotatedRect minAreaRect(vector_Point2f points) + private static native double[] minAreaRect_0(long points_mat_nativeObj); + + // C++: void minEnclosingCircle(vector_Point2f points, Point2f& center, float& radius) + private static native void minEnclosingCircle_0(long points_mat_nativeObj, double[] center_out, double[] radius_out); + + // C++: Moments moments(Mat array, bool binaryImage = false) + private static native long moments_0(long array_nativeObj, boolean binaryImage); + private static native long moments_1(long array_nativeObj); + + // C++: void morphologyEx(Mat src, Mat& dst, int op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) + private static native void morphologyEx_0(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); + private static native void morphologyEx_1(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations); + private static native void morphologyEx_2(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj); + + // C++: Point2d phaseCorrelate(Mat src1, Mat src2, Mat window = Mat()) + private static native double[] phaseCorrelate_0(long src1_nativeObj, long src2_nativeObj, long window_nativeObj); + private static native double[] phaseCorrelate_1(long src1_nativeObj, long src2_nativeObj); + + // C++: Point2d phaseCorrelateRes(Mat src1, Mat src2, Mat window, double* response = 0) + private static native double[] phaseCorrelateRes_0(long src1_nativeObj, long src2_nativeObj, long window_nativeObj, double[] response_out); + private static native double[] phaseCorrelateRes_1(long src1_nativeObj, long src2_nativeObj, long window_nativeObj); + + // C++: double pointPolygonTest(vector_Point2f contour, Point2f pt, bool measureDist) + private static native double pointPolygonTest_0(long contour_mat_nativeObj, double pt_x, double pt_y, boolean measureDist); + + // C++: void preCornerDetect(Mat src, Mat& dst, int ksize, int borderType = BORDER_DEFAULT) + private static native void preCornerDetect_0(long src_nativeObj, long dst_nativeObj, int ksize, int borderType); + private static native void preCornerDetect_1(long src_nativeObj, long dst_nativeObj, int ksize); + + // C++: void pyrDown(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT) + private static native void pyrDown_0(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height, int borderType); + private static 
native void pyrDown_1(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height); + private static native void pyrDown_2(long src_nativeObj, long dst_nativeObj); + + // C++: void pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS,5,1)) + private static native void pyrMeanShiftFiltering_0(long src_nativeObj, long dst_nativeObj, double sp, double sr, int maxLevel, int termcrit_type, int termcrit_maxCount, double termcrit_epsilon); + private static native void pyrMeanShiftFiltering_1(long src_nativeObj, long dst_nativeObj, double sp, double sr); + + // C++: void pyrUp(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT) + private static native void pyrUp_0(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height, int borderType); + private static native void pyrUp_1(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height); + private static native void pyrUp_2(long src_nativeObj, long dst_nativeObj); + + // C++: void remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) + private static native void remap_0(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); + private static native void remap_1(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation); + + // C++: void resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR) + private static native void resize_0(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double fx, double fy, int interpolation); + private static native void resize_1(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height); + + // C++: void sepFilter2D(Mat src, Mat& dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT) + private static native void sepFilter2D_0(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y, double delta, int borderType); + private static native void sepFilter2D_1(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y, double delta); + private static native void sepFilter2D_2(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj); + + // C++: double threshold(Mat src, Mat& dst, double thresh, double maxval, int type) + private static native double threshold_0(long src_nativeObj, long dst_nativeObj, double thresh, double maxval, int type); + + // C++: void undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat()) + private static native void undistort_0(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long newCameraMatrix_nativeObj); + private static native void undistort_1(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj); + + // C++: void undistortPoints(vector_Point2f src, vector_Point2f& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat()) + private 
static native void undistortPoints_0(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long P_nativeObj); + private static native void undistortPoints_1(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj); + + // C++: void warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) + private static native void warpAffine_0(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); + private static native void warpAffine_1(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags); + private static native void warpAffine_2(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height); + + // C++: void warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) + private static native void warpPerspective_0(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); + private static native void warpPerspective_1(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags); + private static native void warpPerspective_2(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height); + + // C++: void watershed(Mat image, Mat& markers) + private static native void watershed_0(long image_nativeObj, long markers_nativeObj); + +} diff --git a/src/org/opencv/imgproc/Moments.java b/src/org/opencv/imgproc/Moments.java new file mode 100644 index 0000000..41b4326 --- /dev/null +++ b/src/org/opencv/imgproc/Moments.java @@ -0,0 +1,810 @@ + +// +// This file is auto-generated. Please don't modify it! 
+// +package org.opencv.imgproc; + + + +// C++: class Moments +public class Moments { + + protected final long nativeObj; + protected Moments(long addr) { nativeObj = addr; } + + + // + // C++: Moments::Moments() + // + + public Moments() + { + + nativeObj = Moments_0(); + + return; + } + + + // + // C++: double Moments::m00 + // + + public double get_m00() + { + + double retVal = get_m00_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m00 + // + + public void set_m00(double m00) + { + + set_m00_0(nativeObj, m00); + + return; + } + + + // + // C++: double Moments::m10 + // + + public double get_m10() + { + + double retVal = get_m10_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m10 + // + + public void set_m10(double m10) + { + + set_m10_0(nativeObj, m10); + + return; + } + + + // + // C++: double Moments::m01 + // + + public double get_m01() + { + + double retVal = get_m01_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m01 + // + + public void set_m01(double m01) + { + + set_m01_0(nativeObj, m01); + + return; + } + + + // + // C++: double Moments::m20 + // + + public double get_m20() + { + + double retVal = get_m20_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m20 + // + + public void set_m20(double m20) + { + + set_m20_0(nativeObj, m20); + + return; + } + + + // + // C++: double Moments::m11 + // + + public double get_m11() + { + + double retVal = get_m11_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m11 + // + + public void set_m11(double m11) + { + + set_m11_0(nativeObj, m11); + + return; + } + + + // + // C++: double Moments::m02 + // + + public double get_m02() + { + + double retVal = get_m02_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m02 + // + + public void set_m02(double m02) + { + + set_m02_0(nativeObj, m02); + + return; + } + + + // + // C++: double Moments::m30 + // + + public double get_m30() + { + + double retVal = get_m30_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m30 + // + + public void set_m30(double m30) + { + + set_m30_0(nativeObj, m30); + + return; + } + + + // + // C++: double Moments::m21 + // + + public double get_m21() + { + + double retVal = get_m21_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m21 + // + + public void set_m21(double m21) + { + + set_m21_0(nativeObj, m21); + + return; + } + + + // + // C++: double Moments::m12 + // + + public double get_m12() + { + + double retVal = get_m12_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m12 + // + + public void set_m12(double m12) + { + + set_m12_0(nativeObj, m12); + + return; + } + + + // + // C++: double Moments::m03 + // + + public double get_m03() + { + + double retVal = get_m03_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::m03 + // + + public void set_m03(double m03) + { + + set_m03_0(nativeObj, m03); + + return; + } + + + // + // C++: double Moments::mu20 + // + + public double get_mu20() + { + + double retVal = get_mu20_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu20 + // + + public void set_mu20(double mu20) + { + + set_mu20_0(nativeObj, mu20); + + return; + } + + + // + // C++: double Moments::mu11 + // + + public double get_mu11() + { + + double retVal = get_mu11_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu11 + // + + public void set_mu11(double mu11) + { + + set_mu11_0(nativeObj, mu11); + + return; + } + + + // + 
// C++: double Moments::mu02 + // + + public double get_mu02() + { + + double retVal = get_mu02_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu02 + // + + public void set_mu02(double mu02) + { + + set_mu02_0(nativeObj, mu02); + + return; + } + + + // + // C++: double Moments::mu30 + // + + public double get_mu30() + { + + double retVal = get_mu30_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu30 + // + + public void set_mu30(double mu30) + { + + set_mu30_0(nativeObj, mu30); + + return; + } + + + // + // C++: double Moments::mu21 + // + + public double get_mu21() + { + + double retVal = get_mu21_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu21 + // + + public void set_mu21(double mu21) + { + + set_mu21_0(nativeObj, mu21); + + return; + } + + + // + // C++: double Moments::mu12 + // + + public double get_mu12() + { + + double retVal = get_mu12_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu12 + // + + public void set_mu12(double mu12) + { + + set_mu12_0(nativeObj, mu12); + + return; + } + + + // + // C++: double Moments::mu03 + // + + public double get_mu03() + { + + double retVal = get_mu03_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::mu03 + // + + public void set_mu03(double mu03) + { + + set_mu03_0(nativeObj, mu03); + + return; + } + + + // + // C++: double Moments::nu20 + // + + public double get_nu20() + { + + double retVal = get_nu20_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu20 + // + + public void set_nu20(double nu20) + { + + set_nu20_0(nativeObj, nu20); + + return; + } + + + // + // C++: double Moments::nu11 + // + + public double get_nu11() + { + + double retVal = get_nu11_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu11 + // + + public void set_nu11(double nu11) + { + + set_nu11_0(nativeObj, nu11); + + return; + } + + + // + // C++: double Moments::nu02 + // + + public double get_nu02() + { + + double retVal = get_nu02_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu02 + // + + public void set_nu02(double nu02) + { + + set_nu02_0(nativeObj, nu02); + + return; + } + + + // + // C++: double Moments::nu30 + // + + public double get_nu30() + { + + double retVal = get_nu30_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu30 + // + + public void set_nu30(double nu30) + { + + set_nu30_0(nativeObj, nu30); + + return; + } + + + // + // C++: double Moments::nu21 + // + + public double get_nu21() + { + + double retVal = get_nu21_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu21 + // + + public void set_nu21(double nu21) + { + + set_nu21_0(nativeObj, nu21); + + return; + } + + + // + // C++: double Moments::nu12 + // + + public double get_nu12() + { + + double retVal = get_nu12_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu12 + // + + public void set_nu12(double nu12) + { + + set_nu12_0(nativeObj, nu12); + + return; + } + + + // + // C++: double Moments::nu03 + // + + public double get_nu03() + { + + double retVal = get_nu03_0(nativeObj); + + return retVal; + } + + + // + // C++: void Moments::nu03 + // + + public void set_nu03(double nu03) + { + + set_nu03_0(nativeObj, nu03); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: Moments::Moments() + private static native long Moments_0(); + + // C++: double Moments::m00 + private static native double 
get_m00_0(long nativeObj); + + // C++: void Moments::m00 + private static native void set_m00_0(long nativeObj, double m00); + + // C++: double Moments::m10 + private static native double get_m10_0(long nativeObj); + + // C++: void Moments::m10 + private static native void set_m10_0(long nativeObj, double m10); + + // C++: double Moments::m01 + private static native double get_m01_0(long nativeObj); + + // C++: void Moments::m01 + private static native void set_m01_0(long nativeObj, double m01); + + // C++: double Moments::m20 + private static native double get_m20_0(long nativeObj); + + // C++: void Moments::m20 + private static native void set_m20_0(long nativeObj, double m20); + + // C++: double Moments::m11 + private static native double get_m11_0(long nativeObj); + + // C++: void Moments::m11 + private static native void set_m11_0(long nativeObj, double m11); + + // C++: double Moments::m02 + private static native double get_m02_0(long nativeObj); + + // C++: void Moments::m02 + private static native void set_m02_0(long nativeObj, double m02); + + // C++: double Moments::m30 + private static native double get_m30_0(long nativeObj); + + // C++: void Moments::m30 + private static native void set_m30_0(long nativeObj, double m30); + + // C++: double Moments::m21 + private static native double get_m21_0(long nativeObj); + + // C++: void Moments::m21 + private static native void set_m21_0(long nativeObj, double m21); + + // C++: double Moments::m12 + private static native double get_m12_0(long nativeObj); + + // C++: void Moments::m12 + private static native void set_m12_0(long nativeObj, double m12); + + // C++: double Moments::m03 + private static native double get_m03_0(long nativeObj); + + // C++: void Moments::m03 + private static native void set_m03_0(long nativeObj, double m03); + + // C++: double Moments::mu20 + private static native double get_mu20_0(long nativeObj); + + // C++: void Moments::mu20 + private static native void set_mu20_0(long nativeObj, double mu20); + + // C++: double Moments::mu11 + private static native double get_mu11_0(long nativeObj); + + // C++: void Moments::mu11 + private static native void set_mu11_0(long nativeObj, double mu11); + + // C++: double Moments::mu02 + private static native double get_mu02_0(long nativeObj); + + // C++: void Moments::mu02 + private static native void set_mu02_0(long nativeObj, double mu02); + + // C++: double Moments::mu30 + private static native double get_mu30_0(long nativeObj); + + // C++: void Moments::mu30 + private static native void set_mu30_0(long nativeObj, double mu30); + + // C++: double Moments::mu21 + private static native double get_mu21_0(long nativeObj); + + // C++: void Moments::mu21 + private static native void set_mu21_0(long nativeObj, double mu21); + + // C++: double Moments::mu12 + private static native double get_mu12_0(long nativeObj); + + // C++: void Moments::mu12 + private static native void set_mu12_0(long nativeObj, double mu12); + + // C++: double Moments::mu03 + private static native double get_mu03_0(long nativeObj); + + // C++: void Moments::mu03 + private static native void set_mu03_0(long nativeObj, double mu03); + + // C++: double Moments::nu20 + private static native double get_nu20_0(long nativeObj); + + // C++: void Moments::nu20 + private static native void set_nu20_0(long nativeObj, double nu20); + + // C++: double Moments::nu11 + private static native double get_nu11_0(long nativeObj); + + // C++: void Moments::nu11 + private static native void set_nu11_0(long nativeObj, double nu11); 
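// Editor's sketch (not part of the generated bindings): typical use of these
// accessors. Imgproc.moments(...) fills a Moments instance; the centroid of a
// blob is m10/m00, m01/m00. "binary" is an assumed 8-bit single-channel mask.
Moments m = org.opencv.imgproc.Imgproc.moments(binary, true);
double cx = m.get_m10() / m.get_m00();   // centroid x
double cy = m.get_m01() / m.get_m00();   // centroid y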
+ + // C++: double Moments::nu02 + private static native double get_nu02_0(long nativeObj); + + // C++: void Moments::nu02 + private static native void set_nu02_0(long nativeObj, double nu02); + + // C++: double Moments::nu30 + private static native double get_nu30_0(long nativeObj); + + // C++: void Moments::nu30 + private static native void set_nu30_0(long nativeObj, double nu30); + + // C++: double Moments::nu21 + private static native double get_nu21_0(long nativeObj); + + // C++: void Moments::nu21 + private static native void set_nu21_0(long nativeObj, double nu21); + + // C++: double Moments::nu12 + private static native double get_nu12_0(long nativeObj); + + // C++: void Moments::nu12 + private static native void set_nu12_0(long nativeObj, double nu12); + + // C++: double Moments::nu03 + private static native double get_nu03_0(long nativeObj); + + // C++: void Moments::nu03 + private static native void set_nu03_0(long nativeObj, double nu03); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/imgproc/Subdiv2D.java b/src/org/opencv/imgproc/Subdiv2D.java new file mode 100644 index 0000000..fbc9230 --- /dev/null +++ b/src/org/opencv/imgproc/Subdiv2D.java @@ -0,0 +1,362 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.imgproc; + +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfFloat4; +import org.opencv.core.MatOfFloat6; +import org.opencv.core.MatOfInt; +import org.opencv.core.MatOfPoint2f; +import org.opencv.core.Point; +import org.opencv.core.Rect; +import org.opencv.utils.Converters; + +// C++: class Subdiv2D +public class Subdiv2D { + + protected final long nativeObj; + protected Subdiv2D(long addr) { nativeObj = addr; } + + + public static final int + PTLOC_ERROR = -2, + PTLOC_OUTSIDE_RECT = -1, + PTLOC_INSIDE = 0, + PTLOC_VERTEX = 1, + PTLOC_ON_EDGE = 2, + NEXT_AROUND_ORG = 0x00, + NEXT_AROUND_DST = 0x22, + PREV_AROUND_ORG = 0x11, + PREV_AROUND_DST = 0x33, + NEXT_AROUND_LEFT = 0x13, + NEXT_AROUND_RIGHT = 0x31, + PREV_AROUND_LEFT = 0x20, + PREV_AROUND_RIGHT = 0x02; + + + // + // C++: Subdiv2D::Subdiv2D() + // + + public Subdiv2D() + { + + nativeObj = Subdiv2D_0(); + + return; + } + + + // + // C++: Subdiv2D::Subdiv2D(Rect rect) + // + + public Subdiv2D(Rect rect) + { + + nativeObj = Subdiv2D_1(rect.x, rect.y, rect.width, rect.height); + + return; + } + + + // + // C++: int Subdiv2D::edgeDst(int edge, Point2f* dstpt = 0) + // + + public int edgeDst(int edge, Point dstpt) + { + double[] dstpt_out = new double[2]; + int retVal = edgeDst_0(nativeObj, edge, dstpt_out); + if(dstpt!=null){ dstpt.x = dstpt_out[0]; dstpt.y = dstpt_out[1]; } + return retVal; + } + + public int edgeDst(int edge) + { + + int retVal = edgeDst_1(nativeObj, edge); + + return retVal; + } + + + // + // C++: int Subdiv2D::edgeOrg(int edge, Point2f* orgpt = 0) + // + + public int edgeOrg(int edge, Point orgpt) + { + double[] orgpt_out = new double[2]; + int retVal = edgeOrg_0(nativeObj, edge, orgpt_out); + if(orgpt!=null){ orgpt.x = orgpt_out[0]; orgpt.y = orgpt_out[1]; } + return retVal; + } + + public int edgeOrg(int edge) + { + + int retVal = edgeOrg_1(nativeObj, edge); + + return retVal; + } + + + // + // C++: int Subdiv2D::findNearest(Point2f pt, Point2f* nearestPt = 0) + // + + public int findNearest(Point pt, Point nearestPt) + { + double[] nearestPt_out = new double[2]; + int retVal = findNearest_0(nativeObj, pt.x, pt.y, nearestPt_out); + if(nearestPt!=null){ 
nearestPt.x = nearestPt_out[0]; nearestPt.y = nearestPt_out[1]; } + return retVal; + } + + public int findNearest(Point pt) + { + + int retVal = findNearest_1(nativeObj, pt.x, pt.y); + + return retVal; + } + + + // + // C++: int Subdiv2D::getEdge(int edge, int nextEdgeType) + // + + public int getEdge(int edge, int nextEdgeType) + { + + int retVal = getEdge_0(nativeObj, edge, nextEdgeType); + + return retVal; + } + + + // + // C++: void Subdiv2D::getEdgeList(vector_Vec4f& edgeList) + // + + public void getEdgeList(MatOfFloat4 edgeList) + { + Mat edgeList_mat = edgeList; + getEdgeList_0(nativeObj, edgeList_mat.nativeObj); + + return; + } + + + // + // C++: void Subdiv2D::getTriangleList(vector_Vec6f& triangleList) + // + + public void getTriangleList(MatOfFloat6 triangleList) + { + Mat triangleList_mat = triangleList; + getTriangleList_0(nativeObj, triangleList_mat.nativeObj); + + return; + } + + + // + // C++: Point2f Subdiv2D::getVertex(int vertex, int* firstEdge = 0) + // + + public Point getVertex(int vertex, int[] firstEdge) + { + double[] firstEdge_out = new double[1]; + Point retVal = new Point(getVertex_0(nativeObj, vertex, firstEdge_out)); + if(firstEdge!=null) firstEdge[0] = (int)firstEdge_out[0]; + return retVal; + } + + public Point getVertex(int vertex) + { + + Point retVal = new Point(getVertex_1(nativeObj, vertex)); + + return retVal; + } + + + // + // C++: void Subdiv2D::getVoronoiFacetList(vector_int idx, vector_vector_Point2f& facetList, vector_Point2f& facetCenters) + // + + public void getVoronoiFacetList(MatOfInt idx, List facetList, MatOfPoint2f facetCenters) + { + Mat idx_mat = idx; + Mat facetList_mat = new Mat(); + Mat facetCenters_mat = facetCenters; + getVoronoiFacetList_0(nativeObj, idx_mat.nativeObj, facetList_mat.nativeObj, facetCenters_mat.nativeObj); + Converters.Mat_to_vector_vector_Point2f(facetList_mat, facetList); + return; + } + + + // + // C++: void Subdiv2D::initDelaunay(Rect rect) + // + + public void initDelaunay(Rect rect) + { + + initDelaunay_0(nativeObj, rect.x, rect.y, rect.width, rect.height); + + return; + } + + + // + // C++: int Subdiv2D::insert(Point2f pt) + // + + public int insert(Point pt) + { + + int retVal = insert_0(nativeObj, pt.x, pt.y); + + return retVal; + } + + + // + // C++: void Subdiv2D::insert(vector_Point2f ptvec) + // + + public void insert(MatOfPoint2f ptvec) + { + Mat ptvec_mat = ptvec; + insert_1(nativeObj, ptvec_mat.nativeObj); + + return; + } + + + // + // C++: int Subdiv2D::locate(Point2f pt, int& edge, int& vertex) + // + + public int locate(Point pt, int[] edge, int[] vertex) + { + double[] edge_out = new double[1]; + double[] vertex_out = new double[1]; + int retVal = locate_0(nativeObj, pt.x, pt.y, edge_out, vertex_out); + if(edge!=null) edge[0] = (int)edge_out[0]; + if(vertex!=null) vertex[0] = (int)vertex_out[0]; + return retVal; + } + + + // + // C++: int Subdiv2D::nextEdge(int edge) + // + + public int nextEdge(int edge) + { + + int retVal = nextEdge_0(nativeObj, edge); + + return retVal; + } + + + // + // C++: int Subdiv2D::rotateEdge(int edge, int rotate) + // + + public int rotateEdge(int edge, int rotate) + { + + int retVal = rotateEdge_0(nativeObj, edge, rotate); + + return retVal; + } + + + // + // C++: int Subdiv2D::symEdge(int edge) + // + + public int symEdge(int edge) + { + + int retVal = symEdge_0(nativeObj, edge); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: Subdiv2D::Subdiv2D() + private static native long 
Subdiv2D_0(); + + // C++: Subdiv2D::Subdiv2D(Rect rect) + private static native long Subdiv2D_1(int rect_x, int rect_y, int rect_width, int rect_height); + + // C++: int Subdiv2D::edgeDst(int edge, Point2f* dstpt = 0) + private static native int edgeDst_0(long nativeObj, int edge, double[] dstpt_out); + private static native int edgeDst_1(long nativeObj, int edge); + + // C++: int Subdiv2D::edgeOrg(int edge, Point2f* orgpt = 0) + private static native int edgeOrg_0(long nativeObj, int edge, double[] orgpt_out); + private static native int edgeOrg_1(long nativeObj, int edge); + + // C++: int Subdiv2D::findNearest(Point2f pt, Point2f* nearestPt = 0) + private static native int findNearest_0(long nativeObj, double pt_x, double pt_y, double[] nearestPt_out); + private static native int findNearest_1(long nativeObj, double pt_x, double pt_y); + + // C++: int Subdiv2D::getEdge(int edge, int nextEdgeType) + private static native int getEdge_0(long nativeObj, int edge, int nextEdgeType); + + // C++: void Subdiv2D::getEdgeList(vector_Vec4f& edgeList) + private static native void getEdgeList_0(long nativeObj, long edgeList_mat_nativeObj); + + // C++: void Subdiv2D::getTriangleList(vector_Vec6f& triangleList) + private static native void getTriangleList_0(long nativeObj, long triangleList_mat_nativeObj); + + // C++: Point2f Subdiv2D::getVertex(int vertex, int* firstEdge = 0) + private static native double[] getVertex_0(long nativeObj, int vertex, double[] firstEdge_out); + private static native double[] getVertex_1(long nativeObj, int vertex); + + // C++: void Subdiv2D::getVoronoiFacetList(vector_int idx, vector_vector_Point2f& facetList, vector_Point2f& facetCenters) + private static native void getVoronoiFacetList_0(long nativeObj, long idx_mat_nativeObj, long facetList_mat_nativeObj, long facetCenters_mat_nativeObj); + + // C++: void Subdiv2D::initDelaunay(Rect rect) + private static native void initDelaunay_0(long nativeObj, int rect_x, int rect_y, int rect_width, int rect_height); + + // C++: int Subdiv2D::insert(Point2f pt) + private static native int insert_0(long nativeObj, double pt_x, double pt_y); + + // C++: void Subdiv2D::insert(vector_Point2f ptvec) + private static native void insert_1(long nativeObj, long ptvec_mat_nativeObj); + + // C++: int Subdiv2D::locate(Point2f pt, int& edge, int& vertex) + private static native int locate_0(long nativeObj, double pt_x, double pt_y, double[] edge_out, double[] vertex_out); + + // C++: int Subdiv2D::nextEdge(int edge) + private static native int nextEdge_0(long nativeObj, int edge); + + // C++: int Subdiv2D::rotateEdge(int edge, int rotate) + private static native int rotateEdge_0(long nativeObj, int edge, int rotate); + + // C++: int Subdiv2D::symEdge(int edge) + private static native int symEdge_0(long nativeObj, int edge); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/imgproc/package.bluej b/src/org/opencv/imgproc/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/ml/CvANN_MLP.java b/src/org/opencv/ml/CvANN_MLP.java new file mode 100644 index 0000000..de411ed --- /dev/null +++ b/src/org/opencv/ml/CvANN_MLP.java @@ -0,0 +1,297 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvANN_MLP +/** + *

MLP model.

+ * + *

Unlike many other models in ML that are constructed and trained at once, in + * the MLP model these steps are separated. First, a network with the specified + * topology is created using the non-default constructor or the method + * "CvANN_MLP.create". All the weights are set to zero. Then, the network is + * trained using a set of input and output vectors. The training procedure can + * be repeated more than once, that is, the weights can be adjusted based on the + * new training data.
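// Editor's sketch of the create-then-train flow described above (toy XOR data;
// all sizes and values are illustrative assumptions, not part of this diff).
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.ml.CvANN_MLP;

public class MlpSketch {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); }

    public static void main(String[] args) {
        // Topology: 2 inputs -> 4 hidden neurons -> 1 output.
        Mat layerSizes = new Mat(3, 1, CvType.CV_32SC1);
        layerSizes.put(0, 0, 2, 4, 1);

        CvANN_MLP mlp = new CvANN_MLP();
        mlp.create(layerSizes, CvANN_MLP.SIGMOID_SYM, 1, 1);  // weights start at zero

        // One sample per row; outputs and per-sample weights match row-wise.
        Mat inputs = new Mat(4, 2, CvType.CV_32FC1);
        inputs.put(0, 0, 0, 0, 0, 1, 1, 0, 1, 1);
        Mat outputs = new Mat(4, 1, CvType.CV_32FC1);
        outputs.put(0, 0, 0, 1, 1, 0);
        Mat sampleWeights = new Mat(4, 1, CvType.CV_32FC1);
        sampleWeights.put(0, 0, 1, 1, 1, 1);

        int iterations = mlp.train(inputs, outputs, sampleWeights);

        Mat predicted = new Mat();
        mlp.predict(inputs, predicted);  // the returned float is a dummy value
        System.out.println("iterations: " + iterations);
    }
}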

+ * + * @see org.opencv.ml.CvANN_MLP : public CvStatModel + */ +public class CvANN_MLP extends CvStatModel { + + protected CvANN_MLP(long addr) { super(addr); } + + + public static final int + IDENTITY = 0, + SIGMOID_SYM = 1, + GAUSSIAN = 2, + UPDATE_WEIGHTS = 1, + NO_INPUT_SCALE = 2, + NO_OUTPUT_SCALE = 4; + + + // + // C++: CvANN_MLP::CvANN_MLP() + // + +/** + *

The constructors.

+ * + *

The advanced constructor allows creating an MLP with the specified topology. + * See "CvANN_MLP.create" for details.

+ * + * @see org.opencv.ml.CvANN_MLP.CvANN_MLP + */ + public CvANN_MLP() + { + + super( CvANN_MLP_0() ); + + return; + } + + + // + // C++: CvANN_MLP::CvANN_MLP(Mat layerSizes, int activateFunc = CvANN_MLP::SIGMOID_SYM, double fparam1 = 0, double fparam2 = 0) + // + +/** + *

The constructors.

+ * + *

The advanced constructor allows creating an MLP with the specified topology. + * See "CvANN_MLP.create" for details.

+ * + * @param layerSizes a layerSizes + * @param activateFunc a activateFunc + * @param fparam1 a fparam1 + * @param fparam2 a fparam2 + * + * @see org.opencv.ml.CvANN_MLP.CvANN_MLP + */ + public CvANN_MLP(Mat layerSizes, int activateFunc, double fparam1, double fparam2) + { + + super( CvANN_MLP_1(layerSizes.nativeObj, activateFunc, fparam1, fparam2) ); + + return; + } + +/** + *

The constructors.

+ * + *

The advanced constructor allows creating an MLP with the specified topology. + * See "CvANN_MLP.create" for details.

+ * + * @param layerSizes a layerSizes + * + * @see org.opencv.ml.CvANN_MLP.CvANN_MLP + */ + public CvANN_MLP(Mat layerSizes) + { + + super( CvANN_MLP_2(layerSizes.nativeObj) ); + + return; + } + + + // + // C++: void CvANN_MLP::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: void CvANN_MLP::create(Mat layerSizes, int activateFunc = CvANN_MLP::SIGMOID_SYM, double fparam1 = 0, double fparam2 = 0) + // + +/** + *

Constructs an MLP with the specified topology.

+ * + *

The method creates an MLP network with the specified topology and assigns the + * same activation function to all the neurons.

+ * + * @param layerSizes Integer vector specifying the number of neurons in each + * layer including the input and output layers. + * @param activateFunc Parameter specifying the activation function for each + * neuron: one of CvANN_MLP.IDENTITY, CvANN_MLP.SIGMOID_SYM, + * and CvANN_MLP.GAUSSIAN. + * @param fparam1 Free parameter of the activation function, alpha. See + * the formulas in the introduction section. + * @param fparam2 Free parameter of the activation function, beta. See + * the formulas in the introduction section. + * + * @see org.opencv.ml.CvANN_MLP.create + */ + public void create(Mat layerSizes, int activateFunc, double fparam1, double fparam2) + { + + create_0(nativeObj, layerSizes.nativeObj, activateFunc, fparam1, fparam2); + + return; + } + +/** + *

Constructs an MLP with the specified topology.

+ * + *

The method creates an MLP network with the specified topology and assigns the + * same activation function to all the neurons.

+ * + * @param layerSizes Integer vector specifying the number of neurons in each + * layer including the input and output layers. + * + * @see org.opencv.ml.CvANN_MLP.create + */ + public void create(Mat layerSizes) + { + + create_1(nativeObj, layerSizes.nativeObj); + + return; + } + + + // + // C++: float CvANN_MLP::predict(Mat inputs, Mat& outputs) + // + +/** + *

Predicts responses for input samples.

+ * + *

The method returns a dummy value which should be ignored.

+ * + * @param inputs Input samples. + * @param outputs Predicted responses for corresponding samples. + * + * @see org.opencv.ml.CvANN_MLP.predict + */ + public float predict(Mat inputs, Mat outputs) + { + + float retVal = predict_0(nativeObj, inputs.nativeObj, outputs.nativeObj); + + return retVal; + } + + + // + // C++: int CvANN_MLP::train(Mat inputs, Mat outputs, Mat sampleWeights, Mat sampleIdx = cv::Mat(), CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), int flags = 0) + // + +/** + *

Trains/updates MLP.

+ * + *

This method applies the specified training algorithm to compute or adjust + * the network weights. It returns the number of iterations performed.

+ * + *

The RPROP training algorithm is parallelized with the TBB library.

+ * + * @param inputs Floating-point matrix of input vectors, one vector per row. + * @param outputs Floating-point matrix of the corresponding output vectors, one + * vector per row. + * @param sampleWeights (RPROP only) Optional floating-point vector of weights + * for each sample. Some samples may be more important than others for training. + * You may want to raise the weight of certain classes to find the right balance + * between hit-rate and false-alarm rate, and so on. + * @param sampleIdx Optional integer vector indicating the samples (rows of + * inputs and outputs) that are taken into account. + * @param params Training parameters. See the "CvANN_MLP_TrainParams" + * description. + * @param flags Various parameters to control the training algorithm. A + * combination of the following parameters is possible: + *
    + *
  • UPDATE_WEIGHTS Algorithm updates the network weights rather than + * computing them from scratch. In the latter case, the weights are initialized + * using the Nguyen-Widrow algorithm.
  • NO_INPUT_SCALE Algorithm does not normalize the input vectors. If this + * flag is not set, the training algorithm normalizes each input feature + * independently, shifting its mean value to 0 and making the standard deviation + * equal to 1. If the network is expected to be updated frequently, the new + * training data could differ significantly from the original data. In this case, + * you should take care of proper normalization yourself.
  • NO_OUTPUT_SCALE Algorithm does not normalize the output vectors. If + * the flag is not set, the training algorithm normalizes each output feature + * independently, transforming it to a certain range depending on the + * activation function used (a flag-usage sketch follows this method). + *
+ * + * @see org.opencv.ml.CvANN_MLP.train + */ + public int train(Mat inputs, Mat outputs, Mat sampleWeights, Mat sampleIdx, CvANN_MLP_TrainParams params, int flags) + { + + int retVal = train_0(nativeObj, inputs.nativeObj, outputs.nativeObj, sampleWeights.nativeObj, sampleIdx.nativeObj, params.nativeObj, flags); + + return retVal; + } + +/** + *
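// Editor's sketch (assumed Mats from a previous session): incremental training
// with the flag bits above. UPDATE_WEIGHTS adjusts the existing weights instead
// of re-initializing them; NO_INPUT_SCALE skips input normalization, so the
// caller must normalize consistently with the earlier data.
int flags = CvANN_MLP.UPDATE_WEIGHTS | CvANN_MLP.NO_INPUT_SCALE;
int iterations = mlp.train(newInputs, newOutputs, newSampleWeights,
                           new Mat(), new CvANN_MLP_TrainParams(), flags);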

Trains/updates MLP.

+ * + *

This method applies the specified training algorithm to compute or adjust + * the network weights. It returns the number of iterations performed.

+ * + *

The RPROP training algorithm is parallelized with the TBB library.

+ * + * @param inputs Floating-point matrix of input vectors, one vector per row. + * @param outputs Floating-point matrix of the corresponding output vectors, one + * vector per row. + * @param sampleWeights (RPROP only) Optional floating-point vector of weights + * for each sample. Some samples may be more important than others for training. + * You may want to raise the weight of certain classes to find the right balance + * between hit-rate and false-alarm rate, and so on. + * + * @see org.opencv.ml.CvANN_MLP.train + */ + public int train(Mat inputs, Mat outputs, Mat sampleWeights) + { + + int retVal = train_1(nativeObj, inputs.nativeObj, outputs.nativeObj, sampleWeights.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvANN_MLP::CvANN_MLP() + private static native long CvANN_MLP_0(); + + // C++: CvANN_MLP::CvANN_MLP(Mat layerSizes, int activateFunc = CvANN_MLP::SIGMOID_SYM, double fparam1 = 0, double fparam2 = 0) + private static native long CvANN_MLP_1(long layerSizes_nativeObj, int activateFunc, double fparam1, double fparam2); + private static native long CvANN_MLP_2(long layerSizes_nativeObj); + + // C++: void CvANN_MLP::clear() + private static native void clear_0(long nativeObj); + + // C++: void CvANN_MLP::create(Mat layerSizes, int activateFunc = CvANN_MLP::SIGMOID_SYM, double fparam1 = 0, double fparam2 = 0) + private static native void create_0(long nativeObj, long layerSizes_nativeObj, int activateFunc, double fparam1, double fparam2); + private static native void create_1(long nativeObj, long layerSizes_nativeObj); + + // C++: float CvANN_MLP::predict(Mat inputs, Mat& outputs) + private static native float predict_0(long nativeObj, long inputs_nativeObj, long outputs_nativeObj); + + // C++: int CvANN_MLP::train(Mat inputs, Mat outputs, Mat sampleWeights, Mat sampleIdx = cv::Mat(), CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), int flags = 0) + private static native int train_0(long nativeObj, long inputs_nativeObj, long outputs_nativeObj, long sampleWeights_nativeObj, long sampleIdx_nativeObj, long params_nativeObj, int flags); + private static native int train_1(long nativeObj, long inputs_nativeObj, long outputs_nativeObj, long sampleWeights_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvANN_MLP_TrainParams.java b/src/org/opencv/ml/CvANN_MLP_TrainParams.java new file mode 100644 index 0000000..92c3cc1 --- /dev/null +++ b/src/org/opencv/ml/CvANN_MLP_TrainParams.java @@ -0,0 +1,390 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.TermCriteria; + +// C++: class CvANN_MLP_TrainParams +/** + *

Parameters of the MLP training algorithm. You can initialize the structure with + * a constructor, or the individual parameters can be adjusted after the + * structure is created.

+ * + *

The back-propagation algorithm parameters:

+ * + *

Strength of the weight gradient term. The recommended value is about 0.1.

+ * + *

Strength of the momentum term (the difference between weights on the two + * previous iterations). This parameter provides some inertia to smooth the + * random fluctuations of the weights. It can vary from 0 (the feature is + * disabled) to 1 and beyond. A value of about 0.1 is good enough.

+ * + *

// C++ code:

+ * + *

The RPROP algorithm parameters (see [RPROP93] for details):

+ * + *

Initial value Delta_0 of update-values Delta_(ij).

+ * + *

Increase factor eta^+. It must be >1.

+ * + *

Decrease factor eta^-. It must be <1.

+ * + *

Update-values lower limit Delta_(min). It must be positive.

+ * + *

Update-values upper limit Delta_(max). It must be >1.

+ * + * @see org.opencv.ml.CvANN_MLP_TrainParams + */ +public class CvANN_MLP_TrainParams { + + protected final long nativeObj; + protected CvANN_MLP_TrainParams(long addr) { nativeObj = addr; } + + + public static final int + BACKPROP = 0, + RPROP = 1; + + + // + // C++: CvANN_MLP_TrainParams::CvANN_MLP_TrainParams() + // + +/** + *

The constructors.

+ * + *

By default the RPROP algorithm is used:

+ * + *

+ * + *

// C++ code:

+ * + *

CvANN_MLP_TrainParams.CvANN_MLP_TrainParams()

+ * + * + *

term_crit = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, 0.01);

+ * + *

train_method = RPROP;

+ * + *

bp_dw_scale = bp_moment_scale = 0.1;

+ * + *

rp_dw0 = 0.1; rp_dw_plus = 1.2; rp_dw_minus = 0.5;

+ * + *

rp_dw_min = FLT_EPSILON; rp_dw_max = 50.;
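// Editor's sketch: switching from the RPROP defaults quoted above to
// back-propagation and tuning the two terms documented earlier ("mlp" and the
// training Mats are assumed to exist).
CvANN_MLP_TrainParams params = new CvANN_MLP_TrainParams();
params.set_train_method(CvANN_MLP_TrainParams.BACKPROP);
params.set_bp_dw_scale(0.1);      // weight gradient term, ~0.1 recommended
params.set_bp_moment_scale(0.1);  // momentum term, 0..1
int iterations = mlp.train(inputs, outputs, sampleWeights, new Mat(), params, 0);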

+ * + * + * @see org.opencv.ml.CvANN_MLP_TrainParams.CvANN_MLP_TrainParams + */ + public CvANN_MLP_TrainParams() + { + + nativeObj = CvANN_MLP_TrainParams_0(); + + return; + } + + + // + // C++: TermCriteria CvANN_MLP_TrainParams::term_crit + // + + public TermCriteria get_term_crit() + { + + TermCriteria retVal = new TermCriteria(get_term_crit_0(nativeObj)); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::term_crit + // + + public void set_term_crit(TermCriteria term_crit) + { + + set_term_crit_0(nativeObj, term_crit.type, term_crit.maxCount, term_crit.epsilon); + + return; + } + + + // + // C++: int CvANN_MLP_TrainParams::train_method + // + + public int get_train_method() + { + + int retVal = get_train_method_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::train_method + // + + public void set_train_method(int train_method) + { + + set_train_method_0(nativeObj, train_method); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::bp_dw_scale + // + + public double get_bp_dw_scale() + { + + double retVal = get_bp_dw_scale_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::bp_dw_scale + // + + public void set_bp_dw_scale(double bp_dw_scale) + { + + set_bp_dw_scale_0(nativeObj, bp_dw_scale); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::bp_moment_scale + // + + public double get_bp_moment_scale() + { + + double retVal = get_bp_moment_scale_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::bp_moment_scale + // + + public void set_bp_moment_scale(double bp_moment_scale) + { + + set_bp_moment_scale_0(nativeObj, bp_moment_scale); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::rp_dw0 + // + + public double get_rp_dw0() + { + + double retVal = get_rp_dw0_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::rp_dw0 + // + + public void set_rp_dw0(double rp_dw0) + { + + set_rp_dw0_0(nativeObj, rp_dw0); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::rp_dw_plus + // + + public double get_rp_dw_plus() + { + + double retVal = get_rp_dw_plus_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::rp_dw_plus + // + + public void set_rp_dw_plus(double rp_dw_plus) + { + + set_rp_dw_plus_0(nativeObj, rp_dw_plus); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::rp_dw_minus + // + + public double get_rp_dw_minus() + { + + double retVal = get_rp_dw_minus_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::rp_dw_minus + // + + public void set_rp_dw_minus(double rp_dw_minus) + { + + set_rp_dw_minus_0(nativeObj, rp_dw_minus); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::rp_dw_min + // + + public double get_rp_dw_min() + { + + double retVal = get_rp_dw_min_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::rp_dw_min + // + + public void set_rp_dw_min(double rp_dw_min) + { + + set_rp_dw_min_0(nativeObj, rp_dw_min); + + return; + } + + + // + // C++: double CvANN_MLP_TrainParams::rp_dw_max + // + + public double get_rp_dw_max() + { + + double retVal = get_rp_dw_max_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvANN_MLP_TrainParams::rp_dw_max + // + + public void set_rp_dw_max(double rp_dw_max) + { + + set_rp_dw_max_0(nativeObj, rp_dw_max); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // 
C++: CvANN_MLP_TrainParams::CvANN_MLP_TrainParams() + private static native long CvANN_MLP_TrainParams_0(); + + // C++: TermCriteria CvANN_MLP_TrainParams::term_crit + private static native double[] get_term_crit_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::term_crit + private static native void set_term_crit_0(long nativeObj, int term_crit_type, int term_crit_maxCount, double term_crit_epsilon); + + // C++: int CvANN_MLP_TrainParams::train_method + private static native int get_train_method_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::train_method + private static native void set_train_method_0(long nativeObj, int train_method); + + // C++: double CvANN_MLP_TrainParams::bp_dw_scale + private static native double get_bp_dw_scale_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::bp_dw_scale + private static native void set_bp_dw_scale_0(long nativeObj, double bp_dw_scale); + + // C++: double CvANN_MLP_TrainParams::bp_moment_scale + private static native double get_bp_moment_scale_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::bp_moment_scale + private static native void set_bp_moment_scale_0(long nativeObj, double bp_moment_scale); + + // C++: double CvANN_MLP_TrainParams::rp_dw0 + private static native double get_rp_dw0_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::rp_dw0 + private static native void set_rp_dw0_0(long nativeObj, double rp_dw0); + + // C++: double CvANN_MLP_TrainParams::rp_dw_plus + private static native double get_rp_dw_plus_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::rp_dw_plus + private static native void set_rp_dw_plus_0(long nativeObj, double rp_dw_plus); + + // C++: double CvANN_MLP_TrainParams::rp_dw_minus + private static native double get_rp_dw_minus_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::rp_dw_minus + private static native void set_rp_dw_minus_0(long nativeObj, double rp_dw_minus); + + // C++: double CvANN_MLP_TrainParams::rp_dw_min + private static native double get_rp_dw_min_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::rp_dw_min + private static native void set_rp_dw_min_0(long nativeObj, double rp_dw_min); + + // C++: double CvANN_MLP_TrainParams::rp_dw_max + private static native double get_rp_dw_max_0(long nativeObj); + + // C++: void CvANN_MLP_TrainParams::rp_dw_max + private static native void set_rp_dw_max_0(long nativeObj, double rp_dw_max); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvBoost.java b/src/org/opencv/ml/CvBoost.java new file mode 100644 index 0000000..96942fa --- /dev/null +++ b/src/org/opencv/ml/CvBoost.java @@ -0,0 +1,278 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; +import org.opencv.core.Range; + +// C++: class CvBoost +/** + *

Boosted tree classifier derived from "CvStatModel".

+ * + * @see org.opencv.ml.CvBoost : public CvStatModel + */ +public class CvBoost extends CvStatModel { + + protected CvBoost(long addr) { super(addr); } + + + public static final int + DISCRETE = 0, + REAL = 1, + LOGIT = 2, + GENTLE = 3, + DEFAULT = 0, + GINI = 1, + MISCLASS = 3, + SQERR = 4; + + + // + // C++: CvBoost::CvBoost() + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow the conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameter descriptions.

+ * + * @see org.opencv.ml.CvBoost.CvBoost + */ + public CvBoost() + { + + super( CvBoost_0() ); + + return; + } + + + // + // C++: CvBoost::CvBoost(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvBoostParams params = CvBoostParams()) + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow the conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameter descriptions.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param varType a varType + * @param missingDataMask a missingDataMask + * @param params a params + * + * @see org.opencv.ml.CvBoost.CvBoost + */ + public CvBoost(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvBoostParams params) + { + + super( CvBoost_1(trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj) ); + + return; + } + +/** + *

Default and training constructors.

+ * + *

The constructors follow the conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameter descriptions.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * + * @see org.opencv.ml.CvBoost.CvBoost + */ + public CvBoost(Mat trainData, int tflag, Mat responses) + { + + super( CvBoost_2(trainData.nativeObj, tflag, responses.nativeObj) ); + + return; + } + + + // + // C++: void CvBoost::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: float CvBoost::predict(Mat sample, Mat missing = cv::Mat(), Range slice = cv::Range::all(), bool rawMode = false, bool returnSum = false) + // + +/** + *

Predicts a response for an input sample.

+ * + *

The method runs the sample through the trees in the ensemble and returns the + * output class label based on weighted voting.
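// Editor's sketch (assumes a trained CvBoost "boost" and a 1xN CV_32FC1 row):
float label = boost.predict(sample);
// Full form: empty missing-data mask, only the first 50 weak classifiers (an
// assumed count), rawMode off, and the sum of votes instead of a label.
float votes = boost.predict(sample, new Mat(), new Range(0, 50), false, true);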

+ * + * @param sample Input sample. + * @param missing Optional mask of missing measurements. To handle missing + * measurements, the weak classifiers must include surrogate splits (see + * CvDTreeParams.use_surrogates). + * @param slice Continuous subset of the sequence of weak classifiers to be used + * for prediction. By default, all the weak classifiers are used. + * @param rawMode Normally, it should be set to false. + * @param returnSum If true then return sum of votes instead of the + * class label. + * + * @see org.opencv.ml.CvBoost.predict + */ + public float predict(Mat sample, Mat missing, Range slice, boolean rawMode, boolean returnSum) + { + + float retVal = predict_0(nativeObj, sample.nativeObj, missing.nativeObj, slice.start, slice.end, rawMode, returnSum); + + return retVal; + } + +/** + *

Predicts a response for an input sample.

+ * + *

The method runs the sample through the trees in the ensemble and returns the + * output class label based on weighted voting.

+ * + * @param sample Input sample. + * + * @see org.opencv.ml.CvBoost.predict + */ + public float predict(Mat sample) + { + + float retVal = predict_1(nativeObj, sample.nativeObj); + + return retVal; + } + + + // + // C++: void CvBoost::prune(CvSlice slice) + // + +/** + *

Removes the specified weak classifiers.

+ * + *

The method removes the specified weak classifiers from the sequence.

+ * + *

Note: Do not confuse this method with the pruning of individual decision + * trees, which is currently not supported.
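// Editor's sketch: dropping the first ten weak classifiers from the ensemble.
boost.prune(new Range(0, 10));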

+ * + * @param slice Continuous subset of the sequence of weak classifiers to be + * removed. + * + * @see org.opencv.ml.CvBoost.prune + */ + public void prune(Range slice) + { + + prune_0(nativeObj, slice.start, slice.end); + + return; + } + + + // + // C++: bool CvBoost::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvBoostParams params = CvBoostParams(), bool update = false) + // + +/** + *

Trains a boosted tree classifier.

+ * + *

The train method follows the common template of "CvStatModel.train". The + * responses must be categorical, which means that boosted trees cannot be built + * for regression, and there should be two classes.
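// Editor's sketch (Mats assumed; CV_ROW_SAMPLE is 1 in OpenCV 2.4's ml module
// and does not appear to be exposed by these bindings): the full training call
// with explicit defaults and update=false, i.e. the ensemble is rebuilt.
CvBoost boost = new CvBoost();
boolean ok = boost.train(trainData, 1 /* CV_ROW_SAMPLE */, responses,
                         new Mat(), new Mat(), new Mat(), new Mat(),
                         new CvBoostParams(), false);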

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param varType a varType + * @param missingDataMask a missingDataMask + * @param params a params + * @param update Specifies whether the classifier needs to be updated + * (true, the new weak tree classifiers added to the existing + * ensemble) or the classifier needs to be rebuilt from scratch + * (false). + * + * @see org.opencv.ml.CvBoost.train + */ + public boolean train(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvBoostParams params, boolean update) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj, update); + + return retVal; + } + +/** + *

Trains a boosted tree classifier.

+ * + *

The train method follows the common template of "CvStatModel.train". The + * responses must be categorical, which means that boosted trees cannot be built + * for regression, and there should be two classes.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * + * @see org.opencv.ml.CvBoost.train + */ + public boolean train(Mat trainData, int tflag, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, tflag, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvBoost::CvBoost() + private static native long CvBoost_0(); + + // C++: CvBoost::CvBoost(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvBoostParams params = CvBoostParams()) + private static native long CvBoost_1(long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj); + private static native long CvBoost_2(long trainData_nativeObj, int tflag, long responses_nativeObj); + + // C++: void CvBoost::clear() + private static native void clear_0(long nativeObj); + + // C++: float CvBoost::predict(Mat sample, Mat missing = cv::Mat(), Range slice = cv::Range::all(), bool rawMode = false, bool returnSum = false) + private static native float predict_0(long nativeObj, long sample_nativeObj, long missing_nativeObj, int slice_start, int slice_end, boolean rawMode, boolean returnSum); + private static native float predict_1(long nativeObj, long sample_nativeObj); + + // C++: void CvBoost::prune(CvSlice slice) + private static native void prune_0(long nativeObj, int slice_start, int slice_end); + + // C++: bool CvBoost::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvBoostParams params = CvBoostParams(), bool update = false) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj, boolean update); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvBoostParams.java b/src/org/opencv/ml/CvBoostParams.java new file mode 100644 index 0000000..bbbb6ca --- /dev/null +++ b/src/org/opencv/ml/CvBoostParams.java @@ -0,0 +1,230 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + + + +// C++: class CvBoostParams +/** + *

Boosting training parameters.

+ * + *

There is one structure member that you can set directly:

+ * + *

Splitting criteria used to choose optimal splits during weak tree + * construction. Possible values are:

+ * + *
    + *
  • CvBoost.DEFAULT Use the default for the particular boosting method, + * see below. + *
+ *

+ * + *

// C++ code:

+ *
    + *
  • CvBoost.GINI Use the Gini index. This is the default option for Real + * AdaBoost; it may also be used for Discrete AdaBoost.
  • CvBoost.MISCLASS Use the misclassification rate. This is the default option + * for Discrete AdaBoost; it may also be used for Real AdaBoost.
  • CvBoost.SQERR Use the least-squares criterion. This is the default and the + * only option for LogitBoost and Gentle AdaBoost.
+ * + *

The structure is derived from "CvDTreeParams" but not all of the decision + * tree parameters are supported. In particular, cross-validation is not + * supported. + *

+ * + *

All parameters are public. You can initialize them with a constructor and then + * override some of them directly if you want.

+ * + * @see org.opencv.ml.CvBoostParams : public CvDTreeParams + */ +public class CvBoostParams extends CvDTreeParams { + + protected CvBoostParams(long addr) { super(addr); } + + + // + // C++: CvBoostParams::CvBoostParams() + // + +/** + *

The constructors.

+ * + *

See "CvDTreeParams.CvDTreeParams" for description of other parameters.

+ * + *

Default parameters are:

+ * + *

+ * + *

// C++ code:

+ * + *

CvBoostParams.CvBoostParams()

+ * + * + *

boost_type = CvBoost.REAL;

+ * + *

weak_count = 100;

+ * + *

weight_trim_rate = 0.95;

+ * + *

cv_folds = 0;

+ * + *

max_depth = 1;
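// Editor's sketch: overriding the defaults quoted above (values illustrative).
CvBoostParams params = new CvBoostParams();
params.set_boost_type(CvBoost.GENTLE);
params.set_weak_count(200);
params.set_weight_trim_rate(0.95);
params.set_max_depth(2);  // inherited from CvDTreeParams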

+ * + * + * @see org.opencv.ml.CvBoostParams.CvBoostParams + */ + public CvBoostParams() + { + + super( CvBoostParams_0() ); + + return; + } + + + // + // C++: int CvBoostParams::boost_type + // + + public int get_boost_type() + { + + int retVal = get_boost_type_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvBoostParams::boost_type + // + + public void set_boost_type(int boost_type) + { + + set_boost_type_0(nativeObj, boost_type); + + return; + } + + + // + // C++: int CvBoostParams::weak_count + // + + public int get_weak_count() + { + + int retVal = get_weak_count_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvBoostParams::weak_count + // + + public void set_weak_count(int weak_count) + { + + set_weak_count_0(nativeObj, weak_count); + + return; + } + + + // + // C++: int CvBoostParams::split_criteria + // + + public int get_split_criteria() + { + + int retVal = get_split_criteria_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvBoostParams::split_criteria + // + + public void set_split_criteria(int split_criteria) + { + + set_split_criteria_0(nativeObj, split_criteria); + + return; + } + + + // + // C++: double CvBoostParams::weight_trim_rate + // + + public double get_weight_trim_rate() + { + + double retVal = get_weight_trim_rate_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvBoostParams::weight_trim_rate + // + + public void set_weight_trim_rate(double weight_trim_rate) + { + + set_weight_trim_rate_0(nativeObj, weight_trim_rate); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvBoostParams::CvBoostParams() + private static native long CvBoostParams_0(); + + // C++: int CvBoostParams::boost_type + private static native int get_boost_type_0(long nativeObj); + + // C++: void CvBoostParams::boost_type + private static native void set_boost_type_0(long nativeObj, int boost_type); + + // C++: int CvBoostParams::weak_count + private static native int get_weak_count_0(long nativeObj); + + // C++: void CvBoostParams::weak_count + private static native void set_weak_count_0(long nativeObj, int weak_count); + + // C++: int CvBoostParams::split_criteria + private static native int get_split_criteria_0(long nativeObj); + + // C++: void CvBoostParams::split_criteria + private static native void set_split_criteria_0(long nativeObj, int split_criteria); + + // C++: double CvBoostParams::weight_trim_rate + private static native double get_weight_trim_rate_0(long nativeObj); + + // C++: void CvBoostParams::weight_trim_rate + private static native void set_weight_trim_rate_0(long nativeObj, double weight_trim_rate); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvDTree.java b/src/org/opencv/ml/CvDTree.java new file mode 100644 index 0000000..0b503f5 --- /dev/null +++ b/src/org/opencv/ml/CvDTree.java @@ -0,0 +1,183 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvDTree +/** + *

The class implements a decision tree as described in the beginning of this + * section.

+ * + * @see org.opencv.ml.CvDTree : public CvStatModel + */ +public class CvDTree extends CvStatModel { + + protected CvDTree(long addr) { super(addr); } + + + // + // C++: CvDTree::CvDTree() + // + + public CvDTree() + { + + super( CvDTree_0() ); + + return; + } + + + // + // C++: void CvDTree::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: Mat CvDTree::getVarImportance() + // + +/** + *

Returns the variable importance array.

+ * + * @see org.opencv.ml.CvDTree.getVarImportance + */ + public Mat getVarImportance() + { + + Mat retVal = new Mat(getVarImportance_0(nativeObj)); + + return retVal; + } + + + // + // C++: CvDTreeNode* CvDTree::predict(Mat sample, Mat missingDataMask = cv::Mat(), bool preprocessedInput = false) + // + + // Return type 'CvDTreeNode*' is not supported, skipping the function + + + // + // C++: bool CvDTree::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvDTreeParams params = CvDTreeParams()) + // + +/** + *

Trains a decision tree.

+ * + *

There are four train methods in "CvDTree":

+ *
    + *
  • The first two methods follow the generic "CvStatModel.train" + * conventions. It is the most complete form. Both data layouts + * (tflag=CV_ROW_SAMPLE and tflag=CV_COL_SAMPLE) are + * supported, as well as sample and variable subsets, missing measurements, + * arbitrary combinations of input and output variable types, and so on. The + * last parameter contains all of the necessary training parameters (see the + * "CvDTreeParams" description). + *
  • The third method uses "CvMLData" to pass training data to a decision + * tree. + *
  • The last method train is mostly used for building tree + * ensembles. It takes the pre-constructed "CvDTreeTrainData" instance and an + * optional subset of the training set. The indices in subsampleIdx + * are counted relative to the _sample_idx, passed to the + * CvDTreeTrainData constructor. For example, if _sample_idx=[1, + * 5, 7, 100], then subsampleIdx=[0,3] means that the + * samples [1, 100] of the original training set are used.
+ * + *

The function is parallelized with the TBB library.
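// Editor's sketch of the first, generic train form (Mats assumed;
// CV_ROW_SAMPLE is 1 in OpenCV 2.4's ml module):
CvDTree tree = new CvDTree();
boolean ok = tree.train(trainData, 1 /* CV_ROW_SAMPLE */, responses);
Mat importance = tree.getVarImportance();  // one score per input variable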

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param varType a varType + * @param missingDataMask a missingDataMask + * @param params a params + * + * @see org.opencv.ml.CvDTree.train + */ + public boolean train(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvDTreeParams params) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj); + + return retVal; + } + +/** + *

Trains a decision tree.

+ * + *

There are four train methods in "CvDTree":

+ *
    + *
  • The first two methods follow the generic "CvStatModel.train" + * conventions. It is the most complete form. Both data layouts + * (tflag=CV_ROW_SAMPLE and tflag=CV_COL_SAMPLE) are + * supported, as well as sample and variable subsets, missing measurements, + * arbitrary combinations of input and output variable types, and so on. The + * last parameter contains all of the necessary training parameters (see the + * "CvDTreeParams" description). + *
  • The third method uses "CvMLData" to pass training data to a decision + * tree. + *
  • The last method train is mostly used for building tree + * ensembles. It takes the pre-constructed "CvDTreeTrainData" instance and an + * optional subset of the training set. The indices in subsampleIdx + * are counted relative to the _sample_idx, passed to the + * CvDTreeTrainData constructor. For example, if _sample_idx=[1, + * 5, 7, 100], then subsampleIdx=[0,3] means that the + * samples [1, 100] of the original training set are used.
+ * + *

The function is parallelized with the TBB library.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * + * @see org.opencv.ml.CvDTree.train + */ + public boolean train(Mat trainData, int tflag, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, tflag, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvDTree::CvDTree() + private static native long CvDTree_0(); + + // C++: void CvDTree::clear() + private static native void clear_0(long nativeObj); + + // C++: Mat CvDTree::getVarImportance() + private static native long getVarImportance_0(long nativeObj); + + // C++: bool CvDTree::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvDTreeParams params = CvDTreeParams()) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvDTreeParams.java b/src/org/opencv/ml/CvDTreeParams.java new file mode 100644 index 0000000..4711572 --- /dev/null +++ b/src/org/opencv/ml/CvDTreeParams.java @@ -0,0 +1,326 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + + + +// C++: class CvDTreeParams +/** + *

The structure contains all the decision tree training parameters. You can + * initialize it with the default constructor and then override any parameters + * directly before training, or the structure may be fully initialized using the + * advanced variant of the constructor.

+ * + * @see org.opencv.ml.CvDTreeParams + */ +public class CvDTreeParams { + + protected final long nativeObj; + protected CvDTreeParams(long addr) { nativeObj = addr; } + + + // + // C++: CvDTreeParams::CvDTreeParams() + // + +/** + *

The constructors.

+ * + *

The default constructor initializes all the parameters with the default + * values tuned for a standalone classification tree:

+ * + *

+ * + *

// C++ code:

+ * + *

CvDTreeParams() : max_categories(10), max_depth(INT_MAX), min_sample_count(10),

+ * + *

cv_folds(10), use_surrogates(true), use_1se_rule(true),

+ * + *

truncate_pruned_tree(true), regression_accuracy(0.01f), priors(0)

+ * + *

{}
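// Editor's sketch: adjusting a few of the defaults quoted above before training.
CvDTreeParams params = new CvDTreeParams();
params.set_max_depth(8);
params.set_min_sample_count(5);
params.set_cv_folds(0);            // disable built-in cross-validation pruning
params.set_use_surrogates(false);  // no surrogate splits (no missing data)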

+ * + * @see org.opencv.ml.CvDTreeParams.CvDTreeParams + */ + public CvDTreeParams() + { + + nativeObj = CvDTreeParams_0(); + + return; + } + + + // + // C++: int CvDTreeParams::max_categories + // + + public int get_max_categories() + { + + int retVal = get_max_categories_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::max_categories + // + + public void set_max_categories(int max_categories) + { + + set_max_categories_0(nativeObj, max_categories); + + return; + } + + + // + // C++: int CvDTreeParams::max_depth + // + + public int get_max_depth() + { + + int retVal = get_max_depth_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::max_depth + // + + public void set_max_depth(int max_depth) + { + + set_max_depth_0(nativeObj, max_depth); + + return; + } + + + // + // C++: int CvDTreeParams::min_sample_count + // + + public int get_min_sample_count() + { + + int retVal = get_min_sample_count_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::min_sample_count + // + + public void set_min_sample_count(int min_sample_count) + { + + set_min_sample_count_0(nativeObj, min_sample_count); + + return; + } + + + // + // C++: int CvDTreeParams::cv_folds + // + + public int get_cv_folds() + { + + int retVal = get_cv_folds_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::cv_folds + // + + public void set_cv_folds(int cv_folds) + { + + set_cv_folds_0(nativeObj, cv_folds); + + return; + } + + + // + // C++: bool CvDTreeParams::use_surrogates + // + + public boolean get_use_surrogates() + { + + boolean retVal = get_use_surrogates_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::use_surrogates + // + + public void set_use_surrogates(boolean use_surrogates) + { + + set_use_surrogates_0(nativeObj, use_surrogates); + + return; + } + + + // + // C++: bool CvDTreeParams::use_1se_rule + // + + public boolean get_use_1se_rule() + { + + boolean retVal = get_use_1se_rule_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::use_1se_rule + // + + public void set_use_1se_rule(boolean use_1se_rule) + { + + set_use_1se_rule_0(nativeObj, use_1se_rule); + + return; + } + + + // + // C++: bool CvDTreeParams::truncate_pruned_tree + // + + public boolean get_truncate_pruned_tree() + { + + boolean retVal = get_truncate_pruned_tree_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::truncate_pruned_tree + // + + public void set_truncate_pruned_tree(boolean truncate_pruned_tree) + { + + set_truncate_pruned_tree_0(nativeObj, truncate_pruned_tree); + + return; + } + + + // + // C++: float CvDTreeParams::regression_accuracy + // + + public float get_regression_accuracy() + { + + float retVal = get_regression_accuracy_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvDTreeParams::regression_accuracy + // + + public void set_regression_accuracy(float regression_accuracy) + { + + set_regression_accuracy_0(nativeObj, regression_accuracy); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvDTreeParams::CvDTreeParams() + private static native long CvDTreeParams_0(); + + // C++: int CvDTreeParams::max_categories + private static native int get_max_categories_0(long nativeObj); + + // C++: void CvDTreeParams::max_categories + private static native void set_max_categories_0(long nativeObj, int max_categories); + + // C++: int CvDTreeParams::max_depth + private static native int 
get_max_depth_0(long nativeObj); + + // C++: void CvDTreeParams::max_depth + private static native void set_max_depth_0(long nativeObj, int max_depth); + + // C++: int CvDTreeParams::min_sample_count + private static native int get_min_sample_count_0(long nativeObj); + + // C++: void CvDTreeParams::min_sample_count + private static native void set_min_sample_count_0(long nativeObj, int min_sample_count); + + // C++: int CvDTreeParams::cv_folds + private static native int get_cv_folds_0(long nativeObj); + + // C++: void CvDTreeParams::cv_folds + private static native void set_cv_folds_0(long nativeObj, int cv_folds); + + // C++: bool CvDTreeParams::use_surrogates + private static native boolean get_use_surrogates_0(long nativeObj); + + // C++: void CvDTreeParams::use_surrogates + private static native void set_use_surrogates_0(long nativeObj, boolean use_surrogates); + + // C++: bool CvDTreeParams::use_1se_rule + private static native boolean get_use_1se_rule_0(long nativeObj); + + // C++: void CvDTreeParams::use_1se_rule + private static native void set_use_1se_rule_0(long nativeObj, boolean use_1se_rule); + + // C++: bool CvDTreeParams::truncate_pruned_tree + private static native boolean get_truncate_pruned_tree_0(long nativeObj); + + // C++: void CvDTreeParams::truncate_pruned_tree + private static native void set_truncate_pruned_tree_0(long nativeObj, boolean truncate_pruned_tree); + + // C++: float CvDTreeParams::regression_accuracy + private static native float get_regression_accuracy_0(long nativeObj); + + // C++: void CvDTreeParams::regression_accuracy + private static native void set_regression_accuracy_0(long nativeObj, float regression_accuracy); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvERTrees.java b/src/org/opencv/ml/CvERTrees.java new file mode 100644 index 0000000..441ed9a --- /dev/null +++ b/src/org/opencv/ml/CvERTrees.java @@ -0,0 +1,75 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvERTrees +/** + *

The class implements the Extremely randomized trees algorithm. + * CvERTrees inherits from "CvRTrees" and exposes the same + * interface, so see the description of the "CvRTrees" class for details. The + * training parameters of Extremely randomized trees are set with the same + * "CvRTParams" class.

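+ * A minimal usage sketch (illustrative; trainData is assumed to be a CV_32F Mat with one sample per row, responses a Mat with one value per sample):
+ *
+ * // Java code (illustrative):
+ *
+ * CvERTrees ert = new CvERTrees();
+ * boolean ok = ert.train(trainData, 1, responses); // tflag value 1 is assumed to mean CV_ROW_SAMPLE
+ * float prediction = ert.predict(sample);          // predict(Mat) is inherited from CvRTrees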
+ * + * @see org.opencv.ml.CvERTrees : public CvRTrees + */ +public class CvERTrees extends CvRTrees { + + protected CvERTrees(long addr) { super(addr); } + + + // + // C++: CvERTrees::CvERTrees() + // + + public CvERTrees() + { + + super( CvERTrees_0() ); + + return; + } + + + // + // C++: bool CvERTrees::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvRTParams params = CvRTParams()) + // + + public boolean train(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvRTParams params) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj); + + return retVal; + } + + public boolean train(Mat trainData, int tflag, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, tflag, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvERTrees::CvERTrees() + private static native long CvERTrees_0(); + + // C++: bool CvERTrees::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvRTParams params = CvRTParams()) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvGBTrees.java b/src/org/opencv/ml/CvGBTrees.java new file mode 100644 index 0000000..566d887 --- /dev/null +++ b/src/org/opencv/ml/CvGBTrees.java @@ -0,0 +1,296 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; +import org.opencv.core.Range; + +// C++: class CvGBTrees +/** + *

The class implements the Gradient boosted tree model as described in the + * beginning of this section.

+ * + * @see org.opencv.ml.CvGBTrees : public CvStatModel + */ +public class CvGBTrees extends CvStatModel { + + protected CvGBTrees(long addr) { super(addr); } + + + public static final int + SQUARED_LOSS = 0, + ABSOLUTE_LOSS = 0+1, + HUBER_LOSS = 3, + DEVIANCE_LOSS = 3+1; + + + // + // C++: CvGBTrees::CvGBTrees() + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow the conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameter descriptions.

+ * + * @see org.opencv.ml.CvGBTrees.CvGBTrees + */ + public CvGBTrees() + { + + super( CvGBTrees_0() ); + + return; + } + + + // + // C++: CvGBTrees::CvGBTrees(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvGBTreesParams params = CvGBTreesParams()) + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow the conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameter descriptions.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param varType a varType + * @param missingDataMask a missingDataMask + * @param params a params + * + * @see org.opencv.ml.CvGBTrees.CvGBTrees + */ + public CvGBTrees(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvGBTreesParams params) + { + + super( CvGBTrees_1(trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj) ); + + return; + } + +/** + *

Default and training constructors.

+ * + *

The constructors follow the conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameter descriptions.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * + * @see org.opencv.ml.CvGBTrees.CvGBTrees + */ + public CvGBTrees(Mat trainData, int tflag, Mat responses) + { + + super( CvGBTrees_2(trainData.nativeObj, tflag, responses.nativeObj) ); + + return; + } + + + // + // C++: void CvGBTrees::clear() + // + +/** + *

Clears the model.

+ * + *

The function deletes the data set information and all the weak models and + * sets all internal variables to the initial state. The function is called in + * "CvGBTrees.train" and in the destructor.

+ * + * @see org.opencv.ml.CvGBTrees.clear + */ + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: float CvGBTrees::predict(Mat sample, Mat missing = cv::Mat(), Range slice = cv::Range::all(), int k = -1) + // + +/** + *

Predicts a response for an input sample.

+ * + *

The method predicts the response corresponding to the given sample (see + * "Predicting with GBT"). + * The result is either the class label or the estimated function value. The + * "CvGBTrees.predict" method can use the parallel version of the GBT + * model prediction if OpenCV is built with the TBB library. In this case, + * predictions of the individual trees are computed in parallel.

+ * + * @param sample Input feature vector that has the same format as every training + * set element. If not all the variables were actually used during training, + * sample contains placeholder values at the corresponding positions. + * @param missing Missing values mask, which is a matrix of the same + * size as sample having the CV_8U type. + * 1 corresponds to the missing value in the same position in the + * sample vector. If there are no missing values in the feature + * vector, an empty matrix can be passed instead of the missing mask. + * @param slice Parameter defining the part of the ensemble used for prediction. + *

If slice = Range.all(), all trees are used. Use this parameter + * to get predictions of the GBT models with different ensemble sizes learning + * only one model.

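+ * A sketch of how the slice is used (illustrative; gbt is assumed to be a trained CvGBTrees and sample a CV_32F row vector):
+ *
+ * // Java code (illustrative):
+ *
+ * float full = gbt.predict(sample, new Mat(), Range.all(), -1);         // whole ensemble, total prediction
+ * float partial = gbt.predict(sample, new Mat(), new Range(0, 50), -1); // only the trees in the [0, 50) slice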
+ * @param k Number of tree ensembles built in case of the classification problem + * (see "Training GBT"). Use this parameter to change the output to sum of the + * trees' predictions in the k-th ensemble only. To get the total + * GBT model prediction, k value must be -1. For regression + * problems, k is also equal to -1. + * + * @see org.opencv.ml.CvGBTrees.predict + */ + public float predict(Mat sample, Mat missing, Range slice, int k) + { + + float retVal = predict_0(nativeObj, sample.nativeObj, missing.nativeObj, slice.start, slice.end, k); + + return retVal; + } + +/** + *

Predicts a response for an input sample.

+ * + *

The method predicts the response corresponding to the given sample (see + * "Predicting with GBT"). + * The result is either the class label or the estimated function value. The + * "CvGBTrees.predict" method can use the parallel version of the GBT + * model prediction if OpenCV is built with the TBB library. In this case, + * predictions of the individual trees are computed in parallel.

+ * + * @param sample Input feature vector that has the same format as every training + * set element. If not all the variables were actually used during training, + * sample contains placeholder values at the corresponding positions. + * + * @see org.opencv.ml.CvGBTrees.predict + */ + public float predict(Mat sample) + { + + float retVal = predict_1(nativeObj, sample.nativeObj); + + return retVal; + } + + + // + // C++: bool CvGBTrees::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvGBTreesParams params = CvGBTreesParams(), bool update = false) + // + +/** + *

Trains a Gradient boosted tree model.

+ * + *

The first train method follows the common template (see "CvStatModel.train"). + * Both tflag values (CV_ROW_SAMPLE, CV_COL_SAMPLE) + * are supported. + * trainData must be of the CV_32F type. + * responses must be a matrix of type CV_32S or + * CV_32F. In both cases it is converted into a CV_32F + * matrix inside the training procedure. varIdx and + * sampleIdx must be lists of indices (CV_32S) or + * masks (CV_8U or CV_8S). update is a + * dummy parameter.

+ * + *

The second form of "CvGBTrees.train" function uses "CvMLData" as a data set + * container. update is still a dummy parameter.

+ * + *

All parameters specific to the GBT model are passed into the training + * function as a "CvGBTreesParams" structure.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param varType a varType + * @param missingDataMask a missingDataMask + * @param params a params + * @param update a update + * + * @see org.opencv.ml.CvGBTrees.train + */ + public boolean train(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvGBTreesParams params, boolean update) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj, update); + + return retVal; + } + +/** + *

Trains a Gradient boosted tree model.

+ * + *

The first train method follows the common template (see "CvStatModel.train"). + * Both tflag values (CV_ROW_SAMPLE, CV_COL_SAMPLE) + * are supported. + * trainData must be of the CV_32F type. + * responses must be a matrix of type CV_32S or + * CV_32F. In both cases it is converted into a CV_32F + * matrix inside the training procedure. varIdx and + * sampleIdx must be lists of indices (CV_32S) or + * masks (CV_8U or CV_8S). update is a + * dummy parameter.

+ * + *

The second form of "CvGBTrees.train" function uses "CvMLData" as a data set + * container. update is still a dummy parameter.

+ * + *

All parameters specific to the GBT model are passed into the training + * function as a "CvGBTreesParams" structure.

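+ * A minimal training sketch (illustrative; trainData and responses are assumed CV_32F Mats laid out one sample per row):
+ *
+ * // Java code (illustrative):
+ *
+ * CvGBTreesParams params = new CvGBTreesParams();
+ * params.set_weak_count(100);
+ * CvGBTrees gbt = new CvGBTrees();
+ * // empty Mats stand in for the default varIdx/sampleIdx/varType/missingDataMask;
+ * // tflag value 1 is assumed to mean CV_ROW_SAMPLE
+ * boolean ok = gbt.train(trainData, 1, responses, new Mat(), new Mat(), new Mat(), new Mat(), params, false);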
+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * + * @see org.opencv.ml.CvGBTrees.train + */ + public boolean train(Mat trainData, int tflag, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, tflag, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvGBTrees::CvGBTrees() + private static native long CvGBTrees_0(); + + // C++: CvGBTrees::CvGBTrees(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvGBTreesParams params = CvGBTreesParams()) + private static native long CvGBTrees_1(long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj); + private static native long CvGBTrees_2(long trainData_nativeObj, int tflag, long responses_nativeObj); + + // C++: void CvGBTrees::clear() + private static native void clear_0(long nativeObj); + + // C++: float CvGBTrees::predict(Mat sample, Mat missing = cv::Mat(), Range slice = cv::Range::all(), int k = -1) + private static native float predict_0(long nativeObj, long sample_nativeObj, long missing_nativeObj, int slice_start, int slice_end, int k); + private static native float predict_1(long nativeObj, long sample_nativeObj); + + // C++: bool CvGBTrees::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvGBTreesParams params = CvGBTreesParams(), bool update = false) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj, boolean update); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvGBTreesParams.java b/src/org/opencv/ml/CvGBTreesParams.java new file mode 100644 index 0000000..b73d745 --- /dev/null +++ b/src/org/opencv/ml/CvGBTreesParams.java @@ -0,0 +1,189 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + + + +// C++: class CvGBTreesParams +/** + *

GBT training parameters.

+ * + *

The structure contains parameters for each single decision tree in the + * ensemble, as well as the whole model characteristics. The structure is + * derived from "CvDTreeParams" but not all of the decision tree parameters are + * supported: cross-validation, pruning, and class priorities are not used.

+ * + * @see org.opencv.ml.CvGBTreesParams : public CvDTreeParams + */ +public class CvGBTreesParams extends CvDTreeParams { + + protected CvGBTreesParams(long addr) { super(addr); } + + + // + // C++: CvGBTreesParams::CvGBTreesParams() + // + +/** + *

By default the following constructor is used: CvGBTreesParams(CvGBTrees.SQUARED_LOSS, + * 200, 0.8f, 0.01f, 3, false)

+ * + *

// C++ code:

+ * + *

: CvDTreeParams(3, 10, 0, false, 10, 0, false, false, 0)

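+ * The defaults can be overridden through the generated setters, for example (illustrative):
+ *
+ * // Java code (illustrative):
+ *
+ * CvGBTreesParams params = new CvGBTreesParams();
+ * params.set_loss_function_type(CvGBTrees.HUBER_LOSS);
+ * params.set_weak_count(400);
+ * params.set_shrinkage(0.05f);
+ * params.set_subsample_portion(0.7f);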
+ * + * @see org.opencv.ml.CvGBTreesParams.CvGBTreesParams + */ + public CvGBTreesParams() + { + + super( CvGBTreesParams_0() ); + + return; + } + + + // + // C++: int CvGBTreesParams::weak_count + // + + public int get_weak_count() + { + + int retVal = get_weak_count_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvGBTreesParams::weak_count + // + + public void set_weak_count(int weak_count) + { + + set_weak_count_0(nativeObj, weak_count); + + return; + } + + + // + // C++: int CvGBTreesParams::loss_function_type + // + + public int get_loss_function_type() + { + + int retVal = get_loss_function_type_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvGBTreesParams::loss_function_type + // + + public void set_loss_function_type(int loss_function_type) + { + + set_loss_function_type_0(nativeObj, loss_function_type); + + return; + } + + + // + // C++: float CvGBTreesParams::subsample_portion + // + + public float get_subsample_portion() + { + + float retVal = get_subsample_portion_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvGBTreesParams::subsample_portion + // + + public void set_subsample_portion(float subsample_portion) + { + + set_subsample_portion_0(nativeObj, subsample_portion); + + return; + } + + + // + // C++: float CvGBTreesParams::shrinkage + // + + public float get_shrinkage() + { + + float retVal = get_shrinkage_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvGBTreesParams::shrinkage + // + + public void set_shrinkage(float shrinkage) + { + + set_shrinkage_0(nativeObj, shrinkage); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvGBTreesParams::CvGBTreesParams() + private static native long CvGBTreesParams_0(); + + // C++: int CvGBTreesParams::weak_count + private static native int get_weak_count_0(long nativeObj); + + // C++: void CvGBTreesParams::weak_count + private static native void set_weak_count_0(long nativeObj, int weak_count); + + // C++: int CvGBTreesParams::loss_function_type + private static native int get_loss_function_type_0(long nativeObj); + + // C++: void CvGBTreesParams::loss_function_type + private static native void set_loss_function_type_0(long nativeObj, int loss_function_type); + + // C++: float CvGBTreesParams::subsample_portion + private static native float get_subsample_portion_0(long nativeObj); + + // C++: void CvGBTreesParams::subsample_portion + private static native void set_subsample_portion_0(long nativeObj, float subsample_portion); + + // C++: float CvGBTreesParams::shrinkage + private static native float get_shrinkage_0(long nativeObj); + + // C++: void CvGBTreesParams::shrinkage + private static native void set_shrinkage_0(long nativeObj, float shrinkage); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvKNearest.java b/src/org/opencv/ml/CvKNearest.java new file mode 100644 index 0000000..4af7cb6 --- /dev/null +++ b/src/org/opencv/ml/CvKNearest.java @@ -0,0 +1,224 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvKNearest +/** + *

The class implements the K-Nearest Neighbors model as described in the beginning + * of this section.

+ * + * @see org.opencv.ml.CvKNearest : public CvStatModel + */ +public class CvKNearest extends CvStatModel { + + protected CvKNearest(long addr) { super(addr); } + + + // + // C++: CvKNearest::CvKNearest() + // + +/** + *

Default and training constructors.

+ * + *

See "CvKNearest.train" for additional parameters descriptions.

+ * + * @see org.opencv.ml.CvKNearest.CvKNearest + */ + public CvKNearest() + { + + super( CvKNearest_0() ); + + return; + } + + + // + // C++: CvKNearest::CvKNearest(Mat trainData, Mat responses, Mat sampleIdx = cv::Mat(), bool isRegression = false, int max_k = 32) + // + +/** + *

Default and training constructors.

+ * + *

See "CvKNearest.train" for additional parameters descriptions.

+ * + * @param trainData a trainData + * @param responses a responses + * @param sampleIdx a sampleIdx + * @param isRegression a isRegression + * @param max_k a max_k + * + * @see org.opencv.ml.CvKNearest.CvKNearest + */ + public CvKNearest(Mat trainData, Mat responses, Mat sampleIdx, boolean isRegression, int max_k) + { + + super( CvKNearest_1(trainData.nativeObj, responses.nativeObj, sampleIdx.nativeObj, isRegression, max_k) ); + + return; + } + +/** + *

Default and training constructors.

+ * + *

See "CvKNearest.train" for additional parameters descriptions.

+ * + * @param trainData a trainData + * @param responses a responses + * + * @see org.opencv.ml.CvKNearest.CvKNearest + */ + public CvKNearest(Mat trainData, Mat responses) + { + + super( CvKNearest_2(trainData.nativeObj, responses.nativeObj) ); + + return; + } + + + // + // C++: float CvKNearest::find_nearest(Mat samples, int k, Mat& results, Mat& neighborResponses, Mat& dists) + // + +/** + *

Finds the neighbors and predicts responses for input vectors.

+ * + *

For each input vector (a row of the matrix samples), the method + * finds the k nearest neighbors. In case of regression, the + * predicted result is the mean value of the neighbors' + * responses. In case of classification, the class is determined by voting.

+ * + *

For each input vector, the neighbors are sorted by their distances to the + * vector.

+ * + *

In case of the C++ interface, you can pass output pointers to empty matrices, and + * the function will allocate the memory itself.

+ * + *

If only a single input vector is passed, all output matrices are optional and + * the predicted value is returned by the method.

+ * + *

The function is parallelized with the TBB library.

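+ * A minimal train-and-query sketch (illustrative; trainData, responses, and samples are assumed CV_32F Mats with one vector per row):
+ *
+ * // Java code (illustrative):
+ *
+ * CvKNearest knn = new CvKNearest();
+ * knn.train(trainData, responses);
+ * Mat results = new Mat(), neighborResponses = new Mat(), dists = new Mat();
+ * float first = knn.find_nearest(samples, 5, results, neighborResponses, dists);
+ * // results now holds one prediction per row of samples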
+ * + * @param samples Input samples stored by rows. It is a single-precision + * floating-point matrix of number_of_samples x number_of_features + * size. + * @param k Number of used nearest neighbors. It must satisfy constraint: k + * <= "CvKNearest.get_max_k". + * @param results Vector with results of prediction (regression or + * classification) for each input sample. It is a single-precision + * floating-point vector with number_of_samples elements. + * @param neighborResponses Optional output values for corresponding + * neighbors. It is a single-precision floating-point matrix of + * number_of_samples x k size. + * @param dists a dists + * + * @see org.opencv.ml.CvKNearest.find_nearest + */ + public float find_nearest(Mat samples, int k, Mat results, Mat neighborResponses, Mat dists) + { + + float retVal = find_nearest_0(nativeObj, samples.nativeObj, k, results.nativeObj, neighborResponses.nativeObj, dists.nativeObj); + + return retVal; + } + + + // + // C++: bool CvKNearest::train(Mat trainData, Mat responses, Mat sampleIdx = cv::Mat(), bool isRegression = false, int maxK = 32, bool updateBase = false) + // + +/** + *

Trains the model.

+ * + *

The method trains the K-Nearest model. It follows the conventions of the + * generic "CvStatModel.train" approach with the following limitations:

+ *
    + *
  • Only CV_ROW_SAMPLE data layout is supported. + *
  • Input variables are all ordered. + *
  • Output variables can be either categorical (is_regression=false) + * or ordered (is_regression=true). + *
  • Variable subsets (var_idx) and missing measurements are + * not supported. + *
+ * + * @param trainData a trainData + * @param responses a responses + * @param sampleIdx a sampleIdx + * @param isRegression Type of the problem: true for regression and + * false for classification. + * @param maxK Number of maximum neighbors that may be passed to the method + * "CvKNearest.find_nearest". + * @param updateBase Specifies whether the model is trained from scratch + * (update_base=false), or it is updated using the new training + * data (update_base=true). In the latter case, the parameter + * maxK must not be larger than the original value. + * + * @see org.opencv.ml.CvKNearest.train + */ + public boolean train(Mat trainData, Mat responses, Mat sampleIdx, boolean isRegression, int maxK, boolean updateBase) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, responses.nativeObj, sampleIdx.nativeObj, isRegression, maxK, updateBase); + + return retVal; + } + +/** + *

Trains the model.

+ * + *

The method trains the K-Nearest model. It follows the conventions of the + * generic "CvStatModel.train" approach with the following limitations:

+ *
    + *
  • Only CV_ROW_SAMPLE data layout is supported. + *
  • Input variables are all ordered. + *
  • Output variables can be either categorical (is_regression=false) + * or ordered (is_regression=true). + *
  • Variable subsets (var_idx) and missing measurements are + * not supported. + *
+ * + * @param trainData a trainData + * @param responses a responses + * + * @see org.opencv.ml.CvKNearest.train + */ + public boolean train(Mat trainData, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvKNearest::CvKNearest() + private static native long CvKNearest_0(); + + // C++: CvKNearest::CvKNearest(Mat trainData, Mat responses, Mat sampleIdx = cv::Mat(), bool isRegression = false, int max_k = 32) + private static native long CvKNearest_1(long trainData_nativeObj, long responses_nativeObj, long sampleIdx_nativeObj, boolean isRegression, int max_k); + private static native long CvKNearest_2(long trainData_nativeObj, long responses_nativeObj); + + // C++: float CvKNearest::find_nearest(Mat samples, int k, Mat& results, Mat& neighborResponses, Mat& dists) + private static native float find_nearest_0(long nativeObj, long samples_nativeObj, int k, long results_nativeObj, long neighborResponses_nativeObj, long dists_nativeObj); + + // C++: bool CvKNearest::train(Mat trainData, Mat responses, Mat sampleIdx = cv::Mat(), bool isRegression = false, int maxK = 32, bool updateBase = false) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, long responses_nativeObj, long sampleIdx_nativeObj, boolean isRegression, int maxK, boolean updateBase); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvNormalBayesClassifier.java b/src/org/opencv/ml/CvNormalBayesClassifier.java new file mode 100644 index 0000000..0bbde7a --- /dev/null +++ b/src/org/opencv/ml/CvNormalBayesClassifier.java @@ -0,0 +1,243 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvNormalBayesClassifier +/** + *

Bayes classifier for normally distributed data.

+ * + * @see org.opencv.ml.CvNormalBayesClassifier : public CvStatModel + */ +public class CvNormalBayesClassifier extends CvStatModel { + + protected CvNormalBayesClassifier(long addr) { super(addr); } + + + // + // C++: CvNormalBayesClassifier::CvNormalBayesClassifier() + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow the conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameter descriptions.

+ * + * @see org.opencv.ml.CvNormalBayesClassifier.CvNormalBayesClassifier + */ + public CvNormalBayesClassifier() + { + + super( CvNormalBayesClassifier_0() ); + + return; + } + + + // + // C++: CvNormalBayesClassifier::CvNormalBayesClassifier(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat()) + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow the conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameter descriptions.

+ * + * @param trainData a trainData + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * + * @see org.opencv.ml.CvNormalBayesClassifier.CvNormalBayesClassifier + */ + public CvNormalBayesClassifier(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx) + { + + super( CvNormalBayesClassifier_1(trainData.nativeObj, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj) ); + + return; + } + +/** + *

Default and training constructors.

+ * + *

The constructors follow the conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameter descriptions.

+ * + * @param trainData a trainData + * @param responses a responses + * + * @see org.opencv.ml.CvNormalBayesClassifier.CvNormalBayesClassifier + */ + public CvNormalBayesClassifier(Mat trainData, Mat responses) + { + + super( CvNormalBayesClassifier_2(trainData.nativeObj, responses.nativeObj) ); + + return; + } + + + // + // C++: void CvNormalBayesClassifier::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: float CvNormalBayesClassifier::predict(Mat samples, Mat* results = 0) + // + +/** + *

Predicts the response for sample(s).

+ * + *

The method estimates the most probable classes for the input vectors. Input + * vectors (one or more) are stored as rows of the matrix samples. + * In case of multiple input vectors, the output vector + * results receives one prediction per input vector. The predicted class for a single input vector is + * returned by the method.

+ * + *

The function is parallelized with the TBB library.

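+ * A minimal sketch (illustrative; trainData, responses, and samples are assumed CV_32F Mats with samples in rows):
+ *
+ * // Java code (illustrative):
+ *
+ * CvNormalBayesClassifier nb = new CvNormalBayesClassifier(trainData, responses);
+ * Mat results = new Mat();
+ * float firstLabel = nb.predict(samples, results); // results receives one label per row of samples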
+ * + * @param samples a samples + * @param results a results + * + * @see org.opencv.ml.CvNormalBayesClassifier.predict + */ + public float predict(Mat samples, Mat results) + { + + float retVal = predict_0(nativeObj, samples.nativeObj, results.nativeObj); + + return retVal; + } + +/** + *

Predicts the response for sample(s).

+ * + *

The method estimates the most probable classes for the input vectors. Input + * vectors (one or more) are stored as rows of the matrix samples. + * In case of multiple input vectors, the output vector + * results receives one prediction per input vector. The predicted class for a single input vector is + * returned by the method.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param samples a samples + * + * @see org.opencv.ml.CvNormalBayesClassifier.predict + */ + public float predict(Mat samples) + { + + float retVal = predict_1(nativeObj, samples.nativeObj); + + return retVal; + } + + + // + // C++: bool CvNormalBayesClassifier::train(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), bool update = false) + // + +/** + *

Trains the model.

+ * + *

The method trains the Normal Bayes classifier. It follows the conventions of + * the generic "CvStatModel.train" approach with the following limitations:

+ *
    + *
  • Only CV_ROW_SAMPLE data layout is supported. + *
  • Input variables are all ordered. + *
  • Output variable is categorical, which means that elements of + * responses must be integer numbers, though the vector may have + * the CV_32FC1 type. + *
  • Missing measurements are not supported. + *
+ * + * @param trainData a trainData + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param update Identifies whether the model should be trained from scratch + * (update=false) or should be updated using the new training data + * (update=true). + * + * @see org.opencv.ml.CvNormalBayesClassifier.train + */ + public boolean train(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, boolean update) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, update); + + return retVal; + } + +/** + *

Trains the model.

+ * + *

The method trains the Normal Bayes classifier. It follows the conventions of + * the generic "CvStatModel.train" approach with the following limitations:

+ *
    + *
  • Only CV_ROW_SAMPLE data layout is supported. + *
  • Input variables are all ordered. + *
  • Output variable is categorical, which means that elements of + * responses must be integer numbers, though the vector may have + * the CV_32FC1 type. + *
  • Missing measurements are not supported. + *
+ * + * @param trainData a trainData + * @param responses a responses + * + * @see org.opencv.ml.CvNormalBayesClassifier.train + */ + public boolean train(Mat trainData, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvNormalBayesClassifier::CvNormalBayesClassifier() + private static native long CvNormalBayesClassifier_0(); + + // C++: CvNormalBayesClassifier::CvNormalBayesClassifier(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat()) + private static native long CvNormalBayesClassifier_1(long trainData_nativeObj, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj); + private static native long CvNormalBayesClassifier_2(long trainData_nativeObj, long responses_nativeObj); + + // C++: void CvNormalBayesClassifier::clear() + private static native void clear_0(long nativeObj); + + // C++: float CvNormalBayesClassifier::predict(Mat samples, Mat* results = 0) + private static native float predict_0(long nativeObj, long samples_nativeObj, long results_nativeObj); + private static native float predict_1(long nativeObj, long samples_nativeObj); + + // C++: bool CvNormalBayesClassifier::train(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), bool update = false) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, boolean update); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvParamGrid.java b/src/org/opencv/ml/CvParamGrid.java new file mode 100644 index 0000000..145e499 --- /dev/null +++ b/src/org/opencv/ml/CvParamGrid.java @@ -0,0 +1,192 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + + + +// C++: class CvParamGrid +/** + *

The structure represents the logarithmic grid range of statmodel parameters. + * It is used for optimizing statmodel accuracy by varying model parameters, the + * accuracy estimate being computed by cross-validation.

+ * + *

min_val: Minimum value of the statmodel parameter.

+ * + *

max_val: Maximum value of the statmodel parameter. + *

+ * + *

step: Logarithmic step for iterating the statmodel parameter.

+ * + *

The grid determines the following iteration sequence of the statmodel + * parameter values:

+ * + *

(min_val, min_val*step, min_val*step^2,..., min_val*step^n),

+ * + *

where n is the maximal index satisfying

+ * + *

min_val * step^n < max_val

+ * + *

The grid is logarithmic, so step must always be greater than 1.

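+ * For example, a grid iterating 0.1, 1, 10, 100 can be built as follows (illustrative):
+ *
+ * // Java code (illustrative):
+ *
+ * CvParamGrid grid = new CvParamGrid();
+ * grid.set_min_val(0.1);
+ * grid.set_max_val(1000);  // exclusive bound: iteration stops once 0.1 * 10^n reaches 1000
+ * grid.set_step(10);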
+ * + * @see org.opencv.ml.CvParamGrid + */ +public class CvParamGrid { + + protected final long nativeObj; + protected CvParamGrid(long addr) { nativeObj = addr; } + + + public static final int + SVM_C = 0, + SVM_GAMMA = 1, + SVM_P = 2, + SVM_NU = 3, + SVM_COEF = 4, + SVM_DEGREE = 5; + + + // + // C++: CvParamGrid::CvParamGrid() + // + +/** + *

The constructors.

+ * + *

The full constructor initializes corresponding members. The default + * constructor creates a dummy grid:

+ * + *

+ * + *

// C++ code:

+ * + *

CvParamGrid.CvParamGrid() { min_val = max_val = step = 0; }

+ * + * + * @see org.opencv.ml.CvParamGrid.CvParamGrid + */ + public CvParamGrid() + { + + nativeObj = CvParamGrid_0(); + + return; + } + + + // + // C++: double CvParamGrid::min_val + // + + public double get_min_val() + { + + double retVal = get_min_val_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvParamGrid::min_val + // + + public void set_min_val(double min_val) + { + + set_min_val_0(nativeObj, min_val); + + return; + } + + + // + // C++: double CvParamGrid::max_val + // + + public double get_max_val() + { + + double retVal = get_max_val_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvParamGrid::max_val + // + + public void set_max_val(double max_val) + { + + set_max_val_0(nativeObj, max_val); + + return; + } + + + // + // C++: double CvParamGrid::step + // + + public double get_step() + { + + double retVal = get_step_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvParamGrid::step + // + + public void set_step(double step) + { + + set_step_0(nativeObj, step); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvParamGrid::CvParamGrid() + private static native long CvParamGrid_0(); + + // C++: double CvParamGrid::min_val + private static native double get_min_val_0(long nativeObj); + + // C++: void CvParamGrid::min_val + private static native void set_min_val_0(long nativeObj, double min_val); + + // C++: double CvParamGrid::max_val + private static native double get_max_val_0(long nativeObj); + + // C++: void CvParamGrid::max_val + private static native void set_max_val_0(long nativeObj, double max_val); + + // C++: double CvParamGrid::step + private static native double get_step_0(long nativeObj); + + // C++: void CvParamGrid::step + private static native void set_step_0(long nativeObj, double step); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvRTParams.java b/src/org/opencv/ml/CvRTParams.java new file mode 100644 index 0000000..a2c06eb --- /dev/null +++ b/src/org/opencv/ml/CvRTParams.java @@ -0,0 +1,147 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.TermCriteria; + +// C++: class CvRTParams +/** + *

Training parameters of random trees.

+ * + *

The set of training parameters for the forest is a superset of the training + * parameters for a single tree. However, random trees do not need all the + * functionality/features of decision trees. Most noticeably, the trees are not + * pruned, so the cross-validation parameters are not used.

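+ * A typical configuration sketch (illustrative; the chosen values are arbitrary):
+ *
+ * // Java code (illustrative):
+ *
+ * CvRTParams params = new CvRTParams();
+ * params.set_calc_var_importance(true); // needed later for CvRTrees.getVarImportance()
+ * params.set_nactive_vars(4);
+ * // stop after 100 trees or once the accuracy criterion reaches 0.01
+ * params.set_term_crit(new TermCriteria(TermCriteria.MAX_ITER + TermCriteria.EPS, 100, 0.01));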
+ * + * @see org.opencv.ml.CvRTParams : public CvDTreeParams + */ +public class CvRTParams extends CvDTreeParams { + + protected CvRTParams(long addr) { super(addr); } + + + // + // C++: CvRTParams::CvRTParams() + // + + public CvRTParams() + { + + super( CvRTParams_0() ); + + return; + } + + + // + // C++: bool CvRTParams::calc_var_importance + // + + public boolean get_calc_var_importance() + { + + boolean retVal = get_calc_var_importance_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvRTParams::calc_var_importance + // + + public void set_calc_var_importance(boolean calc_var_importance) + { + + set_calc_var_importance_0(nativeObj, calc_var_importance); + + return; + } + + + // + // C++: int CvRTParams::nactive_vars + // + + public int get_nactive_vars() + { + + int retVal = get_nactive_vars_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvRTParams::nactive_vars + // + + public void set_nactive_vars(int nactive_vars) + { + + set_nactive_vars_0(nativeObj, nactive_vars); + + return; + } + + + // + // C++: TermCriteria CvRTParams::term_crit + // + + public TermCriteria get_term_crit() + { + + TermCriteria retVal = new TermCriteria(get_term_crit_0(nativeObj)); + + return retVal; + } + + + // + // C++: void CvRTParams::term_crit + // + + public void set_term_crit(TermCriteria term_crit) + { + + set_term_crit_0(nativeObj, term_crit.type, term_crit.maxCount, term_crit.epsilon); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvRTParams::CvRTParams() + private static native long CvRTParams_0(); + + // C++: bool CvRTParams::calc_var_importance + private static native boolean get_calc_var_importance_0(long nativeObj); + + // C++: void CvRTParams::calc_var_importance + private static native void set_calc_var_importance_0(long nativeObj, boolean calc_var_importance); + + // C++: int CvRTParams::nactive_vars + private static native int get_nactive_vars_0(long nativeObj); + + // C++: void CvRTParams::nactive_vars + private static native void set_nactive_vars_0(long nativeObj, int nactive_vars); + + // C++: TermCriteria CvRTParams::term_crit + private static native double[] get_term_crit_0(long nativeObj); + + // C++: void CvRTParams::term_crit + private static native void set_term_crit_0(long nativeObj, int term_crit_type, int term_crit_maxCount, double term_crit_epsilon); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvRTrees.java b/src/org/opencv/ml/CvRTrees.java new file mode 100644 index 0000000..57b903b --- /dev/null +++ b/src/org/opencv/ml/CvRTrees.java @@ -0,0 +1,256 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvRTrees +/** + *

The class implements the random forest predictor as described in the + * beginning of this section.

+ * + * @see org.opencv.ml.CvRTrees : public CvStatModel + */ +public class CvRTrees extends CvStatModel { + + protected CvRTrees(long addr) { super(addr); } + + + // + // C++: CvRTrees::CvRTrees() + // + + public CvRTrees() + { + + super( CvRTrees_0() ); + + return; + } + + + // + // C++: void CvRTrees::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: Mat CvRTrees::getVarImportance() + // + +/** + *

Returns the variable importance array.

+ * + *

The method returns the variable importance vector, computed at the training + * stage when CvRTParams.calc_var_importance is set to true. If + * this flag was set to false, the NULL pointer is returned. This + * differs from the decision trees where variable importance can be computed + * anytime after the training.

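+ * A sketch (illustrative; rtrees denotes a CvRTrees instance and the training Mats are assumed to be prepared):
+ *
+ * // Java code (illustrative):
+ *
+ * CvRTParams params = new CvRTParams();
+ * params.set_calc_var_importance(true);
+ * // tflag value 1 is assumed to mean CV_ROW_SAMPLE; empty Mats take the defaults
+ * rtrees.train(trainData, 1, responses, new Mat(), new Mat(), new Mat(), new Mat(), params);
+ * Mat importance = rtrees.getVarImportance(); // one weight per input variable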
+ * + * @see org.opencv.ml.CvRTrees.getVarImportance + */ + public Mat getVarImportance() + { + + Mat retVal = new Mat(getVarImportance_0(nativeObj)); + + return retVal; + } + + + // + // C++: float CvRTrees::predict(Mat sample, Mat missing = cv::Mat()) + // + +/** + *

Predicts the output for an input sample.

+ * + *

The input parameters of the prediction method are the same as in + * "CvDTree.predict" but the return value type is different. This method + * returns the cumulative result from all the trees in the forest (the class + * that receives the majority of votes, or the mean of the regression function + * estimates).

+ * + * @param sample Sample for classification. + * @param missing Optional missing measurement mask of the sample. + * + * @see org.opencv.ml.CvRTrees.predict + */ + public float predict(Mat sample, Mat missing) + { + + float retVal = predict_0(nativeObj, sample.nativeObj, missing.nativeObj); + + return retVal; + } + +/** + *

Predicts the output for an input sample.

+ * + *

The input parameters of the prediction method are the same as in + * "CvDTree.predict" but the return value type is different. This method + * returns the cumulative result from all the trees in the forest (the class + * that receives the majority of votes, or the mean of the regression function + * estimates).

+ * + * @param sample Sample for classification. + * + * @see org.opencv.ml.CvRTrees.predict + */ + public float predict(Mat sample) + { + + float retVal = predict_1(nativeObj, sample.nativeObj); + + return retVal; + } + + + // + // C++: float CvRTrees::predict_prob(Mat sample, Mat missing = cv::Mat()) + // + +/** + *

Returns a fuzzy-predicted class label.

+ * + *

The function works for binary classification problems only. It returns a + * number between 0 and 1, representing the probability or confidence that + * the sample belongs to the second class. It is calculated as the proportion + * of decision trees that classified the sample into the second class.

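+ * A sketch (illustrative; rtrees is assumed to be a CvRTrees trained on a 2-class problem):
+ *
+ * // Java code (illustrative):
+ *
+ * float p = rtrees.predict_prob(sample); // confidence for the second class
+ * boolean isSecondClass = p >= 0.5f;     // simple decision at the 0.5 threshold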
+ * + * @param sample Sample for classification. + * @param missing Optional missing measurement mask of the sample. + * + * @see org.opencv.ml.CvRTrees.predict_prob + */ + public float predict_prob(Mat sample, Mat missing) + { + + float retVal = predict_prob_0(nativeObj, sample.nativeObj, missing.nativeObj); + + return retVal; + } + +/** + *

Returns a fuzzy-predicted class label.

+ * + *

The function works for binary classification problems only. It returns a + * number between 0 and 1, representing the probability or confidence that + * the sample belongs to the second class. It is calculated as the proportion + * of decision trees that classified the sample into the second class.

+ * + * @param sample Sample for classification. + * + * @see org.opencv.ml.CvRTrees.predict_prob + */ + public float predict_prob(Mat sample) + { + + float retVal = predict_prob_1(nativeObj, sample.nativeObj); + + return retVal; + } + + + // + // C++: bool CvRTrees::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvRTParams params = CvRTParams()) + // + +/** + *

Trains the Random Trees model.

+ * + *

The method "CvRTrees.train" is very similar to the method "CvDTree.train" + * and follows the generic method "CvStatModel.train" conventions. All the + * parameters specific to the algorithm training are passed as a "CvRTParams" + * instance. The estimate of the training error (oob-error) is + * stored in the protected class member oob_error.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param varType a varType + * @param missingDataMask a missingDataMask + * @param params a params + * + * @see org.opencv.ml.CvRTrees.train + */ + public boolean train(Mat trainData, int tflag, Mat responses, Mat varIdx, Mat sampleIdx, Mat varType, Mat missingDataMask, CvRTParams params) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, tflag, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, varType.nativeObj, missingDataMask.nativeObj, params.nativeObj); + + return retVal; + } + +/** + *

Trains the Random Trees model.

+ * + *

The method "CvRTrees.train" is very similar to the method "CvDTree.train" + * and follows the generic method "CvStatModel.train" conventions. All the + * parameters specific to the algorithm training are passed as a "CvRTParams" + * instance. The estimate of the training error (oob-error) is + * stored in the protected class member oob_error.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param trainData a trainData + * @param tflag a tflag + * @param responses a responses + * + * @see org.opencv.ml.CvRTrees.train + */ + public boolean train(Mat trainData, int tflag, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, tflag, responses.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvRTrees::CvRTrees() + private static native long CvRTrees_0(); + + // C++: void CvRTrees::clear() + private static native void clear_0(long nativeObj); + + // C++: Mat CvRTrees::getVarImportance() + private static native long getVarImportance_0(long nativeObj); + + // C++: float CvRTrees::predict(Mat sample, Mat missing = cv::Mat()) + private static native float predict_0(long nativeObj, long sample_nativeObj, long missing_nativeObj); + private static native float predict_1(long nativeObj, long sample_nativeObj); + + // C++: float CvRTrees::predict_prob(Mat sample, Mat missing = cv::Mat()) + private static native float predict_prob_0(long nativeObj, long sample_nativeObj, long missing_nativeObj); + private static native float predict_prob_1(long nativeObj, long sample_nativeObj); + + // C++: bool CvRTrees::train(Mat trainData, int tflag, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), Mat varType = cv::Mat(), Mat missingDataMask = cv::Mat(), CvRTParams params = CvRTParams()) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long varType_nativeObj, long missingDataMask_nativeObj, long params_nativeObj); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, int tflag, long responses_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvSVM.java b/src/org/opencv/ml/CvSVM.java new file mode 100644 index 0000000..cc9771d --- /dev/null +++ b/src/org/opencv/ml/CvSVM.java @@ -0,0 +1,442 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Mat; + +// C++: class CvSVM +/** + *

Support Vector Machines.

+ * + * @see org.opencv.ml.CvSVM : public CvStatModel + */ +public class CvSVM extends CvStatModel { + + protected CvSVM(long addr) { super(addr); } + + + public static final int + C_SVC = 100, + NU_SVC = 101, + ONE_CLASS = 102, + EPS_SVR = 103, + NU_SVR = 104, + LINEAR = 0, + POLY = 1, + RBF = 2, + SIGMOID = 3, + C = 0, + GAMMA = 1, + P = 2, + NU = 3, + COEF = 4, + DEGREE = 5; + + + // + // C++: CvSVM::CvSVM() + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow the conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameter descriptions.

+ * + * @see org.opencv.ml.CvSVM.CvSVM + */ + public CvSVM() + { + + super( CvSVM_0() ); + + return; + } + + + // + // C++: CvSVM::CvSVM(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), CvSVMParams params = CvSVMParams()) + // + +/** + *

Default and training constructors.

+ * + *

The constructors follow the conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameter descriptions.

+ * + * @param trainData a trainData + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param params a params + * + * @see org.opencv.ml.CvSVM.CvSVM + */ + public CvSVM(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams params) + { + + super( CvSVM_1(trainData.nativeObj, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, params.nativeObj) ); + + return; + } + +/** + *

Default and training constructors.

+ * + *

The constructors follow the conventions of "CvStatModel.CvStatModel". See + * "CvStatModel.train" for parameter descriptions.

+ * + * @param trainData a trainData + * @param responses a responses + * + * @see org.opencv.ml.CvSVM.CvSVM + */ + public CvSVM(Mat trainData, Mat responses) + { + + super( CvSVM_2(trainData.nativeObj, responses.nativeObj) ); + + return; + } + + + // + // C++: void CvSVM::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: int CvSVM::get_support_vector_count() + // + + public int get_support_vector_count() + { + + int retVal = get_support_vector_count_0(nativeObj); + + return retVal; + } + + + // + // C++: int CvSVM::get_var_count() + // + +/** + *

Returns the number of used features (variable count).

+ * + * @see org.opencv.ml.CvSVM.get_var_count + */ + public int get_var_count() + { + + int retVal = get_var_count_0(nativeObj); + + return retVal; + } + + + // + // C++: float CvSVM::predict(Mat sample, bool returnDFVal = false) + // + +/** + *

Predicts the response for input sample(s).

+ * + *

If you pass a single sample, the prediction result is returned. To get + * responses for several samples, pass the results + * matrix where the prediction results will be stored.

+ * + *

The function is parallelized with the TBB library.

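+ * A sketch (illustrative; svm is assumed to be a trained 2-class CvSVM and sample a CV_32F row vector):
+ *
+ * // Java code (illustrative):
+ *
+ * float label = svm.predict(sample);       // class label
+ * float dfVal = svm.predict(sample, true); // signed distance to the margin instead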
+ * + * @param sample Input sample for prediction. + * @param returnDFVal Specifies a type of the return value. If true + * and the problem is 2-class classification then the method returns the + * decision function value that is signed distance to the margin, else the + * function returns a class label (classification) or estimated function value + * (regression). + * + * @see org.opencv.ml.CvSVM.predict + */ + public float predict(Mat sample, boolean returnDFVal) + { + + float retVal = predict_0(nativeObj, sample.nativeObj, returnDFVal); + + return retVal; + } + +/** + *

Predicts the response for input sample(s).

+ * + *

If you pass a single sample, the prediction result is returned. To get + * responses for several samples, pass the results + * matrix where the prediction results will be stored.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param sample Input sample for prediction. + * + * @see org.opencv.ml.CvSVM.predict + */ + public float predict(Mat sample) + { + + float retVal = predict_1(nativeObj, sample.nativeObj); + + return retVal; + } + + + // + // C++: void CvSVM::predict(Mat samples, Mat& results) + // + +/** + *

Predicts the response for input sample(s).

+ * + *

If you pass a single sample, the prediction result is returned. To get + * responses for several samples, pass the results + * matrix where the prediction results will be stored.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param samples Input samples for prediction. + * @param results Output prediction responses for corresponding samples. + * + * @see org.opencv.ml.CvSVM.predict + */ + public void predict_all(Mat samples, Mat results) + { + + predict_all_0(nativeObj, samples.nativeObj, results.nativeObj); + + return; + } + + + // + // C++: bool CvSVM::train(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), CvSVMParams params = CvSVMParams()) + // + +/** + *

Trains an SVM.

+ * + *

The method trains the SVM model. It follows the conventions of the generic + * "CvStatModel.train" approach with the following limitations:

+ *
    + *
  • Only the CV_ROW_SAMPLE data layout is supported. + *
  • Input variables are all ordered. + *
  • Output variables can be either categorical (params.svm_type=CvSVM.C_SVC + * or params.svm_type=CvSVM.NU_SVC), or ordered (params.svm_type=CvSVM.EPS_SVR + * or params.svm_type=CvSVM.NU_SVR), or not required at all + * (params.svm_type=CvSVM.ONE_CLASS). + *
  • Missing measurements are not supported. + *
+ * + *

All the other parameters are gathered in the "CvSVMParams" structure.

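+ * A minimal training sketch (illustrative; the CvSVMParams setters used below are assumed to exist in the generated wrapper):
+ *
+ * // Java code (illustrative):
+ *
+ * CvSVMParams params = new CvSVMParams();
+ * params.set_svm_type(CvSVM.C_SVC);   // assumed setter
+ * params.set_kernel_type(CvSVM.RBF);  // assumed setter
+ * CvSVM svm = new CvSVM();
+ * boolean ok = svm.train(trainData, responses, new Mat(), new Mat(), params);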
+ * + * @param trainData a trainData + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param params a params + * + * @see org.opencv.ml.CvSVM.train + */ + public boolean train(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams params) + { + + boolean retVal = train_0(nativeObj, trainData.nativeObj, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, params.nativeObj); + + return retVal; + } + +/** + *

Trains an SVM.

+ * + *

The method trains the SVM model. It follows the conventions of the generic + * "CvStatModel.train" approach with the following limitations:

+ *
    + *
  • Only the CV_ROW_SAMPLE data layout is supported. + *
  • Input variables are all ordered. + *
  • Output variables can be either categorical (params.svm_type=CvSVM.C_SVC + * or params.svm_type=CvSVM.NU_SVC), or ordered (params.svm_type=CvSVM.EPS_SVR + * or params.svm_type=CvSVM.NU_SVR), or not required at all + * (params.svm_type=CvSVM.ONE_CLASS). + *
  • Missing measurements are not supported. + *
+ * + *

All the other parameters are gathered in the "CvSVMParams" structure.

+ * + * @param trainData a trainData + * @param responses a responses + * + * @see org.opencv.ml.CvSVM.train + */ + public boolean train(Mat trainData, Mat responses) + { + + boolean retVal = train_1(nativeObj, trainData.nativeObj, responses.nativeObj); + + return retVal; + } + + + // + // C++: bool CvSVM::train_auto(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams params, int k_fold = 10, CvParamGrid Cgrid = CvSVM::get_default_grid(CvSVM::C), CvParamGrid gammaGrid = CvSVM::get_default_grid(CvSVM::GAMMA), CvParamGrid pGrid = CvSVM::get_default_grid(CvSVM::P), CvParamGrid nuGrid = CvSVM::get_default_grid(CvSVM::NU), CvParamGrid coeffGrid = CvSVM::get_default_grid(CvSVM::COEF), CvParamGrid degreeGrid = CvSVM::get_default_grid(CvSVM::DEGREE), bool balanced = false) + // + +/** + *

Trains an SVM with optimal parameters.

+ * + *

The method trains the SVM model automatically by choosing the optimal + * parameters C, gamma, p, + * nu, coef0, degree from "CvSVMParams". + * Parameters are considered optimal when the cross-validation estimate of the + * test set error is minimal.

+ * + *

If there is no need to optimize a parameter, the corresponding grid step + * should be set to any value less than or equal to 1. For example, to avoid + * optimization in gamma, set gamma_grid.step = 0 and set + * gamma_grid.min_val and gamma_grid.max_val to arbitrary + * numbers. In this case, the value params.gamma is taken for + * gamma.

+ * + *

Finally, if optimization of a parameter is required but the + * corresponding grid is unknown, you may call the function "CvSVM.get_default_grid". + * To generate a grid for gamma, for example, call + * CvSVM.get_default_grid(CvSVM.GAMMA).

+ * + *

This function works for the classification (params.svm_type=CvSVM.C_SVC + * or params.svm_type=CvSVM.NU_SVC) as well as for the regression + * (params.svm_type=CvSVM.EPS_SVR or params.svm_type=CvSVM.NU_SVR). + * If params.svm_type=CvSVM.ONE_CLASS, no optimization is made and + * the usual SVM with parameters specified in params is executed.

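+ * A sketch of the short form with default grids (illustrative; set_svm_type is assumed to exist in the generated CvSVMParams wrapper):
+ *
+ * // Java code (illustrative):
+ *
+ * CvSVMParams params = new CvSVMParams();
+ * params.set_svm_type(CvSVM.C_SVC); // assumed setter
+ * CvSVM svm = new CvSVM();
+ * // 10-fold cross-validation and the default grids are used for the omitted arguments
+ * boolean ok = svm.train_auto(trainData, responses, new Mat(), new Mat(), params);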
+ * + * @param trainData a trainData + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param params a params + * @param k_fold Cross-validation parameter. The training set is divided into + * k_fold subsets. One subset is used to test the model, the others + * form the train set. So, the SVM algorithm is executed k_fold + * times. + * @param Cgrid a Cgrid + * @param gammaGrid Iteration grid for the corresponding SVM parameter. + * @param pGrid Iteration grid for the corresponding SVM parameter. + * @param nuGrid Iteration grid for the corresponding SVM parameter. + * @param coeffGrid Iteration grid for the corresponding SVM parameter. + * @param degreeGrid Iteration grid for the corresponding SVM parameter. + * @param balanced If true and the problem is 2-class + * classification then the method creates more balanced cross-validation subsets, + * that is, the class proportions in the subsets are kept close to the class + * proportions in the whole training dataset. + * + * @see org.opencv.ml.CvSVM.train_auto + */ + public boolean train_auto(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams params, int k_fold, CvParamGrid Cgrid, CvParamGrid gammaGrid, CvParamGrid pGrid, CvParamGrid nuGrid, CvParamGrid coeffGrid, CvParamGrid degreeGrid, boolean balanced) + { + + boolean retVal = train_auto_0(nativeObj, trainData.nativeObj, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, params.nativeObj, k_fold, Cgrid.nativeObj, gammaGrid.nativeObj, pGrid.nativeObj, nuGrid.nativeObj, coeffGrid.nativeObj, degreeGrid.nativeObj, balanced); + + return retVal; + } + +/** + *

Trains an SVM with optimal parameters.

+ * + *

The method trains the SVM model automatically by choosing the optimal + * parameters C, gamma, p, + * nu, coef0, degree from "CvSVMParams". + * Parameters are considered optimal when the cross-validation estimate of the + * test set error is minimal.

+ * + *

If there is no need to optimize a parameter, the corresponding grid step + * should be set to any value less than or equal to 1. For example, to avoid + * optimization in gamma, set gamma_grid.step = 0 and set + * gamma_grid.min_val and gamma_grid.max_val to arbitrary + * numbers. In this case, the value params.gamma is taken for + * gamma.

+ * + *

Finally, if optimization of a parameter is required but the + * corresponding grid is unknown, you may call the function "CvSVM.get_default_grid". + * To generate a grid, for example, for gamma, call + * CvSVM.get_default_grid(CvSVM.GAMMA).

+ * + *

This function works for the classification (params.svm_type=CvSVM.C_SVC + * or params.svm_type=CvSVM.NU_SVC) as well as for the regression + * (params.svm_type=CvSVM.EPS_SVR or params.svm_type=CvSVM.NU_SVR). + * If params.svm_type=CvSVM.ONE_CLASS, no optimization is made and + * the usual SVM with parameters specified in params is executed.

+ * + * @param trainData a trainData + * @param responses a responses + * @param varIdx a varIdx + * @param sampleIdx a sampleIdx + * @param params a params + * + * @see org.opencv.ml.CvSVM.train_auto + */ + public boolean train_auto(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams params) + { + + boolean retVal = train_auto_1(nativeObj, trainData.nativeObj, responses.nativeObj, varIdx.nativeObj, sampleIdx.nativeObj, params.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvSVM::CvSVM() + private static native long CvSVM_0(); + + // C++: CvSVM::CvSVM(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), CvSVMParams params = CvSVMParams()) + private static native long CvSVM_1(long trainData_nativeObj, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long params_nativeObj); + private static native long CvSVM_2(long trainData_nativeObj, long responses_nativeObj); + + // C++: void CvSVM::clear() + private static native void clear_0(long nativeObj); + + // C++: int CvSVM::get_support_vector_count() + private static native int get_support_vector_count_0(long nativeObj); + + // C++: int CvSVM::get_var_count() + private static native int get_var_count_0(long nativeObj); + + // C++: float CvSVM::predict(Mat sample, bool returnDFVal = false) + private static native float predict_0(long nativeObj, long sample_nativeObj, boolean returnDFVal); + private static native float predict_1(long nativeObj, long sample_nativeObj); + + // C++: void CvSVM::predict(Mat samples, Mat& results) + private static native void predict_all_0(long nativeObj, long samples_nativeObj, long results_nativeObj); + + // C++: bool CvSVM::train(Mat trainData, Mat responses, Mat varIdx = cv::Mat(), Mat sampleIdx = cv::Mat(), CvSVMParams params = CvSVMParams()) + private static native boolean train_0(long nativeObj, long trainData_nativeObj, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long params_nativeObj); + private static native boolean train_1(long nativeObj, long trainData_nativeObj, long responses_nativeObj); + + // C++: bool CvSVM::train_auto(Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, CvSVMParams params, int k_fold = 10, CvParamGrid Cgrid = CvSVM::get_default_grid(CvSVM::C), CvParamGrid gammaGrid = CvSVM::get_default_grid(CvSVM::GAMMA), CvParamGrid pGrid = CvSVM::get_default_grid(CvSVM::P), CvParamGrid nuGrid = CvSVM::get_default_grid(CvSVM::NU), CvParamGrid coeffGrid = CvSVM::get_default_grid(CvSVM::COEF), CvParamGrid degreeGrid = CvSVM::get_default_grid(CvSVM::DEGREE), bool balanced = false) + private static native boolean train_auto_0(long nativeObj, long trainData_nativeObj, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long params_nativeObj, int k_fold, long Cgrid_nativeObj, long gammaGrid_nativeObj, long pGrid_nativeObj, long nuGrid_nativeObj, long coeffGrid_nativeObj, long degreeGrid_nativeObj, boolean balanced); + private static native boolean train_auto_1(long nativeObj, long trainData_nativeObj, long responses_nativeObj, long varIdx_nativeObj, long sampleIdx_nativeObj, long params_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvSVMParams.java b/src/org/opencv/ml/CvSVMParams.java new file mode 100644 index 0000000..378bdf6 --- /dev/null +++ b/src/org/opencv/ml/CvSVMParams.java @@ -0,0 
+1,360 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.TermCriteria; + +// C++: class CvSVMParams +/** + *

SVM training parameters.

+ * + *

The structure must be initialized and passed to the training method of + * "CvSVM".

+ * + * @see org.opencv.ml.CvSVMParams + */ +public class CvSVMParams { + + protected final long nativeObj; + protected CvSVMParams(long addr) { nativeObj = addr; } + + + // + // C++: CvSVMParams::CvSVMParams() + // + +/** + *

The constructors.

+ * + *

The default constructor initializes the structure with the following values:

+ * + *

+ * + *

// C++ code:

+ * + *

CvSVMParams.CvSVMParams() :

+ * + *

svm_type(CvSVM.C_SVC), kernel_type(CvSVM.RBF), degree(0),

+ * + *

gamma(1), coef0(0), C(1), nu(0), p(0), class_weights(0)

+ * + * + *

term_crit = cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, + * FLT_EPSILON);
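
A short sketch of overriding these defaults through the generated setters (the concrete values are arbitrary):

    CvSVMParams params = new CvSVMParams();   // C_SVC, RBF, gamma=1, C=1, ...
    params.set_C(10.0);                       // trade margin width for training accuracy
    params.set_gamma(0.5);
    params.set_term_crit(new TermCriteria(
            TermCriteria.COUNT + TermCriteria.EPS, 1000, 1e-6));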

+ * + * + * @see org.opencv.ml.CvSVMParams.CvSVMParams + */ + public CvSVMParams() + { + + nativeObj = CvSVMParams_0(); + + return; + } + + + // + // C++: int CvSVMParams::svm_type + // + + public int get_svm_type() + { + + int retVal = get_svm_type_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::svm_type + // + + public void set_svm_type(int svm_type) + { + + set_svm_type_0(nativeObj, svm_type); + + return; + } + + + // + // C++: int CvSVMParams::kernel_type + // + + public int get_kernel_type() + { + + int retVal = get_kernel_type_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::kernel_type + // + + public void set_kernel_type(int kernel_type) + { + + set_kernel_type_0(nativeObj, kernel_type); + + return; + } + + + // + // C++: double CvSVMParams::degree + // + + public double get_degree() + { + + double retVal = get_degree_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::degree + // + + public void set_degree(double degree) + { + + set_degree_0(nativeObj, degree); + + return; + } + + + // + // C++: double CvSVMParams::gamma + // + + public double get_gamma() + { + + double retVal = get_gamma_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::gamma + // + + public void set_gamma(double gamma) + { + + set_gamma_0(nativeObj, gamma); + + return; + } + + + // + // C++: double CvSVMParams::coef0 + // + + public double get_coef0() + { + + double retVal = get_coef0_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::coef0 + // + + public void set_coef0(double coef0) + { + + set_coef0_0(nativeObj, coef0); + + return; + } + + + // + // C++: double CvSVMParams::C + // + + public double get_C() + { + + double retVal = get_C_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::C + // + + public void set_C(double C) + { + + set_C_0(nativeObj, C); + + return; + } + + + // + // C++: double CvSVMParams::nu + // + + public double get_nu() + { + + double retVal = get_nu_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::nu + // + + public void set_nu(double nu) + { + + set_nu_0(nativeObj, nu); + + return; + } + + + // + // C++: double CvSVMParams::p + // + + public double get_p() + { + + double retVal = get_p_0(nativeObj); + + return retVal; + } + + + // + // C++: void CvSVMParams::p + // + + public void set_p(double p) + { + + set_p_0(nativeObj, p); + + return; + } + + + // + // C++: TermCriteria CvSVMParams::term_crit + // + + public TermCriteria get_term_crit() + { + + TermCriteria retVal = new TermCriteria(get_term_crit_0(nativeObj)); + + return retVal; + } + + + // + // C++: void CvSVMParams::term_crit + // + + public void set_term_crit(TermCriteria term_crit) + { + + set_term_crit_0(nativeObj, term_crit.type, term_crit.maxCount, term_crit.epsilon); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CvSVMParams::CvSVMParams() + private static native long CvSVMParams_0(); + + // C++: int CvSVMParams::svm_type + private static native int get_svm_type_0(long nativeObj); + + // C++: void CvSVMParams::svm_type + private static native void set_svm_type_0(long nativeObj, int svm_type); + + // C++: int CvSVMParams::kernel_type + private static native int get_kernel_type_0(long nativeObj); + + // C++: void CvSVMParams::kernel_type + private static native void set_kernel_type_0(long nativeObj, int kernel_type); + + // C++: double CvSVMParams::degree + private static native double 
get_degree_0(long nativeObj); + + // C++: void CvSVMParams::degree + private static native void set_degree_0(long nativeObj, double degree); + + // C++: double CvSVMParams::gamma + private static native double get_gamma_0(long nativeObj); + + // C++: void CvSVMParams::gamma + private static native void set_gamma_0(long nativeObj, double gamma); + + // C++: double CvSVMParams::coef0 + private static native double get_coef0_0(long nativeObj); + + // C++: void CvSVMParams::coef0 + private static native void set_coef0_0(long nativeObj, double coef0); + + // C++: double CvSVMParams::C + private static native double get_C_0(long nativeObj); + + // C++: void CvSVMParams::C + private static native void set_C_0(long nativeObj, double C); + + // C++: double CvSVMParams::nu + private static native double get_nu_0(long nativeObj); + + // C++: void CvSVMParams::nu + private static native void set_nu_0(long nativeObj, double nu); + + // C++: double CvSVMParams::p + private static native double get_p_0(long nativeObj); + + // C++: void CvSVMParams::p + private static native void set_p_0(long nativeObj, double p); + + // C++: TermCriteria CvSVMParams::term_crit + private static native double[] get_term_crit_0(long nativeObj); + + // C++: void CvSVMParams::term_crit + private static native void set_term_crit_0(long nativeObj, int term_crit_type, int term_crit_maxCount, double term_crit_epsilon); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/CvStatModel.java b/src/org/opencv/ml/CvStatModel.java new file mode 100644 index 0000000..00303b6 --- /dev/null +++ b/src/org/opencv/ml/CvStatModel.java @@ -0,0 +1,176 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import java.lang.String; + +// C++: class CvStatModel +/** + *

Base class for statistical models in ML.

+ * + *

class CvStatModel

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

/ * CvStatModel(); * /

+ * + *

/ * CvStatModel(const Mat& train_data...); * /

+ * + *

virtual ~CvStatModel();

+ * + *

virtual void clear()=0;

+ * + *

/ * virtual bool train(const Mat& train_data, [int tflag,]..., const

+ * + *

Mat& responses,...,

+ * + *

[const Mat& var_idx,]..., [const Mat& sample_idx,]...

+ * + *

[const Mat& var_type,]..., [const Mat& missing_mask,]

+ * + *

...)=0;

+ *
* /
+ *

/ * virtual float predict(const Mat& sample...) const=0; * /

+ * + *

virtual void save(const char* filename, const char* name=0)=0;

+ * + *

virtual void load(const char* filename, const char* name=0)=0;

+ * + *

virtual void write(CvFileStorage* storage, const char* name)=0;

+ * + *

virtual void read(CvFileStorage* storage, CvFileNode* node)=0;

+ * + *

};

+ * + *

In this declaration, some methods are commented out. These are methods for + * which there is no unified API (with the exception of the default + * constructor). However, there are many similarities in their syntax and + * semantics, which are briefly described below in this section as if they were + * part of the base class. + *

+ * + * @see org.opencv.ml.CvStatModel + */ +public class CvStatModel { + + protected final long nativeObj; + protected CvStatModel(long addr) { nativeObj = addr; } + + + // + // C++: void CvStatModel::load(c_string filename, c_string name = 0) + // + +/** + *

Loads the model from a file.

+ * + *

The method load loads the complete model state with the + * specified name (or default model-dependent name) from the specified XML or + * YAML file. The previous model state is cleared by "CvStatModel.clear".
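
For example, restoring a previously saved SVM (a sketch; it assumes CvSVM inherits these methods from CvStatModel, as in these bindings, and that "svm_model.yml" exists):

    CvSVM svm = new CvSVM();
    svm.load("svm_model.yml");   // clears any previous state, then reads the model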

+ * + * @param filename a filename + * @param name a name + * + * @see org.opencv.ml.CvStatModel.load + */ + public void load(String filename, String name) + { + + load_0(nativeObj, filename, name); + + return; + } + +/** + *

Loads the model from a file.

+ * + *

The method load loads the complete model state with the + * specified name (or default model-dependent name) from the specified XML or + * YAML file. The previous model state is cleared by "CvStatModel.clear".

+ * + * @param filename a filename + * + * @see org.opencv.ml.CvStatModel.load + */ + public void load(String filename) + { + + load_1(nativeObj, filename); + + return; + } + + + // + // C++: void CvStatModel::save(c_string filename, c_string name = 0) + // + +/** + *

Saves the model to a file.

+ * + *

The method save saves the complete model state to the specified + * XML or YAML file with the specified name or default name (which depends on a + * particular class). *Data persistence* functionality from CxCore + * is used.
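
And the matching save call after training (a sketch; trainData and responses are placeholder Mats):

    CvSVM svm = new CvSVM();
    svm.train(trainData, responses);
    svm.save("svm_model.yml");   // persisted through the CxCore data persistence layer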

+ * + * @param filename a filename + * @param name a name + * + * @see org.opencv.ml.CvStatModel.save + */ + public void save(String filename, String name) + { + + save_0(nativeObj, filename, name); + + return; + } + +/** + *

Saves the model to a file.

+ * + *

The method save saves the complete model state to the specified + * XML or YAML file with the specified name or default name (which depends on a + * particular class). *Data persistence* functionality from CxCore + * is used.

+ * + * @param filename a filename + * + * @see org.opencv.ml.CvStatModel.save + */ + public void save(String filename) + { + + save_1(nativeObj, filename); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: void CvStatModel::load(c_string filename, c_string name = 0) + private static native void load_0(long nativeObj, String filename, String name); + private static native void load_1(long nativeObj, String filename); + + // C++: void CvStatModel::save(c_string filename, c_string name = 0) + private static native void save_0(long nativeObj, String filename, String name); + private static native void save_1(long nativeObj, String filename); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/EM.java b/src/org/opencv/ml/EM.java new file mode 100644 index 0000000..2438bf9 --- /dev/null +++ b/src/org/opencv/ml/EM.java @@ -0,0 +1,356 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + +import org.opencv.core.Algorithm; +import org.opencv.core.Mat; +import org.opencv.core.TermCriteria; + +// C++: class EM +/** + *

The class implements the EM algorithm as described in the beginning of this + * section. It is derived from "Algorithm".

+ * + * @see org.opencv.ml.EM : public Algorithm + */ +public class EM extends Algorithm { + + protected EM(long addr) { super(addr); } + + + public static final int + COV_MAT_SPHERICAL = 0, + COV_MAT_DIAGONAL = 1, + COV_MAT_GENERIC = 2, + COV_MAT_DEFAULT = COV_MAT_DIAGONAL, + DEFAULT_NCLUSTERS = 5, + DEFAULT_MAX_ITERS = 100, + START_E_STEP = 1, + START_M_STEP = 2, + START_AUTO_STEP = 0; + + + // + // C++: EM::EM(int nclusters = EM::DEFAULT_NCLUSTERS, int covMatType = EM::COV_MAT_DIAGONAL, TermCriteria termCrit = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, FLT_EPSILON)) + // + +/** + *

The constructor of the class

+ * + * @param nclusters The number of mixture components in the Gaussian mixture + * model. The default value of the parameter is EM.DEFAULT_NCLUSTERS=5. + * Some EM implementations could determine the optimal number of mixtures + * within a specified value range, but that is not the case in ML yet. + * @param covMatType Constraint on covariance matrices, which defines the type of + * matrices. Possible values are: + *
    + *
  • EM.COV_MAT_SPHERICAL A scaled identity matrix mu_k * I. + * Only the single parameter mu_k has to be estimated for each matrix. + * The option may be used in special cases, when the constraint is relevant, or + * as a first step in the optimization (for example, when the data is + * preprocessed with PCA). The results of such a preliminary estimation may be + * passed again to the optimization procedure, this time with covMatType=EM.COV_MAT_DIAGONAL. + *
  • EM.COV_MAT_DIAGONAL A diagonal matrix with positive diagonal + * elements. The number of free parameters is d for each matrix. + * This is the most commonly used option, yielding good estimation results. + *
  • EM.COV_MAT_GENERIC A symmetric positive-definite matrix. The number + * of free parameters in each matrix is about d^2/2. It is not + * recommended to use this option unless there is a fairly accurate initial + * estimate of the parameters and/or a huge number of training samples. + *
+ * @param termCrit The termination criteria of the EM algorithm. The EM + * algorithm can be terminated by the number of iterations termCrit.maxCount + * (number of M-steps) or when relative change of likelihood logarithm is less + * than termCrit.epsilon. Default maximum number of iterations is + * EM.DEFAULT_MAX_ITERS=100. + * + * @see org.opencv.ml.EM.EM + */ + public EM(int nclusters, int covMatType, TermCriteria termCrit) + { + + super( EM_0(nclusters, covMatType, termCrit.type, termCrit.maxCount, termCrit.epsilon) ); + + return; + } + +/** + *

The constructor of the class

+ * + * @see org.opencv.ml.EM.EM + */ + public EM() + { + + super( EM_1() ); + + return; + } + + + // + // C++: void EM::clear() + // + + public void clear() + { + + clear_0(nativeObj); + + return; + } + + + // + // C++: bool EM::isTrained() + // + + public boolean isTrained() + { + + boolean retVal = isTrained_0(nativeObj); + + return retVal; + } + + + // + // C++: Vec2d EM::predict(Mat sample, Mat& probs = Mat()) + // + +/** + *

Returns a likelihood logarithm value and an index of the most probable + * mixture component for the given sample.

+ * + *

The method returns a two-element double vector. The zeroth element is + * the likelihood logarithm value for the sample. The first element is the index of the + * most probable mixture component for the given sample.
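
A sketch of unpacking the returned vector (sample is a placeholder 1 x dims, single-channel Mat):

    double[] res = em.predict(sample);
    double logLikelihood = res[0];   // zeroth element: likelihood logarithm
    int component = (int) res[1];    // first element: most probable mixture component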

+ * + * @param sample A sample for classification. It should be a one-channel matrix + * of 1 x dims or dims x 1 size. + * @param probs Optional output matrix that contains posterior probabilities of + * each component given the sample. It has 1 x nclusters size and + * CV_64FC1 type. + * + * @see org.opencv.ml.EM.predict + */ + public double[] predict(Mat sample, Mat probs) + { + + double[] retVal = predict_0(nativeObj, sample.nativeObj, probs.nativeObj); + + return retVal; + } + +/** + *

Returns a likelihood logarithm value and an index of the most probable + * mixture component for the given sample.

+ * + *

The method returns a two-element double vector. The zeroth element is + * the likelihood logarithm value for the sample. The first element is the index of the + * most probable mixture component for the given sample.

+ * + * @param sample A sample for classification. It should be a one-channel matrix + * of 1 x dims or dims x 1 size. + * + * @see org.opencv.ml.EM.predict + */ + public double[] predict(Mat sample) + { + + double[] retVal = predict_1(nativeObj, sample.nativeObj); + + return retVal; + } + + + // + // C++: bool EM::train(Mat samples, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat()) + // + +/** + *

Estimates the Gaussian mixture parameters from a samples set.

+ * + *

The three versions of the training method differ in the initialization of the Gaussian + * mixture model parameters and the start step:

+ *
    + *
  • train - Starts with Expectation step. Initial values of the model + * parameters will be estimated by the k-means algorithm. + *
  • trainE - Starts with Expectation step. You need to provide initial + * means a_k of mixture components. Optionally you can pass initial + * weights pi_k and covariance matrices S_k of mixture + * components. + *
  • trainM - Starts with Maximization step. You need to provide initial + * probabilities p_(i,k) to use this option. + *
+ * + *

The methods return true if the Gaussian mixture model was + * trained successfully; otherwise, they return false.

+ * + *

Unlike many of the ML models, EM is an unsupervised learning algorithm and it + * does not take responses (class labels or function values) as input. Instead, + * it computes the *Maximum Likelihood Estimate* of the Gaussian mixture + * parameters from an input sample set, stores all the parameters inside the + * structure: p_(i,k) in probs, a_k in + * means, S_k in covs[k], pi_k in + * weights, and optionally computes the output "class label" for + * each sample: labels_i=arg max_k(p_(i,k)), i=1..N (indices of the + * most probable mixture component for each sample).

+ * + *

The trained model can be used further for prediction, just like any other + * classifier. The trained model is similar to the "CvNormalBayesClassifier".
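
A minimal training sketch (samples is a placeholder Mat with one sample per row; trainE or trainM would be used instead when initial means or probabilities are available):

    EM em = new EM(3, EM.COV_MAT_DIAGONAL,
            new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 100, 1e-6));
    Mat labels = new Mat();
    boolean trained = em.train(samples, new Mat(), labels, new Mat());
    // labels now holds the most probable component index for each sample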

+ * + * @param samples Samples from which the Gaussian mixture model will be + * estimated. It should be a one-channel matrix, each row of which is a sample. + * If the matrix does not have CV_64F type it will be converted to + * the inner matrix of such type for the further computing. + * @param logLikelihoods The optional output matrix that contains a likelihood + * logarithm value for each sample. It has nsamples x 1 size and + * CV_64FC1 type. + * @param labels The optional output "class label" for each sample: + * labels_i=arg max_k(p_(i,k)), i=1..N (indices of the most probable + * mixture component for each sample). It has nsamples x 1 size and + * CV_32SC1 type. + * @param probs The optional output matrix that contains posterior probabilities + * of each Gaussian mixture component given the each sample. It has nsamples + * x nclusters size and CV_64FC1 type. + * + * @see org.opencv.ml.EM.train + */ + public boolean train(Mat samples, Mat logLikelihoods, Mat labels, Mat probs) + { + + boolean retVal = train_0(nativeObj, samples.nativeObj, logLikelihoods.nativeObj, labels.nativeObj, probs.nativeObj); + + return retVal; + } + +/** + *

Estimates the Gaussian mixture parameters from a samples set.

+ * + *

The three versions of the training method differ in the initialization of the Gaussian + * mixture model parameters and the start step:

+ *
    + *
  • train - Starts with Expectation step. Initial values of the model + * parameters will be estimated by the k-means algorithm. + *
  • trainE - Starts with Expectation step. You need to provide initial + * means a_k of mixture components. Optionally you can pass initial + * weights pi_k and covariance matrices S_k of mixture + * components. + *
  • trainM - Starts with Maximization step. You need to provide initial + * probabilities p_(i,k) to use this option. + *
+ * + *

The methods return true if the Gaussian mixture model was + * trained successfully; otherwise, they return false.

+ * + *

Unlike many of the ML models, EM is an unsupervised learning algorithm and it + * does not take responses (class labels or function values) as input. Instead, + * it computes the *Maximum Likelihood Estimate* of the Gaussian mixture + * parameters from an input sample set, stores all the parameters inside the + * structure: p_(i,k) in probs, a_k in + * means, S_k in covs[k], pi_k in + * weights, and optionally computes the output "class label" for + * each sample: labels_i=arg max_k(p_(i,k)), i=1..N (indices of the + * most probable mixture component for each sample).

+ * + *

The trained model can be used further for prediction, just like any other + * classifier. The trained model is similar to the "CvNormalBayesClassifier".

+ * + * @param samples Samples from which the Gaussian mixture model will be + * estimated. It should be a one-channel matrix, each row of which is a sample. + * If the matrix does not have CV_64F type it will be converted to + * the inner matrix of such type for the further computing. + * + * @see org.opencv.ml.EM.train + */ + public boolean train(Mat samples) + { + + boolean retVal = train_1(nativeObj, samples.nativeObj); + + return retVal; + } + + + // + // C++: bool EM::trainE(Mat samples, Mat means0, Mat covs0 = Mat(), Mat weights0 = Mat(), Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat()) + // + + public boolean trainE(Mat samples, Mat means0, Mat covs0, Mat weights0, Mat logLikelihoods, Mat labels, Mat probs) + { + + boolean retVal = trainE_0(nativeObj, samples.nativeObj, means0.nativeObj, covs0.nativeObj, weights0.nativeObj, logLikelihoods.nativeObj, labels.nativeObj, probs.nativeObj); + + return retVal; + } + + public boolean trainE(Mat samples, Mat means0) + { + + boolean retVal = trainE_1(nativeObj, samples.nativeObj, means0.nativeObj); + + return retVal; + } + + + // + // C++: bool EM::trainM(Mat samples, Mat probs0, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat()) + // + + public boolean trainM(Mat samples, Mat probs0, Mat logLikelihoods, Mat labels, Mat probs) + { + + boolean retVal = trainM_0(nativeObj, samples.nativeObj, probs0.nativeObj, logLikelihoods.nativeObj, labels.nativeObj, probs.nativeObj); + + return retVal; + } + + public boolean trainM(Mat samples, Mat probs0) + { + + boolean retVal = trainM_1(nativeObj, samples.nativeObj, probs0.nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: EM::EM(int nclusters = EM::DEFAULT_NCLUSTERS, int covMatType = EM::COV_MAT_DIAGONAL, TermCriteria termCrit = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, FLT_EPSILON)) + private static native long EM_0(int nclusters, int covMatType, int termCrit_type, int termCrit_maxCount, double termCrit_epsilon); + private static native long EM_1(); + + // C++: void EM::clear() + private static native void clear_0(long nativeObj); + + // C++: bool EM::isTrained() + private static native boolean isTrained_0(long nativeObj); + + // C++: Vec2d EM::predict(Mat sample, Mat& probs = Mat()) + private static native double[] predict_0(long nativeObj, long sample_nativeObj, long probs_nativeObj); + private static native double[] predict_1(long nativeObj, long sample_nativeObj); + + // C++: bool EM::train(Mat samples, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat()) + private static native boolean train_0(long nativeObj, long samples_nativeObj, long logLikelihoods_nativeObj, long labels_nativeObj, long probs_nativeObj); + private static native boolean train_1(long nativeObj, long samples_nativeObj); + + // C++: bool EM::trainE(Mat samples, Mat means0, Mat covs0 = Mat(), Mat weights0 = Mat(), Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat()) + private static native boolean trainE_0(long nativeObj, long samples_nativeObj, long means0_nativeObj, long covs0_nativeObj, long weights0_nativeObj, long logLikelihoods_nativeObj, long labels_nativeObj, long probs_nativeObj); + private static native boolean trainE_1(long nativeObj, long samples_nativeObj, long means0_nativeObj); + + // C++: bool EM::trainM(Mat samples, Mat probs0, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat()) + private static native 
boolean trainM_0(long nativeObj, long samples_nativeObj, long probs0_nativeObj, long logLikelihoods_nativeObj, long labels_nativeObj, long probs_nativeObj); + private static native boolean trainM_1(long nativeObj, long samples_nativeObj, long probs0_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/ml/Ml.java b/src/org/opencv/ml/Ml.java new file mode 100644 index 0000000..104445b --- /dev/null +++ b/src/org/opencv/ml/Ml.java @@ -0,0 +1,13 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.ml; + + + +public class Ml { + + + +} diff --git a/src/org/opencv/ml/package.bluej b/src/org/opencv/ml/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/objdetect/CascadeClassifier.java b/src/org/opencv/objdetect/CascadeClassifier.java new file mode 100644 index 0000000..54970ec --- /dev/null +++ b/src/org/opencv/objdetect/CascadeClassifier.java @@ -0,0 +1,258 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.objdetect; + +import java.lang.String; +import org.opencv.core.Mat; +import org.opencv.core.MatOfDouble; +import org.opencv.core.MatOfInt; +import org.opencv.core.MatOfRect; +import org.opencv.core.Size; + +// C++: class CascadeClassifier +/** + *

Cascade classifier class for object detection.

+ * + * @see org.opencv.objdetect.CascadeClassifier + */ +public class CascadeClassifier { + + protected final long nativeObj; + protected CascadeClassifier(long addr) { nativeObj = addr; } + + + // + // C++: CascadeClassifier::CascadeClassifier() + // + +/** + *

Loads a classifier from a file.

+ * + * @see org.opencv.objdetect.CascadeClassifier.CascadeClassifier + */ + public CascadeClassifier() + { + + nativeObj = CascadeClassifier_0(); + + return; + } + + + // + // C++: CascadeClassifier::CascadeClassifier(string filename) + // + +/** + *

Loads a classifier from a file.

+ * + * @param filename Name of the file from which the classifier is loaded. + * + * @see org.opencv.objdetect.CascadeClassifier.CascadeClassifier + */ + public CascadeClassifier(String filename) + { + + nativeObj = CascadeClassifier_1(filename); + + return; + } + + + // + // C++: void CascadeClassifier::detectMultiScale(Mat image, vector_Rect& objects, double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0, Size minSize = Size(), Size maxSize = Size()) + // + +/** + *

Detects objects of different sizes in the input image. The detected objects + * are returned as a list of rectangles.

+ * + *

The function is parallelized with the TBB library.
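
A typical detection sketch (the cascade file name and the grayscale input gray are assumptions):

    CascadeClassifier cascade = new CascadeClassifier("haarcascade_frontalface_alt.xml");
    MatOfRect objects = new MatOfRect();
    cascade.detectMultiScale(gray, objects, 1.1, 3, 0, new Size(30, 30), new Size());
    for (Rect r : objects.toArray()) {
        // each r is one detected object
    }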

+ * + * @param image Matrix of the type CV_8U containing an image where + * objects are detected. + * @param objects Vector of rectangles where each rectangle contains the + * detected object. + * @param scaleFactor Parameter specifying how much the image size is reduced at + * each image scale. + * @param minNeighbors Parameter specifying how many neighbors each candidate + * rectangle should have to retain it. + * @param flags Parameter with the same meaning for an old cascade as in the + * function cvHaarDetectObjects. It is not used for a new cascade. + * @param minSize Minimum possible object size. Objects smaller than that are + * ignored. + * @param maxSize Maximum possible object size. Objects larger than that are + * ignored. + * + * @see org.opencv.objdetect.CascadeClassifier.detectMultiScale + */ + public void detectMultiScale(Mat image, MatOfRect objects, double scaleFactor, int minNeighbors, int flags, Size minSize, Size maxSize) + { + Mat objects_mat = objects; + detectMultiScale_0(nativeObj, image.nativeObj, objects_mat.nativeObj, scaleFactor, minNeighbors, flags, minSize.width, minSize.height, maxSize.width, maxSize.height); + + return; + } + +/** + *

Detects objects of different sizes in the input image. The detected objects + * are returned as a list of rectangles.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param image Matrix of the type CV_8U containing an image where + * objects are detected. + * @param objects Vector of rectangles where each rectangle contains the + * detected object. + * + * @see org.opencv.objdetect.CascadeClassifier.detectMultiScale + */ + public void detectMultiScale(Mat image, MatOfRect objects) + { + Mat objects_mat = objects; + detectMultiScale_1(nativeObj, image.nativeObj, objects_mat.nativeObj); + + return; + } + + + // + // C++: void CascadeClassifier::detectMultiScale(Mat image, vector_Rect& objects, vector_int rejectLevels, vector_double levelWeights, double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0, Size minSize = Size(), Size maxSize = Size(), bool outputRejectLevels = false) + // + +/** + *

Detects objects of different sizes in the input image. The detected objects + * are returned as a list of rectangles.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param image Matrix of the type CV_8U containing an image where + * objects are detected. + * @param objects Vector of rectangles where each rectangle contains the + * detected object. + * @param rejectLevels a rejectLevels + * @param levelWeights a levelWeights + * @param scaleFactor Parameter specifying how much the image size is reduced at + * each image scale. + * @param minNeighbors Parameter specifying how many neighbors each candidate + * rectangle should have to retain it. + * @param flags Parameter with the same meaning for an old cascade as in the + * function cvHaarDetectObjects. It is not used for a new cascade. + * @param minSize Minimum possible object size. Objects smaller than that are + * ignored. + * @param maxSize Maximum possible object size. Objects larger than that are + * ignored. + * @param outputRejectLevels a outputRejectLevels + * + * @see org.opencv.objdetect.CascadeClassifier.detectMultiScale + */ + public void detectMultiScale(Mat image, MatOfRect objects, MatOfInt rejectLevels, MatOfDouble levelWeights, double scaleFactor, int minNeighbors, int flags, Size minSize, Size maxSize, boolean outputRejectLevels) + { + Mat objects_mat = objects; + Mat rejectLevels_mat = rejectLevels; + Mat levelWeights_mat = levelWeights; + detectMultiScale_2(nativeObj, image.nativeObj, objects_mat.nativeObj, rejectLevels_mat.nativeObj, levelWeights_mat.nativeObj, scaleFactor, minNeighbors, flags, minSize.width, minSize.height, maxSize.width, maxSize.height, outputRejectLevels); + + return; + } + +/** + *

Detects objects of different sizes in the input image. The detected objects + * are returned as a list of rectangles.

+ * + *

The function is parallelized with the TBB library.

+ * + * @param image Matrix of the type CV_8U containing an image where + * objects are detected. + * @param objects Vector of rectangles where each rectangle contains the + * detected object. + * @param rejectLevels a rejectLevels + * @param levelWeights a levelWeights + * + * @see org.opencv.objdetect.CascadeClassifier.detectMultiScale + */ + public void detectMultiScale(Mat image, MatOfRect objects, MatOfInt rejectLevels, MatOfDouble levelWeights) + { + Mat objects_mat = objects; + Mat rejectLevels_mat = rejectLevels; + Mat levelWeights_mat = levelWeights; + detectMultiScale_3(nativeObj, image.nativeObj, objects_mat.nativeObj, rejectLevels_mat.nativeObj, levelWeights_mat.nativeObj); + + return; + } + + + // + // C++: bool CascadeClassifier::empty() + // + +/** + *

Checks whether the classifier has been loaded.

+ * + * @see org.opencv.objdetect.CascadeClassifier.empty + */ + public boolean empty() + { + + boolean retVal = empty_0(nativeObj); + + return retVal; + } + + + // + // C++: bool CascadeClassifier::load(string filename) + // + +/** + *

Loads a classifier from a file.

+ * + * @param filename Name of the file from which the classifier is loaded. The + * file may contain an old HAAR classifier trained by the haartraining + * application or a new cascade classifier trained by the traincascade + * application. + * + * @see org.opencv.objdetect.CascadeClassifier.load + */ + public boolean load(String filename) + { + + boolean retVal = load_0(nativeObj, filename); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: CascadeClassifier::CascadeClassifier() + private static native long CascadeClassifier_0(); + + // C++: CascadeClassifier::CascadeClassifier(string filename) + private static native long CascadeClassifier_1(String filename); + + // C++: void CascadeClassifier::detectMultiScale(Mat image, vector_Rect& objects, double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0, Size minSize = Size(), Size maxSize = Size()) + private static native void detectMultiScale_0(long nativeObj, long image_nativeObj, long objects_mat_nativeObj, double scaleFactor, int minNeighbors, int flags, double minSize_width, double minSize_height, double maxSize_width, double maxSize_height); + private static native void detectMultiScale_1(long nativeObj, long image_nativeObj, long objects_mat_nativeObj); + + // C++: void CascadeClassifier::detectMultiScale(Mat image, vector_Rect& objects, vector_int rejectLevels, vector_double levelWeights, double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0, Size minSize = Size(), Size maxSize = Size(), bool outputRejectLevels = false) + private static native void detectMultiScale_2(long nativeObj, long image_nativeObj, long objects_mat_nativeObj, long rejectLevels_mat_nativeObj, long levelWeights_mat_nativeObj, double scaleFactor, int minNeighbors, int flags, double minSize_width, double minSize_height, double maxSize_width, double maxSize_height, boolean outputRejectLevels); + private static native void detectMultiScale_3(long nativeObj, long image_nativeObj, long objects_mat_nativeObj, long rejectLevels_mat_nativeObj, long levelWeights_mat_nativeObj); + + // C++: bool CascadeClassifier::empty() + private static native boolean empty_0(long nativeObj); + + // C++: bool CascadeClassifier::load(string filename) + private static native boolean load_0(long nativeObj, String filename); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/objdetect/HOGDescriptor.java b/src/org/opencv/objdetect/HOGDescriptor.java new file mode 100644 index 0000000..e5490d6 --- /dev/null +++ b/src/org/opencv/objdetect/HOGDescriptor.java @@ -0,0 +1,538 @@ + +// +// This file is auto-generated. Please don't modify it! 
+// +package org.opencv.objdetect; + +import java.lang.String; +import org.opencv.core.Mat; +import org.opencv.core.MatOfDouble; +import org.opencv.core.MatOfFloat; +import org.opencv.core.MatOfPoint; +import org.opencv.core.MatOfRect; +import org.opencv.core.Size; + +// C++: class HOGDescriptor +public class HOGDescriptor { + + protected final long nativeObj; + protected HOGDescriptor(long addr) { nativeObj = addr; } + + + public static final int + L2Hys = 0, + DEFAULT_NLEVELS = 64; + + + // + // C++: HOGDescriptor::HOGDescriptor() + // + + public HOGDescriptor() + { + + nativeObj = HOGDescriptor_0(); + + return; + } + + + // + // C++: HOGDescriptor::HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride, Size _cellSize, int _nbins, int _derivAperture = 1, double _winSigma = -1, int _histogramNormType = HOGDescriptor::L2Hys, double _L2HysThreshold = 0.2, bool _gammaCorrection = false, int _nlevels = HOGDescriptor::DEFAULT_NLEVELS) + // + + public HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride, Size _cellSize, int _nbins, int _derivAperture, double _winSigma, int _histogramNormType, double _L2HysThreshold, boolean _gammaCorrection, int _nlevels) + { + + nativeObj = HOGDescriptor_1(_winSize.width, _winSize.height, _blockSize.width, _blockSize.height, _blockStride.width, _blockStride.height, _cellSize.width, _cellSize.height, _nbins, _derivAperture, _winSigma, _histogramNormType, _L2HysThreshold, _gammaCorrection, _nlevels); + + return; + } + + public HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride, Size _cellSize, int _nbins) + { + + nativeObj = HOGDescriptor_2(_winSize.width, _winSize.height, _blockSize.width, _blockSize.height, _blockStride.width, _blockStride.height, _cellSize.width, _cellSize.height, _nbins); + + return; + } + + + // + // C++: HOGDescriptor::HOGDescriptor(String filename) + // + + public HOGDescriptor(String filename) + { + + nativeObj = HOGDescriptor_3(filename); + + return; + } + + + // + // C++: bool HOGDescriptor::checkDetectorSize() + // + + public boolean checkDetectorSize() + { + + boolean retVal = checkDetectorSize_0(nativeObj); + + return retVal; + } + + + // + // C++: void HOGDescriptor::compute(Mat img, vector_float& descriptors, Size winStride = Size(), Size padding = Size(), vector_Point locations = vector()) + // + + public void compute(Mat img, MatOfFloat descriptors, Size winStride, Size padding, MatOfPoint locations) + { + Mat descriptors_mat = descriptors; + Mat locations_mat = locations; + compute_0(nativeObj, img.nativeObj, descriptors_mat.nativeObj, winStride.width, winStride.height, padding.width, padding.height, locations_mat.nativeObj); + + return; + } + + public void compute(Mat img, MatOfFloat descriptors) + { + Mat descriptors_mat = descriptors; + compute_1(nativeObj, img.nativeObj, descriptors_mat.nativeObj); + + return; + } + + + // + // C++: void HOGDescriptor::computeGradient(Mat img, Mat& grad, Mat& angleOfs, Size paddingTL = Size(), Size paddingBR = Size()) + // + + public void computeGradient(Mat img, Mat grad, Mat angleOfs, Size paddingTL, Size paddingBR) + { + + computeGradient_0(nativeObj, img.nativeObj, grad.nativeObj, angleOfs.nativeObj, paddingTL.width, paddingTL.height, paddingBR.width, paddingBR.height); + + return; + } + + public void computeGradient(Mat img, Mat grad, Mat angleOfs) + { + + computeGradient_1(nativeObj, img.nativeObj, grad.nativeObj, angleOfs.nativeObj); + + return; + } + + + // + // C++: void HOGDescriptor::detect(Mat img, vector_Point& foundLocations, vector_double& 
weights, double hitThreshold = 0, Size winStride = Size(), Size padding = Size(), vector_Point searchLocations = vector()) + // + + public void detect(Mat img, MatOfPoint foundLocations, MatOfDouble weights, double hitThreshold, Size winStride, Size padding, MatOfPoint searchLocations) + { + Mat foundLocations_mat = foundLocations; + Mat weights_mat = weights; + Mat searchLocations_mat = searchLocations; + detect_0(nativeObj, img.nativeObj, foundLocations_mat.nativeObj, weights_mat.nativeObj, hitThreshold, winStride.width, winStride.height, padding.width, padding.height, searchLocations_mat.nativeObj); + + return; + } + + public void detect(Mat img, MatOfPoint foundLocations, MatOfDouble weights) + { + Mat foundLocations_mat = foundLocations; + Mat weights_mat = weights; + detect_1(nativeObj, img.nativeObj, foundLocations_mat.nativeObj, weights_mat.nativeObj); + + return; + } + + + // + // C++: void HOGDescriptor::detectMultiScale(Mat img, vector_Rect& foundLocations, vector_double& foundWeights, double hitThreshold = 0, Size winStride = Size(), Size padding = Size(), double scale = 1.05, double finalThreshold = 2.0, bool useMeanshiftGrouping = false) + // + + public void detectMultiScale(Mat img, MatOfRect foundLocations, MatOfDouble foundWeights, double hitThreshold, Size winStride, Size padding, double scale, double finalThreshold, boolean useMeanshiftGrouping) + { + Mat foundLocations_mat = foundLocations; + Mat foundWeights_mat = foundWeights; + detectMultiScale_0(nativeObj, img.nativeObj, foundLocations_mat.nativeObj, foundWeights_mat.nativeObj, hitThreshold, winStride.width, winStride.height, padding.width, padding.height, scale, finalThreshold, useMeanshiftGrouping); + + return; + } + + public void detectMultiScale(Mat img, MatOfRect foundLocations, MatOfDouble foundWeights) + { + Mat foundLocations_mat = foundLocations; + Mat foundWeights_mat = foundWeights; + detectMultiScale_1(nativeObj, img.nativeObj, foundLocations_mat.nativeObj, foundWeights_mat.nativeObj); + + return; + } + + + // + // C++: static vector_float HOGDescriptor::getDaimlerPeopleDetector() + // + + public static MatOfFloat getDaimlerPeopleDetector() + { + + MatOfFloat retVal = MatOfFloat.fromNativeAddr(getDaimlerPeopleDetector_0()); + + return retVal; + } + + + // + // C++: static vector_float HOGDescriptor::getDefaultPeopleDetector() + // + + public static MatOfFloat getDefaultPeopleDetector() + { + + MatOfFloat retVal = MatOfFloat.fromNativeAddr(getDefaultPeopleDetector_0()); + + return retVal; + } + + + // + // C++: size_t HOGDescriptor::getDescriptorSize() + // + + public long getDescriptorSize() + { + + long retVal = getDescriptorSize_0(nativeObj); + + return retVal; + } + + + // + // C++: double HOGDescriptor::getWinSigma() + // + + public double getWinSigma() + { + + double retVal = getWinSigma_0(nativeObj); + + return retVal; + } + + + // + // C++: bool HOGDescriptor::load(String filename, String objname = String()) + // + + public boolean load(String filename, String objname) + { + + boolean retVal = load_0(nativeObj, filename, objname); + + return retVal; + } + + public boolean load(String filename) + { + + boolean retVal = load_1(nativeObj, filename); + + return retVal; + } + + + // + // C++: void HOGDescriptor::save(String filename, String objname = String()) + // + + public void save(String filename, String objname) + { + + save_0(nativeObj, filename, objname); + + return; + } + + public void save(String filename) + { + + save_1(nativeObj, filename); + + return; + } + + + // + // C++: void 
HOGDescriptor::setSVMDetector(Mat _svmdetector) + // + + public void setSVMDetector(Mat _svmdetector) + { + + setSVMDetector_0(nativeObj, _svmdetector.nativeObj); + + return; + } + + + // + // C++: Size HOGDescriptor::winSize + // + + public Size get_winSize() + { + + Size retVal = new Size(get_winSize_0(nativeObj)); + + return retVal; + } + + + // + // C++: Size HOGDescriptor::blockSize + // + + public Size get_blockSize() + { + + Size retVal = new Size(get_blockSize_0(nativeObj)); + + return retVal; + } + + + // + // C++: Size HOGDescriptor::blockStride + // + + public Size get_blockStride() + { + + Size retVal = new Size(get_blockStride_0(nativeObj)); + + return retVal; + } + + + // + // C++: Size HOGDescriptor::cellSize + // + + public Size get_cellSize() + { + + Size retVal = new Size(get_cellSize_0(nativeObj)); + + return retVal; + } + + + // + // C++: int HOGDescriptor::nbins + // + + public int get_nbins() + { + + int retVal = get_nbins_0(nativeObj); + + return retVal; + } + + + // + // C++: int HOGDescriptor::derivAperture + // + + public int get_derivAperture() + { + + int retVal = get_derivAperture_0(nativeObj); + + return retVal; + } + + + // + // C++: double HOGDescriptor::winSigma + // + + public double get_winSigma() + { + + double retVal = get_winSigma_0(nativeObj); + + return retVal; + } + + + // + // C++: int HOGDescriptor::histogramNormType + // + + public int get_histogramNormType() + { + + int retVal = get_histogramNormType_0(nativeObj); + + return retVal; + } + + + // + // C++: double HOGDescriptor::L2HysThreshold + // + + public double get_L2HysThreshold() + { + + double retVal = get_L2HysThreshold_0(nativeObj); + + return retVal; + } + + + // + // C++: bool HOGDescriptor::gammaCorrection + // + + public boolean get_gammaCorrection() + { + + boolean retVal = get_gammaCorrection_0(nativeObj); + + return retVal; + } + + + // + // C++: vector_float HOGDescriptor::svmDetector + // + + public MatOfFloat get_svmDetector() + { + + MatOfFloat retVal = MatOfFloat.fromNativeAddr(get_svmDetector_0(nativeObj)); + + return retVal; + } + + + // + // C++: int HOGDescriptor::nlevels + // + + public int get_nlevels() + { + + int retVal = get_nlevels_0(nativeObj); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: HOGDescriptor::HOGDescriptor() + private static native long HOGDescriptor_0(); + + // C++: HOGDescriptor::HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride, Size _cellSize, int _nbins, int _derivAperture = 1, double _winSigma = -1, int _histogramNormType = HOGDescriptor::L2Hys, double _L2HysThreshold = 0.2, bool _gammaCorrection = false, int _nlevels = HOGDescriptor::DEFAULT_NLEVELS) + private static native long HOGDescriptor_1(double _winSize_width, double _winSize_height, double _blockSize_width, double _blockSize_height, double _blockStride_width, double _blockStride_height, double _cellSize_width, double _cellSize_height, int _nbins, int _derivAperture, double _winSigma, int _histogramNormType, double _L2HysThreshold, boolean _gammaCorrection, int _nlevels); + private static native long HOGDescriptor_2(double _winSize_width, double _winSize_height, double _blockSize_width, double _blockSize_height, double _blockStride_width, double _blockStride_height, double _cellSize_width, double _cellSize_height, int _nbins); + + // C++: HOGDescriptor::HOGDescriptor(String filename) + private static native long HOGDescriptor_3(String filename); + + // C++: bool HOGDescriptor::checkDetectorSize() 
+ private static native boolean checkDetectorSize_0(long nativeObj); + + // C++: void HOGDescriptor::compute(Mat img, vector_float& descriptors, Size winStride = Size(), Size padding = Size(), vector_Point locations = vector()) + private static native void compute_0(long nativeObj, long img_nativeObj, long descriptors_mat_nativeObj, double winStride_width, double winStride_height, double padding_width, double padding_height, long locations_mat_nativeObj); + private static native void compute_1(long nativeObj, long img_nativeObj, long descriptors_mat_nativeObj); + + // C++: void HOGDescriptor::computeGradient(Mat img, Mat& grad, Mat& angleOfs, Size paddingTL = Size(), Size paddingBR = Size()) + private static native void computeGradient_0(long nativeObj, long img_nativeObj, long grad_nativeObj, long angleOfs_nativeObj, double paddingTL_width, double paddingTL_height, double paddingBR_width, double paddingBR_height); + private static native void computeGradient_1(long nativeObj, long img_nativeObj, long grad_nativeObj, long angleOfs_nativeObj); + + // C++: void HOGDescriptor::detect(Mat img, vector_Point& foundLocations, vector_double& weights, double hitThreshold = 0, Size winStride = Size(), Size padding = Size(), vector_Point searchLocations = vector()) + private static native void detect_0(long nativeObj, long img_nativeObj, long foundLocations_mat_nativeObj, long weights_mat_nativeObj, double hitThreshold, double winStride_width, double winStride_height, double padding_width, double padding_height, long searchLocations_mat_nativeObj); + private static native void detect_1(long nativeObj, long img_nativeObj, long foundLocations_mat_nativeObj, long weights_mat_nativeObj); + + // C++: void HOGDescriptor::detectMultiScale(Mat img, vector_Rect& foundLocations, vector_double& foundWeights, double hitThreshold = 0, Size winStride = Size(), Size padding = Size(), double scale = 1.05, double finalThreshold = 2.0, bool useMeanshiftGrouping = false) + private static native void detectMultiScale_0(long nativeObj, long img_nativeObj, long foundLocations_mat_nativeObj, long foundWeights_mat_nativeObj, double hitThreshold, double winStride_width, double winStride_height, double padding_width, double padding_height, double scale, double finalThreshold, boolean useMeanshiftGrouping); + private static native void detectMultiScale_1(long nativeObj, long img_nativeObj, long foundLocations_mat_nativeObj, long foundWeights_mat_nativeObj); + + // C++: static vector_float HOGDescriptor::getDaimlerPeopleDetector() + private static native long getDaimlerPeopleDetector_0(); + + // C++: static vector_float HOGDescriptor::getDefaultPeopleDetector() + private static native long getDefaultPeopleDetector_0(); + + // C++: size_t HOGDescriptor::getDescriptorSize() + private static native long getDescriptorSize_0(long nativeObj); + + // C++: double HOGDescriptor::getWinSigma() + private static native double getWinSigma_0(long nativeObj); + + // C++: bool HOGDescriptor::load(String filename, String objname = String()) + private static native boolean load_0(long nativeObj, String filename, String objname); + private static native boolean load_1(long nativeObj, String filename); + + // C++: void HOGDescriptor::save(String filename, String objname = String()) + private static native void save_0(long nativeObj, String filename, String objname); + private static native void save_1(long nativeObj, String filename); + + // C++: void HOGDescriptor::setSVMDetector(Mat _svmdetector) + private static native void 
setSVMDetector_0(long nativeObj, long _svmdetector_nativeObj); + + // C++: Size HOGDescriptor::winSize + private static native double[] get_winSize_0(long nativeObj); + + // C++: Size HOGDescriptor::blockSize + private static native double[] get_blockSize_0(long nativeObj); + + // C++: Size HOGDescriptor::blockStride + private static native double[] get_blockStride_0(long nativeObj); + + // C++: Size HOGDescriptor::cellSize + private static native double[] get_cellSize_0(long nativeObj); + + // C++: int HOGDescriptor::nbins + private static native int get_nbins_0(long nativeObj); + + // C++: int HOGDescriptor::derivAperture + private static native int get_derivAperture_0(long nativeObj); + + // C++: double HOGDescriptor::winSigma + private static native double get_winSigma_0(long nativeObj); + + // C++: int HOGDescriptor::histogramNormType + private static native int get_histogramNormType_0(long nativeObj); + + // C++: double HOGDescriptor::L2HysThreshold + private static native double get_L2HysThreshold_0(long nativeObj); + + // C++: bool HOGDescriptor::gammaCorrection + private static native boolean get_gammaCorrection_0(long nativeObj); + + // C++: vector_float HOGDescriptor::svmDetector + private static native long get_svmDetector_0(long nativeObj); + + // C++: int HOGDescriptor::nlevels + private static native int get_nlevels_0(long nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/objdetect/Objdetect.java b/src/org/opencv/objdetect/Objdetect.java new file mode 100644 index 0000000..83e8768 --- /dev/null +++ b/src/org/opencv/objdetect/Objdetect.java @@ -0,0 +1,105 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.objdetect; + +import org.opencv.core.Mat; +import org.opencv.core.MatOfInt; +import org.opencv.core.MatOfRect; + +public class Objdetect { + + public static final int + CASCADE_DO_CANNY_PRUNING = 1, + CASCADE_SCALE_IMAGE = 2, + CASCADE_FIND_BIGGEST_OBJECT = 4, + CASCADE_DO_ROUGH_SEARCH = 8; + + + // + // C++: void drawDataMatrixCodes(Mat& image, vector_string codes, Mat corners) + // + + // Unknown type 'vector_string' (I), skipping the function + + + // + // C++: void findDataMatrix(Mat image, vector_string& codes, Mat& corners = Mat(), vector_Mat& dmtx = vector_Mat()) + // + + // Unknown type 'vector_string' (O), skipping the function + + + // + // C++: void groupRectangles(vector_Rect& rectList, vector_int& weights, int groupThreshold, double eps = 0.2) + // + +/** + *

Groups the object candidate rectangles.

+ * + *

The function is a wrapper for the generic function "partition". It clusters + * all the input rectangles using the rectangle equivalence criteria that + * combine rectangles with similar sizes and similar locations. The similarity + * is defined by eps. When eps=0, no clustering is + * done at all. If eps-> +inf, all the rectangles are put in one + * cluster. Then, the small clusters containing + * groupThreshold rectangles or fewer are rejected. In each remaining cluster, + * the average rectangle is computed and put into the output rectangle list.
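
A sketch of merging raw candidate boxes (candidates is a placeholder Rect[] of overlapping detections):

    MatOfRect rectList = new MatOfRect(candidates);
    MatOfInt weights = new MatOfInt();
    Objdetect.groupRectangles(rectList, weights, 1, 0.2); // drop clusters of size <= 1
    Rect[] merged = rectList.toArray();                   // averaged, grouped rectangles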

+ * + * @param rectList Input/output vector of rectangles. Output vector includes + * retained and grouped rectangles. (The Python list is not modified in place.) + * @param weights a weights + * @param groupThreshold Minimum possible number of rectangles minus 1. The + * threshold is used in a group of rectangles to retain it. + * @param eps Relative difference between sides of the rectangles to merge them + * into a group. + * + * @see org.opencv.objdetect.Objdetect.groupRectangles + */ + public static void groupRectangles(MatOfRect rectList, MatOfInt weights, int groupThreshold, double eps) + { + Mat rectList_mat = rectList; + Mat weights_mat = weights; + groupRectangles_0(rectList_mat.nativeObj, weights_mat.nativeObj, groupThreshold, eps); + + return; + } + +/** + *

Groups the object candidate rectangles.

+ * + *

The function is a wrapper for the generic function "partition". It clusters + * all the input rectangles using a rectangle equivalence criterion that + * combines rectangles with similar sizes and similar locations. The similarity + * is defined by eps. When eps=0, no clustering is + * done at all. If eps-> +inf, all the rectangles are put in one + * cluster. Then, the small clusters containing groupThreshold or + * fewer rectangles are rejected. In each remaining cluster, the average + * rectangle is computed and put into the output rectangle list.

+ * + * @param rectList Input/output vector of rectangles. Output vector includes + * retained and grouped rectangles. (The Python list is not modified in place.) + * @param weights a weights + * @param groupThreshold Minimum possible number of rectangles minus 1. The + * threshold is used in a group of rectangles to retain it. + * + * @see org.opencv.objdetect.Objdetect.groupRectangles + */ + public static void groupRectangles(MatOfRect rectList, MatOfInt weights, int groupThreshold) + { + Mat rectList_mat = rectList; + Mat weights_mat = weights; + groupRectangles_1(rectList_mat.nativeObj, weights_mat.nativeObj, groupThreshold); + + return; + } + + + + + // C++: void groupRectangles(vector_Rect& rectList, vector_int& weights, int groupThreshold, double eps = 0.2) + private static native void groupRectangles_0(long rectList_mat_nativeObj, long weights_mat_nativeObj, int groupThreshold, double eps); + private static native void groupRectangles_1(long rectList_mat_nativeObj, long weights_mat_nativeObj, int groupThreshold); + +} diff --git a/src/org/opencv/objdetect/package.bluej b/src/org/opencv/objdetect/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/package.bluej b/src/org/opencv/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/photo/Photo.java b/src/org/opencv/photo/Photo.java new file mode 100644 index 0000000..908a275 --- /dev/null +++ b/src/org/opencv/photo/Photo.java @@ -0,0 +1,347 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.photo; + +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.utils.Converters; + +public class Photo { + + private static final int + CV_INPAINT_NS = 0, + CV_INPAINT_TELEA = 1; + + + public static final int + INPAINT_NS = CV_INPAINT_NS, + INPAINT_TELEA = CV_INPAINT_TELEA; + + + // + // C++: void fastNlMeansDenoising(Mat src, Mat& dst, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21) + // + +/** + *

Performs image denoising using the Non-local Means Denoising algorithm + * http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/ with several + * computational optimizations. Noise is expected to be Gaussian white noise

+ * + *

This function is expected to be applied to grayscale images. For colored + * images look at fastNlMeansDenoisingColored. + * Advanced usage of this function can be manual denoising of a colored image + * in different colorspaces. + * Such an approach is used in fastNlMeansDenoisingColored: it + * converts the image to the CIELAB colorspace and then separately denoises the + * L and AB components with different h parameters.

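A minimal grayscale sketch (editor's illustration; Highgui is the 2.4-era image I/O class, and the file name is hypothetical):

    Mat noisy = Highgui.imread("noisy.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
    Mat denoised = new Mat();
    // h = 3, 7 px template patch, 21 px search window: the defaults above
    Photo.fastNlMeansDenoising(noisy, denoised, 3f, 7, 21);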
+ * + * @param src Input 8-bit 1-channel, 2-channel or 3-channel image. + * @param dst Output image with the same size and type as src. + * @param h Parameter regulating filter strength. A big h value perfectly removes + * noise but also removes image details; a smaller h value preserves details but + * also preserves some noise + * @param templateWindowSize Size in pixels of the template patch that is used + * to compute weights. Should be odd. Recommended value 7 pixels + * @param searchWindowSize Size in pixels of the window that is used to compute + * weighted average for given pixel. Should be odd. Affects performance linearly: + * greater searchWindowSize - greater denoising time. Recommended value 21 + * pixels + * + * @see org.opencv.photo.Photo.fastNlMeansDenoising + */ + public static void fastNlMeansDenoising(Mat src, Mat dst, float h, int templateWindowSize, int searchWindowSize) + { + + fastNlMeansDenoising_0(src.nativeObj, dst.nativeObj, h, templateWindowSize, searchWindowSize); + + return; + } + +/** + *

Performs image denoising using the Non-local Means Denoising algorithm + * http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/ with several + * computational optimizations. Noise is expected to be Gaussian white noise

+ * + *

This function is expected to be applied to grayscale images. For colored + * images look at fastNlMeansDenoisingColored. + * Advanced usage of this function can be manual denoising of a colored image + * in different colorspaces. + * Such an approach is used in fastNlMeansDenoisingColored: it + * converts the image to the CIELAB colorspace and then separately denoises the + * L and AB components with different h parameters.

+ * + * @param src Input 8-bit 1-channel, 2-channel or 3-channel image. + * @param dst Output image with the same size and type as src. + * + * @see org.opencv.photo.Photo.fastNlMeansDenoising + */ + public static void fastNlMeansDenoising(Mat src, Mat dst) + { + + fastNlMeansDenoising_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void fastNlMeansDenoisingColored(Mat src, Mat& dst, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21) + // + +/** + *

Modification of the fastNlMeansDenoising function for colored images

+ * + *

The function converts the image to the CIELAB colorspace and then separately + * denoises the L and AB components with the given h parameters using the + * fastNlMeansDenoising function.

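A sketch of the colored variant (editor's illustration; the file name is hypothetical), showing that the luminance strength h and the color strength hColor can be tuned independently:

    Mat src = Highgui.imread("photo.jpg"); // 8-bit, 3-channel
    Mat dst = new Mat();
    Photo.fastNlMeansDenoisingColored(src, dst, 3f, 10f, 7, 21);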
+ * + * @param src Input 8-bit 3-channel image. + * @param dst Output image with the same size and type as src. + * @param h Parameter regulating filter strength for the luminance component. A + * bigger h value perfectly removes noise but also removes image details; a + * smaller h value preserves details but also preserves some noise + * @param hColor Parameter regulating filter strength for the color components; + * plays the same role as h. + * @param templateWindowSize Size in pixels of the template patch that is used + * to compute weights. Should be odd. Recommended value 7 pixels + * @param searchWindowSize Size in pixels of the window that is used to compute + * weighted average for given pixel. Should be odd. Affects performance linearly: + * greater searchWindowSize - greater denoising time. Recommended value 21 + * pixels + * + * @see org.opencv.photo.Photo.fastNlMeansDenoisingColored + */ + public static void fastNlMeansDenoisingColored(Mat src, Mat dst, float h, float hColor, int templateWindowSize, int searchWindowSize) + { + + fastNlMeansDenoisingColored_0(src.nativeObj, dst.nativeObj, h, hColor, templateWindowSize, searchWindowSize); + + return; + } + +/** + *

Modification of the fastNlMeansDenoising function for colored images

+ * + *

The function converts the image to the CIELAB colorspace and then separately + * denoises the L and AB components with the given h parameters using the + * fastNlMeansDenoising function.

+ * + * @param src Input 8-bit 3-channel image. + * @param dst Output image with the same size and type as src. + * + * @see org.opencv.photo.Photo.fastNlMeansDenoisingColored + */ + public static void fastNlMeansDenoisingColored(Mat src, Mat dst) + { + + fastNlMeansDenoisingColored_1(src.nativeObj, dst.nativeObj); + + return; + } + + + // + // C++: void fastNlMeansDenoisingColoredMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21) + // + +/** + *

Modification of the fastNlMeansDenoisingMulti function for colored + * image sequences

+ * + *

The function converts the images to the CIELAB colorspace and then separately + * denoises the L and AB components with the given h parameters using the + * fastNlMeansDenoisingMulti function.

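A sketch of the temporal variant (editor's illustration; the frame files are hypothetical, and java.util.ArrayList is assumed to be imported):

    List<Mat> frames = new ArrayList<Mat>();
    for (int i = 0; i < 5; i++)
        frames.add(Highgui.imread("frame" + i + ".png"));
    Mat dst = new Mat();
    // imgToDenoiseIndex = 2, temporalWindowSize = 5: frames 0..4 are
    // consulted and frames.get(2) is the image actually denoised.
    Photo.fastNlMeansDenoisingColoredMulti(frames, dst, 2, 5);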
+ * + * @param srcImgs Input 8-bit 3-channel image sequence. All images should have + * the same type and size. + * @param dst Output image with the same size and type as srcImgs + * images. + * @param imgToDenoiseIndex Index of the target image to denoise in the + * srcImgs sequence + * @param temporalWindowSize Number of surrounding images to use for target + * image denoising. Should be odd. Images from imgToDenoiseIndex - + * temporalWindowSize / 2 to imgToDenoiseIndex + temporalWindowSize + * / 2 from srcImgs will be used to denoise the + * srcImgs[imgToDenoiseIndex] image. + * @param h Parameter regulating filter strength for the luminance component. A + * bigger h value perfectly removes noise but also removes image details; a + * smaller h value preserves details but also preserves some noise. + * @param hColor Parameter regulating filter strength for the color components; + * plays the same role as h. + * @param templateWindowSize Size in pixels of the template patch that is used + * to compute weights. Should be odd. Recommended value 7 pixels + * @param searchWindowSize Size in pixels of the window that is used to compute + * weighted average for given pixel. Should be odd. Affects performance linearly: + * greater searchWindowSize - greater denoising time. Recommended value 21 + * pixels + * + * @see org.opencv.photo.Photo.fastNlMeansDenoisingColoredMulti + */ + public static void fastNlMeansDenoisingColoredMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize) + { + Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); + fastNlMeansDenoisingColoredMulti_0(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, hColor, templateWindowSize, searchWindowSize); + + return; + } + +/** + *

Modification of the fastNlMeansDenoisingMulti function for colored + * image sequences

+ * + *

The function converts the images to the CIELAB colorspace and then separately + * denoises the L and AB components with the given h parameters using the + * fastNlMeansDenoisingMulti function.

+ * + * @param srcImgs Input 8-bit 3-channel image sequence. All images should have + * the same type and size. + * @param dst Output image with the same size and type as srcImgs + * images. + * @param imgToDenoiseIndex Index of the target image to denoise in the + * srcImgs sequence + * @param temporalWindowSize Number of surrounding images to use for target + * image denoising. Should be odd. Images from imgToDenoiseIndex - + * temporalWindowSize / 2 to imgToDenoiseIndex + temporalWindowSize + * / 2 from srcImgs will be used to denoise the + * srcImgs[imgToDenoiseIndex] image. + * + * @see org.opencv.photo.Photo.fastNlMeansDenoisingColoredMulti + */ + public static void fastNlMeansDenoisingColoredMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize) + { + Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); + fastNlMeansDenoisingColoredMulti_1(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize); + + return; + } + + + // + // C++: void fastNlMeansDenoisingMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21) + // + +/** + *

Modification of the fastNlMeansDenoising function for image + * sequences where consecutive images have been captured over a short period of + * time, for example video. This version of the function is for grayscale images + * or for manual manipulation with colorspaces. + * For more details see http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394

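As a worked instance of the window arithmetic (editor's illustration; srcImgs and dst are assumed to exist as in the colored variant above):

    // imgToDenoiseIndex = 4, temporalWindowSize = 3: integer division gives
    // 3/2 = 1, so frames 3, 4 and 5 are consulted and srcImgs.get(4) is the
    // frame denoised into dst.
    Photo.fastNlMeansDenoisingMulti(srcImgs, dst, 4, 3);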
+ * + * @param srcImgs Input 8-bit 1-channel, 2-channel or 3-channel image sequence. + * All images should have the same type and size. + * @param dst Output image with the same size and type as srcImgs + * images. + * @param imgToDenoiseIndex Index of the target image to denoise in the + * srcImgs sequence + * @param temporalWindowSize Number of surrounding images to use for target + * image denoising. Should be odd. Images from imgToDenoiseIndex - + * temporalWindowSize / 2 to imgToDenoiseIndex + temporalWindowSize + * / 2 from srcImgs will be used to denoise the + * srcImgs[imgToDenoiseIndex] image. + * @param h Parameter regulating filter strength for the luminance component. A + * bigger h value perfectly removes noise but also removes image details; a + * smaller h value preserves details but also preserves some noise + * @param templateWindowSize Size in pixels of the template patch that is used + * to compute weights. Should be odd. Recommended value 7 pixels + * @param searchWindowSize Size in pixels of the window that is used to compute + * weighted average for given pixel. Should be odd. Affects performance linearly: + * greater searchWindowSize - greater denoising time. Recommended value 21 + * pixels + * + * @see org.opencv.photo.Photo.fastNlMeansDenoisingMulti + */ + public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, int templateWindowSize, int searchWindowSize) + { + Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); + fastNlMeansDenoisingMulti_0(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, templateWindowSize, searchWindowSize); + + return; + } + +/** + *

Modification of the fastNlMeansDenoising function for image + * sequences where consecutive images have been captured over a short period of + * time, for example video. This version of the function is for grayscale images + * or for manual manipulation with colorspaces. + * For more details see http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394

+ * + * @param srcImgs Input 8-bit 1-channel, 2-channel or 3-channel image sequence. + * All images should have the same type and size. + * @param dst Output image with the same size and type as srcImgs + * images. + * @param imgToDenoiseIndex Index of the target image to denoise in the + * srcImgs sequence + * @param temporalWindowSize Number of surrounding images to use for target + * image denoising. Should be odd. Images from imgToDenoiseIndex - + * temporalWindowSize / 2 to imgToDenoiseIndex + temporalWindowSize + * / 2 from srcImgs will be used to denoise the + * srcImgs[imgToDenoiseIndex] image. + * + * @see org.opencv.photo.Photo.fastNlMeansDenoisingMulti + */ + public static void fastNlMeansDenoisingMulti(List<Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize) + { + Mat srcImgs_mat = Converters.vector_Mat_to_Mat(srcImgs); + fastNlMeansDenoisingMulti_1(srcImgs_mat.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize); + + return; + } + + + // + // C++: void inpaint(Mat src, Mat inpaintMask, Mat& dst, double inpaintRadius, int flags) + // + +/** + *

Restores the selected region in an image using the region neighborhood.

+ * + *

The function reconstructs the selected image area from the pixels near the + * area boundary. The function may be used to remove dust and scratches from a + * scanned photo, or to remove undesirable objects from still images or video. + * See http://en.wikipedia.org/wiki/Inpainting for more details.

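A minimal sketch (editor's illustration; both file names are hypothetical, and the mask would normally be painted over the defect):

    Mat photo = Highgui.imread("scanned.png");
    Mat mask = Highgui.imread("mask.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
    Mat restored = new Mat();
    // 3 px neighborhood radius, Telea's method (INPAINT_TELEA above)
    Photo.inpaint(photo, mask, restored, 3.0, Photo.INPAINT_TELEA);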
+ * + * @param src Input 8-bit 1-channel or 3-channel image. + * @param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels + * indicate the area that needs to be inpainted. + * @param dst Output image with the same size and type as src. + * @param inpaintRadius Radius of a circular neighborhood of each point + * inpainted that is considered by the algorithm. + * @param flags Inpainting method that could be one of the following: + *
    + *
  • INPAINT_NS Navier-Stokes based method. + *
  • INPAINT_TELEA Method by Alexandru Telea [Telea04]. + *
+ * + * @see org.opencv.photo.Photo.inpaint + */ + public static void inpaint(Mat src, Mat inpaintMask, Mat dst, double inpaintRadius, int flags) + { + + inpaint_0(src.nativeObj, inpaintMask.nativeObj, dst.nativeObj, inpaintRadius, flags); + + return; + } + + + + + // C++: void fastNlMeansDenoising(Mat src, Mat& dst, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21) + private static native void fastNlMeansDenoising_0(long src_nativeObj, long dst_nativeObj, float h, int templateWindowSize, int searchWindowSize); + private static native void fastNlMeansDenoising_1(long src_nativeObj, long dst_nativeObj); + + // C++: void fastNlMeansDenoisingColored(Mat src, Mat& dst, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21) + private static native void fastNlMeansDenoisingColored_0(long src_nativeObj, long dst_nativeObj, float h, float hColor, int templateWindowSize, int searchWindowSize); + private static native void fastNlMeansDenoisingColored_1(long src_nativeObj, long dst_nativeObj); + + // C++: void fastNlMeansDenoisingColoredMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21) + private static native void fastNlMeansDenoisingColoredMulti_0(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize); + private static native void fastNlMeansDenoisingColoredMulti_1(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize); + + // C++: void fastNlMeansDenoisingMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21) + private static native void fastNlMeansDenoisingMulti_0(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize, float h, int templateWindowSize, int searchWindowSize); + private static native void fastNlMeansDenoisingMulti_1(long srcImgs_mat_nativeObj, long dst_nativeObj, int imgToDenoiseIndex, int temporalWindowSize); + + // C++: void inpaint(Mat src, Mat inpaintMask, Mat& dst, double inpaintRadius, int flags) + private static native void inpaint_0(long src_nativeObj, long inpaintMask_nativeObj, long dst_nativeObj, double inpaintRadius, int flags); + +} diff --git a/src/org/opencv/photo/package.bluej b/src/org/opencv/photo/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/utils/Converters.java b/src/org/opencv/utils/Converters.java new file mode 100644 index 0000000..49c0844 --- /dev/null +++ b/src/org/opencv/utils/Converters.java @@ -0,0 +1,724 @@ +package org.opencv.utils; + +import java.util.ArrayList; +import java.util.List; + +import org.opencv.core.CvType; +import org.opencv.core.Mat; +import org.opencv.core.MatOfByte; +import org.opencv.core.MatOfDMatch; +import org.opencv.core.MatOfKeyPoint; +import org.opencv.core.MatOfPoint; +import org.opencv.core.MatOfPoint2f; +import org.opencv.core.MatOfPoint3f; +import org.opencv.core.Point; +import org.opencv.core.Point3; +import org.opencv.core.Rect; +import org.opencv.features2d.DMatch; +import org.opencv.features2d.KeyPoint; + +public class Converters { + + public static Mat vector_Point_to_Mat(List pts) { + return vector_Point_to_Mat(pts, CvType.CV_32S); + } + + public static Mat vector_Point2f_to_Mat(List pts) { + return vector_Point_to_Mat(pts, 
CvType.CV_32F); + } + + public static Mat vector_Point2d_to_Mat(List pts) { + return vector_Point_to_Mat(pts, CvType.CV_64F); + } + + public static Mat vector_Point_to_Mat(List pts, int typeDepth) { + Mat res; + int count = (pts != null) ? pts.size() : 0; + if (count > 0) { + switch (typeDepth) { + case CvType.CV_32S: { + res = new Mat(count, 1, CvType.CV_32SC2); + int[] buff = new int[count * 2]; + for (int i = 0; i < count; i++) { + Point p = pts.get(i); + buff[i * 2] = (int) p.x; + buff[i * 2 + 1] = (int) p.y; + } + res.put(0, 0, buff); + } + break; + + case CvType.CV_32F: { + res = new Mat(count, 1, CvType.CV_32FC2); + float[] buff = new float[count * 2]; + for (int i = 0; i < count; i++) { + Point p = pts.get(i); + buff[i * 2] = (float) p.x; + buff[i * 2 + 1] = (float) p.y; + } + res.put(0, 0, buff); + } + break; + + case CvType.CV_64F: { + res = new Mat(count, 1, CvType.CV_64FC2); + double[] buff = new double[count * 2]; + for (int i = 0; i < count; i++) { + Point p = pts.get(i); + buff[i * 2] = p.x; + buff[i * 2 + 1] = p.y; + } + res.put(0, 0, buff); + } + break; + + default: + throw new IllegalArgumentException("'typeDepth' can be CV_32S, CV_32F or CV_64F"); + } + } else { + res = new Mat(); + } + return res; + } + + public static Mat vector_Point3i_to_Mat(List pts) { + return vector_Point3_to_Mat(pts, CvType.CV_32S); + } + + public static Mat vector_Point3f_to_Mat(List pts) { + return vector_Point3_to_Mat(pts, CvType.CV_32F); + } + + public static Mat vector_Point3d_to_Mat(List pts) { + return vector_Point3_to_Mat(pts, CvType.CV_64F); + } + + public static Mat vector_Point3_to_Mat(List pts, int typeDepth) { + Mat res; + int count = (pts != null) ? pts.size() : 0; + if (count > 0) { + switch (typeDepth) { + case CvType.CV_32S: { + res = new Mat(count, 1, CvType.CV_32SC3); + int[] buff = new int[count * 3]; + for (int i = 0; i < count; i++) { + Point3 p = pts.get(i); + buff[i * 3] = (int) p.x; + buff[i * 3 + 1] = (int) p.y; + buff[i * 3 + 2] = (int) p.z; + } + res.put(0, 0, buff); + } + break; + + case CvType.CV_32F: { + res = new Mat(count, 1, CvType.CV_32FC3); + float[] buff = new float[count * 3]; + for (int i = 0; i < count; i++) { + Point3 p = pts.get(i); + buff[i * 3] = (float) p.x; + buff[i * 3 + 1] = (float) p.y; + buff[i * 3 + 2] = (float) p.z; + } + res.put(0, 0, buff); + } + break; + + case CvType.CV_64F: { + res = new Mat(count, 1, CvType.CV_64FC3); + double[] buff = new double[count * 3]; + for (int i = 0; i < count; i++) { + Point3 p = pts.get(i); + buff[i * 3] = p.x; + buff[i * 3 + 1] = p.y; + buff[i * 3 + 2] = p.z; + } + res.put(0, 0, buff); + } + break; + + default: + throw new IllegalArgumentException("'typeDepth' can be CV_32S, CV_32F or CV_64F"); + } + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_Point2f(Mat m, List pts) { + Mat_to_vector_Point(m, pts); + } + + public static void Mat_to_vector_Point2d(Mat m, List pts) { + Mat_to_vector_Point(m, pts); + } + + public static void Mat_to_vector_Point(Mat m, List pts) { + if (pts == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + int count = m.rows(); + int type = m.type(); + if (m.cols() != 1) + throw new java.lang.IllegalArgumentException("Input Mat should have one column\n" + m); + + pts.clear(); + if (type == CvType.CV_32SC2) { + int[] buff = new int[2 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + pts.add(new Point(buff[i * 2], buff[i * 2 + 1])); + } + } else if (type == CvType.CV_32FC2) { + float[] buff = new 
float[2 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + pts.add(new Point(buff[i * 2], buff[i * 2 + 1])); + } + } else if (type == CvType.CV_64FC2) { + double[] buff = new double[2 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + pts.add(new Point(buff[i * 2], buff[i * 2 + 1])); + } + } else { + throw new java.lang.IllegalArgumentException( + "Input Mat should be of CV_32SC2, CV_32FC2 or CV_64FC2 type\n" + m); + } + } + + public static void Mat_to_vector_Point3i(Mat m, List pts) { + Mat_to_vector_Point3(m, pts); + } + + public static void Mat_to_vector_Point3f(Mat m, List pts) { + Mat_to_vector_Point3(m, pts); + } + + public static void Mat_to_vector_Point3d(Mat m, List pts) { + Mat_to_vector_Point3(m, pts); + } + + public static void Mat_to_vector_Point3(Mat m, List pts) { + if (pts == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + int count = m.rows(); + int type = m.type(); + if (m.cols() != 1) + throw new java.lang.IllegalArgumentException("Input Mat should have one column\n" + m); + + pts.clear(); + if (type == CvType.CV_32SC3) { + int[] buff = new int[3 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + pts.add(new Point3(buff[i * 3], buff[i * 3 + 1], buff[i * 3 + 2])); + } + } else if (type == CvType.CV_32FC3) { + float[] buff = new float[3 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + pts.add(new Point3(buff[i * 3], buff[i * 3 + 1], buff[i * 3 + 2])); + } + } else if (type == CvType.CV_64FC3) { + double[] buff = new double[3 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + pts.add(new Point3(buff[i * 3], buff[i * 3 + 1], buff[i * 3 + 2])); + } + } else { + throw new java.lang.IllegalArgumentException( + "Input Mat should be of CV_32SC3, CV_32FC3 or CV_64FC3 type\n" + m); + } + } + + public static Mat vector_Mat_to_Mat(List mats) { + Mat res; + int count = (mats != null) ? mats.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_32SC2); + int[] buff = new int[count * 2]; + for (int i = 0; i < count; i++) { + long addr = mats.get(i).nativeObj; + buff[i * 2] = (int) (addr >> 32); + buff[i * 2 + 1] = (int) (addr & 0xffffffff); + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_Mat(Mat m, List mats) { + if (mats == null) + throw new java.lang.IllegalArgumentException("mats == null"); + int count = m.rows(); + if (CvType.CV_32SC2 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_32SC2 != m.type() || m.cols()!=1\n" + m); + + mats.clear(); + int[] buff = new int[count * 2]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + long addr = (((long) buff[i * 2]) << 32) | (((long) buff[i * 2 + 1]) & 0xffffffffL); + mats.add(new Mat(addr)); + } + } + + public static Mat vector_float_to_Mat(List fs) { + Mat res; + int count = (fs != null) ? 
fs.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_32FC1); + float[] buff = new float[count]; + for (int i = 0; i < count; i++) { + float f = fs.get(i); + buff[i] = f; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_float(Mat m, List fs) { + if (fs == null) + throw new java.lang.IllegalArgumentException("fs == null"); + int count = m.rows(); + if (CvType.CV_32FC1 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_32FC1 != m.type() || m.cols()!=1\n" + m); + + fs.clear(); + float[] buff = new float[count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + fs.add(buff[i]); + } + } + + public static Mat vector_uchar_to_Mat(List bs) { + Mat res; + int count = (bs != null) ? bs.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_8UC1); + byte[] buff = new byte[count]; + for (int i = 0; i < count; i++) { + byte b = bs.get(i); + buff[i] = b; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_uchar(Mat m, List us) { + if (us == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + int count = m.rows(); + if (CvType.CV_8UC1 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_8UC1 != m.type() || m.cols()!=1\n" + m); + + us.clear(); + byte[] buff = new byte[count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + us.add(buff[i]); + } + } + + public static Mat vector_char_to_Mat(List bs) { + Mat res; + int count = (bs != null) ? bs.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_8SC1); + byte[] buff = new byte[count]; + for (int i = 0; i < count; i++) { + byte b = bs.get(i); + buff[i] = b; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static Mat vector_int_to_Mat(List is) { + Mat res; + int count = (is != null) ? is.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_32SC1); + int[] buff = new int[count]; + for (int i = 0; i < count; i++) { + int v = is.get(i); + buff[i] = v; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_int(Mat m, List is) { + if (is == null) + throw new java.lang.IllegalArgumentException("is == null"); + int count = m.rows(); + if (CvType.CV_32SC1 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_32SC1 != m.type() || m.cols()!=1\n" + m); + + is.clear(); + int[] buff = new int[count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + is.add(buff[i]); + } + } + + public static void Mat_to_vector_char(Mat m, List bs) { + if (bs == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + int count = m.rows(); + if (CvType.CV_8SC1 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_8SC1 != m.type() || m.cols()!=1\n" + m); + + bs.clear(); + byte[] buff = new byte[count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + bs.add(buff[i]); + } + } + + public static Mat vector_Rect_to_Mat(List rs) { + Mat res; + int count = (rs != null) ? 
rs.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_32SC4); + int[] buff = new int[4 * count]; + for (int i = 0; i < count; i++) { + Rect r = rs.get(i); + buff[4 * i] = r.x; + buff[4 * i + 1] = r.y; + buff[4 * i + 2] = r.width; + buff[4 * i + 3] = r.height; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_Rect(Mat m, List rs) { + if (rs == null) + throw new java.lang.IllegalArgumentException("rs == null"); + int count = m.rows(); + if (CvType.CV_32SC4 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_32SC4 != m.type() || m.rows()!=1\n" + m); + + rs.clear(); + int[] buff = new int[4 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + rs.add(new Rect(buff[4 * i], buff[4 * i + 1], buff[4 * i + 2], buff[4 * i + 3])); + } + } + + public static Mat vector_KeyPoint_to_Mat(List kps) { + Mat res; + int count = (kps != null) ? kps.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_64FC(7)); + double[] buff = new double[count * 7]; + for (int i = 0; i < count; i++) { + KeyPoint kp = kps.get(i); + buff[7 * i] = kp.pt.x; + buff[7 * i + 1] = kp.pt.y; + buff[7 * i + 2] = kp.size; + buff[7 * i + 3] = kp.angle; + buff[7 * i + 4] = kp.response; + buff[7 * i + 5] = kp.octave; + buff[7 * i + 6] = kp.class_id; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_KeyPoint(Mat m, List kps) { + if (kps == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + int count = m.rows(); + if (CvType.CV_64FC(7) != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_64FC(7) != m.type() || m.cols()!=1\n" + m); + + kps.clear(); + double[] buff = new double[7 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + kps.add(new KeyPoint((float) buff[7 * i], (float) buff[7 * i + 1], (float) buff[7 * i + 2], (float) buff[7 * i + 3], + (float) buff[7 * i + 4], (int) buff[7 * i + 5], (int) buff[7 * i + 6])); + } + } + + // vector_vector_Point + public static Mat vector_vector_Point_to_Mat(List pts, List mats) { + Mat res; + int lCount = (pts != null) ? pts.size() : 0; + if (lCount > 0) { + for (MatOfPoint vpt : pts) + mats.add(vpt); + res = vector_Mat_to_Mat(mats); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_vector_Point(Mat m, List pts) { + if (pts == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + + if (m == null) + throw new java.lang.IllegalArgumentException("Input Mat can't be null"); + + List mats = new ArrayList(m.rows()); + Mat_to_vector_Mat(m, mats); + for (Mat mi : mats) { + MatOfPoint pt = new MatOfPoint(mi); + pts.add(pt); + } + } + + // vector_vector_Point2f + public static void Mat_to_vector_vector_Point2f(Mat m, List pts) { + if (pts == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + + if (m == null) + throw new java.lang.IllegalArgumentException("Input Mat can't be null"); + + List mats = new ArrayList(m.rows()); + Mat_to_vector_Mat(m, mats); + for (Mat mi : mats) { + MatOfPoint2f pt = new MatOfPoint2f(mi); + pts.add(pt); + } + } + + // vector_vector_Point2f + public static Mat vector_vector_Point2f_to_Mat(List pts, List mats) { + Mat res; + int lCount = (pts != null) ? 
pts.size() : 0; + if (lCount > 0) { + for (MatOfPoint2f vpt : pts) + mats.add(vpt); + res = vector_Mat_to_Mat(mats); + } else { + res = new Mat(); + } + return res; + } + + // vector_vector_Point3f + public static void Mat_to_vector_vector_Point3f(Mat m, List pts) { + if (pts == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + + if (m == null) + throw new java.lang.IllegalArgumentException("Input Mat can't be null"); + + List mats = new ArrayList(m.rows()); + Mat_to_vector_Mat(m, mats); + for (Mat mi : mats) { + MatOfPoint3f pt = new MatOfPoint3f(mi); + pts.add(pt); + } + } + + // vector_vector_Point3f + public static Mat vector_vector_Point3f_to_Mat(List pts, List mats) { + Mat res; + int lCount = (pts != null) ? pts.size() : 0; + if (lCount > 0) { + for (MatOfPoint3f vpt : pts) + mats.add(vpt); + res = vector_Mat_to_Mat(mats); + } else { + res = new Mat(); + } + return res; + } + + // vector_vector_KeyPoint + public static Mat vector_vector_KeyPoint_to_Mat(List kps, List mats) { + Mat res; + int lCount = (kps != null) ? kps.size() : 0; + if (lCount > 0) { + for (MatOfKeyPoint vkp : kps) + mats.add(vkp); + res = vector_Mat_to_Mat(mats); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_vector_KeyPoint(Mat m, List kps) { + if (kps == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + + if (m == null) + throw new java.lang.IllegalArgumentException("Input Mat can't be null"); + + List mats = new ArrayList(m.rows()); + Mat_to_vector_Mat(m, mats); + for (Mat mi : mats) { + MatOfKeyPoint vkp = new MatOfKeyPoint(mi); + kps.add(vkp); + } + } + + public static Mat vector_double_to_Mat(List ds) { + Mat res; + int count = (ds != null) ? ds.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_64FC1); + double[] buff = new double[count]; + for (int i = 0; i < count; i++) { + double v = ds.get(i); + buff[i] = v; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_double(Mat m, List ds) { + if (ds == null) + throw new java.lang.IllegalArgumentException("ds == null"); + int count = m.rows(); + if (CvType.CV_64FC1 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_64FC1 != m.type() || m.cols()!=1\n" + m); + + ds.clear(); + double[] buff = new double[count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + ds.add(buff[i]); + } + } + + public static Mat vector_DMatch_to_Mat(List matches) { + Mat res; + int count = (matches != null) ? 
matches.size() : 0; + if (count > 0) { + res = new Mat(count, 1, CvType.CV_64FC4); + double[] buff = new double[count * 4]; + for (int i = 0; i < count; i++) { + DMatch m = matches.get(i); + buff[4 * i] = m.queryIdx; + buff[4 * i + 1] = m.trainIdx; + buff[4 * i + 2] = m.imgIdx; + buff[4 * i + 3] = m.distance; + } + res.put(0, 0, buff); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_DMatch(Mat m, List matches) { + if (matches == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + int count = m.rows(); + if (CvType.CV_64FC4 != m.type() || m.cols() != 1) + throw new java.lang.IllegalArgumentException( + "CvType.CV_64FC4 != m.type() || m.cols()!=1\n" + m); + + matches.clear(); + double[] buff = new double[4 * count]; + m.get(0, 0, buff); + for (int i = 0; i < count; i++) { + matches.add(new DMatch((int) buff[4 * i], (int) buff[4 * i + 1], (int) buff[4 * i + 2], (float) buff[4 * i + 3])); + } + } + + // vector_vector_DMatch + public static Mat vector_vector_DMatch_to_Mat(List lvdm, List mats) { + Mat res; + int lCount = (lvdm != null) ? lvdm.size() : 0; + if (lCount > 0) { + for (MatOfDMatch vdm : lvdm) + mats.add(vdm); + res = vector_Mat_to_Mat(mats); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_vector_DMatch(Mat m, List lvdm) { + if (lvdm == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + + if (m == null) + throw new java.lang.IllegalArgumentException("Input Mat can't be null"); + + List mats = new ArrayList(m.rows()); + Mat_to_vector_Mat(m, mats); + lvdm.clear(); + for (Mat mi : mats) { + MatOfDMatch vdm = new MatOfDMatch(mi); + lvdm.add(vdm); + } + } + + // vector_vector_char + public static Mat vector_vector_char_to_Mat(List lvb, List mats) { + Mat res; + int lCount = (lvb != null) ? lvb.size() : 0; + if (lCount > 0) { + for (MatOfByte vb : lvb) + mats.add(vb); + res = vector_Mat_to_Mat(mats); + } else { + res = new Mat(); + } + return res; + } + + public static void Mat_to_vector_vector_char(Mat m, List> llb) { + if (llb == null) + throw new java.lang.IllegalArgumentException("Output List can't be null"); + + if (m == null) + throw new java.lang.IllegalArgumentException("Input Mat can't be null"); + + List mats = new ArrayList(m.rows()); + Mat_to_vector_Mat(m, mats); + for (Mat mi : mats) { + List lb = new ArrayList(); + Mat_to_vector_char(mi, lb); + llb.add(lb); + } + } +} diff --git a/src/org/opencv/utils/package.bluej b/src/org/opencv/utils/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/opencv/video/BackgroundSubtractor.java b/src/org/opencv/video/BackgroundSubtractor.java new file mode 100644 index 0000000..dd40a9f --- /dev/null +++ b/src/org/opencv/video/BackgroundSubtractor.java @@ -0,0 +1,93 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.video; + +import org.opencv.core.Algorithm; +import org.opencv.core.Mat; + +// C++: class BackgroundSubtractor +/** + *

Base class for background/foreground segmentation.

+ * + *

class BackgroundSubtractor : public Algorithm

+ * + *

// C++ code:

+ * + * + *

public:

+ * + *

virtual ~BackgroundSubtractor();

+ * + *

virtual void operator()(InputArray image, OutputArray fgmask, double + * learningRate=0);

+ * + *

virtual void getBackgroundImage(OutputArray backgroundImage) const;

+ * + *

};

+ * + *

The class is only used to define the common interface for the whole family of + * background/foreground segmentation algorithms. + *

+ * + * @see org.opencv.video.BackgroundSubtractor : public Algorithm + */ +public class BackgroundSubtractor extends Algorithm { + + protected BackgroundSubtractor(long addr) { super(addr); } + + + // + // C++: void BackgroundSubtractor::operator ()(Mat image, Mat& fgmask, double learningRate = 0) + // + +/** + *

Computes a foreground mask.

+ * + * @param image Next video frame. + * @param fgmask The output foreground mask as an 8-bit binary image. + * @param learningRate a learningRate + * + * @see org.opencv.video.BackgroundSubtractor.operator() + */ + public void apply(Mat image, Mat fgmask, double learningRate) + { + + apply_0(nativeObj, image.nativeObj, fgmask.nativeObj, learningRate); + + return; + } + +/** + *

Computes a foreground mask.

+ * + * @param image Next video frame. + * @param fgmask The output foreground mask as an 8-bit binary image. + * + * @see org.opencv.video.BackgroundSubtractor.operator() + */ + public void apply(Mat image, Mat fgmask) + { + + apply_1(nativeObj, image.nativeObj, fgmask.nativeObj); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: void BackgroundSubtractor::operator ()(Mat image, Mat& fgmask, double learningRate = 0) + private static native void apply_0(long nativeObj, long image_nativeObj, long fgmask_nativeObj, double learningRate); + private static native void apply_1(long nativeObj, long image_nativeObj, long fgmask_nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/video/BackgroundSubtractorMOG.java b/src/org/opencv/video/BackgroundSubtractorMOG.java new file mode 100644 index 0000000..709afa7 --- /dev/null +++ b/src/org/opencv/video/BackgroundSubtractorMOG.java @@ -0,0 +1,106 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.video; + + + +// C++: class BackgroundSubtractorMOG +/** + *

Gaussian Mixture-based Background/Foreground Segmentation Algorithm.

+ * + *

The class implements the algorithm described in P. KadewTraKuPong and R. + * Bowden, *An improved adaptive background mixture model for real-time tracking + * with shadow detection*, Proc. 2nd European Workshop on Advanced Video-Based + * Surveillance Systems, 2001: http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf

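A minimal loop sketch (editor's illustration; capture and frame stand in for whatever supplies video frames, e.g. a 2.4-era org.opencv.highgui.VideoCapture):

    BackgroundSubtractorMOG mog = new BackgroundSubtractorMOG();
    Mat frame = new Mat(), fgmask = new Mat();
    while (capture.read(frame)) {
        // small learning rate: the background model adapts slowly
        mog.apply(frame, fgmask, 0.005);
        // fgmask is now an 8-bit binary foreground mask for this frame
    }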
+ * + * @see org.opencv.video.BackgroundSubtractorMOG : public BackgroundSubtractor + */ +public class BackgroundSubtractorMOG extends BackgroundSubtractor { + + protected BackgroundSubtractorMOG(long addr) { super(addr); } + + + // + // C++: BackgroundSubtractorMOG::BackgroundSubtractorMOG() + // + +/** + *

The constructors.

+ * + *

Default constructor sets all parameters to default values.

+ * + * @see org.opencv.video.BackgroundSubtractorMOG.BackgroundSubtractorMOG + */ + public BackgroundSubtractorMOG() + { + + super( BackgroundSubtractorMOG_0() ); + + return; + } + + + // + // C++: BackgroundSubtractorMOG::BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma = 0) + // + +/** + *

The constructors.

+ * + *

Default constructor sets all parameters to default values.

+ * + * @param history Length of the history. + * @param nmixtures Number of Gaussian mixtures. + * @param backgroundRatio Background ratio. + * @param noiseSigma Noise strength. + * + * @see org.opencv.video.BackgroundSubtractorMOG.BackgroundSubtractorMOG + */ + public BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma) + { + + super( BackgroundSubtractorMOG_1(history, nmixtures, backgroundRatio, noiseSigma) ); + + return; + } + +/** + *

The constructors.

+ * + *

Default constructor sets all parameters to default values.

+ * + * @param history Length of the history. + * @param nmixtures Number of Gaussian mixtures. + * @param backgroundRatio Background ratio. + * + * @see org.opencv.video.BackgroundSubtractorMOG.BackgroundSubtractorMOG + */ + public BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio) + { + + super( BackgroundSubtractorMOG_2(history, nmixtures, backgroundRatio) ); + + return; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: BackgroundSubtractorMOG::BackgroundSubtractorMOG() + private static native long BackgroundSubtractorMOG_0(); + + // C++: BackgroundSubtractorMOG::BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma = 0) + private static native long BackgroundSubtractorMOG_1(int history, int nmixtures, double backgroundRatio, double noiseSigma); + private static native long BackgroundSubtractorMOG_2(int history, int nmixtures, double backgroundRatio); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/video/KalmanFilter.java b/src/org/opencv/video/KalmanFilter.java new file mode 100644 index 0000000..2b83749 --- /dev/null +++ b/src/org/opencv/video/KalmanFilter.java @@ -0,0 +1,176 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.video; + +import org.opencv.core.Mat; + +// C++: class KalmanFilter +/** + *

Kalman filter class.

+ * + *

The class implements a standard Kalman filter http://en.wikipedia.org/wiki/Kalman_filter, + * [Welch95]. However, you can modify transitionMatrix, + * controlMatrix, and measurementMatrix to get + * extended Kalman filter functionality. See the OpenCV sample kalman.cpp.

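A predict/correct cycle sketch (editor's illustration; a 4-state constant-velocity model with a 2-component position measurement, and the measurement values are made up). Note that the wrapper generated here exposes the constructors plus predict() and correct(), so the filter matrices keep the defaults OpenCV assigns:

    KalmanFilter kf = new KalmanFilter(4, 2);
    Mat measurement = new Mat(2, 1, CvType.CV_32F);
    measurement.put(0, 0, new float[]{ 320f, 240f });
    Mat predicted = kf.predict();            // a priori estimate
    Mat corrected = kf.correct(measurement); // a posteriori estimate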
+ * + * @see org.opencv.video.KalmanFilter + */ +public class KalmanFilter { + + protected final long nativeObj; + protected KalmanFilter(long addr) { nativeObj = addr; } + + + // + // C++: KalmanFilter::KalmanFilter() + // + +/** + *

The constructors.

+ * + *

The full constructor.

+ * + *

Note: In C API when CvKalman* kalmanFilter structure is not + * needed anymore, it should be released with cvReleaseKalman(&kalmanFilter)

+ * + * @see org.opencv.video.KalmanFilter.KalmanFilter + */ + public KalmanFilter() + { + + nativeObj = KalmanFilter_0(); + + return; + } + + + // + // C++: KalmanFilter::KalmanFilter(int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F) + // + +/** + *

The constructors.

+ * + *

The full constructor.

+ * + *

Note: In C API when CvKalman* kalmanFilter structure is not + * needed anymore, it should be released with cvReleaseKalman(&kalmanFilter)

+ * + * @param dynamParams Dimensionality of the state. + * @param measureParams Dimensionality of the measurement. + * @param controlParams Dimensionality of the control vector. + * @param type Type of the created matrices that should be CV_32F + * or CV_64F. + * + * @see org.opencv.video.KalmanFilter.KalmanFilter + */ + public KalmanFilter(int dynamParams, int measureParams, int controlParams, int type) + { + + nativeObj = KalmanFilter_1(dynamParams, measureParams, controlParams, type); + + return; + } + +/** + *

The constructors.

+ * + *

The full constructor.

+ * + *

Note: In C API when CvKalman* kalmanFilter structure is not + * needed anymore, it should be released with cvReleaseKalman(&kalmanFilter)

+ * + * @param dynamParams Dimensionality of the state. + * @param measureParams Dimensionality of the measurement. + * + * @see org.opencv.video.KalmanFilter.KalmanFilter + */ + public KalmanFilter(int dynamParams, int measureParams) + { + + nativeObj = KalmanFilter_2(dynamParams, measureParams); + + return; + } + + + // + // C++: Mat KalmanFilter::correct(Mat measurement) + // + +/** + *

Updates the predicted state from the measurement.

+ * + * @param measurement The measured system parameters + * + * @see org.opencv.video.KalmanFilter.correct + */ + public Mat correct(Mat measurement) + { + + Mat retVal = new Mat(correct_0(nativeObj, measurement.nativeObj)); + + return retVal; + } + + + // + // C++: Mat KalmanFilter::predict(Mat control = Mat()) + // + +/** + *

Computes a predicted state.

+ * + * @param control The optional input control + * + * @see org.opencv.video.KalmanFilter.predict + */ + public Mat predict(Mat control) + { + + Mat retVal = new Mat(predict_0(nativeObj, control.nativeObj)); + + return retVal; + } + +/** + *

Computes a predicted state.

+ * + * @see org.opencv.video.KalmanFilter.predict + */ + public Mat predict() + { + + Mat retVal = new Mat(predict_1(nativeObj)); + + return retVal; + } + + + @Override + protected void finalize() throws Throwable { + delete(nativeObj); + } + + + + // C++: KalmanFilter::KalmanFilter() + private static native long KalmanFilter_0(); + + // C++: KalmanFilter::KalmanFilter(int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F) + private static native long KalmanFilter_1(int dynamParams, int measureParams, int controlParams, int type); + private static native long KalmanFilter_2(int dynamParams, int measureParams); + + // C++: Mat KalmanFilter::correct(Mat measurement) + private static native long correct_0(long nativeObj, long measurement_nativeObj); + + // C++: Mat KalmanFilter::predict(Mat control = Mat()) + private static native long predict_0(long nativeObj, long control_nativeObj); + private static native long predict_1(long nativeObj); + + // native support for java finalize() + private static native void delete(long nativeObj); + +} diff --git a/src/org/opencv/video/Video.java b/src/org/opencv/video/Video.java new file mode 100644 index 0000000..4522e73 --- /dev/null +++ b/src/org/opencv/video/Video.java @@ -0,0 +1,731 @@ + +// +// This file is auto-generated. Please don't modify it! +// +package org.opencv.video; + +import java.util.List; +import org.opencv.core.Mat; +import org.opencv.core.MatOfByte; +import org.opencv.core.MatOfFloat; +import org.opencv.core.MatOfPoint2f; +import org.opencv.core.MatOfRect; +import org.opencv.core.Rect; +import org.opencv.core.RotatedRect; +import org.opencv.core.Size; +import org.opencv.core.TermCriteria; +import org.opencv.utils.Converters; + +public class Video { + + private static final int + CV_LKFLOW_INITIAL_GUESSES = 4, + CV_LKFLOW_GET_MIN_EIGENVALS = 8; + + + public static final int + OPTFLOW_USE_INITIAL_FLOW = CV_LKFLOW_INITIAL_GUESSES, + OPTFLOW_LK_GET_MIN_EIGENVALS = CV_LKFLOW_GET_MIN_EIGENVALS, + OPTFLOW_FARNEBACK_GAUSSIAN = 256; + + + // + // C++: RotatedRect CamShift(Mat probImage, Rect& window, TermCriteria criteria) + // + +/** + *

Finds an object center, size, and orientation.

+ * + *

The function implements the CAMSHIFT object tracking algorithm [Bradski98]. + * First, it finds an object center using "meanShift" and then adjusts the + * window size and finds the optimal rotation. The function returns the rotated + * rectangle structure that includes the object position, size, and orientation. + * The next position of the search window can be obtained with RotatedRect.boundingRect().

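A minimal sketch (editor's illustration; backproj stands for a histogram back projection computed elsewhere, e.g. with calcBackProject as noted below):

    Rect window = new Rect(100, 100, 80, 80); // initial search window
    TermCriteria crit = new TermCriteria(
            TermCriteria.EPS | TermCriteria.COUNT, 10, 1);
    RotatedRect found = Video.CamShift(backproj, window, crit);
    // window has been updated in place; found adds the orientation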
+ * + *

See the OpenCV sample camshiftdemo.c that tracks colored + * objects.

+ * + * @param probImage Back projection of the object histogram. See + * "calcBackProject". + * @param window Initial search window. + * @param criteria Stop criteria for the underlying "meanShift". + * + *

:returns: (in old interfaces) Number of iterations CAMSHIFT took to converge

+ * + * @see org.opencv.video.Video.CamShift + */ + public static RotatedRect CamShift(Mat probImage, Rect window, TermCriteria criteria) + { + double[] window_out = new double[4]; + RotatedRect retVal = new RotatedRect(CamShift_0(probImage.nativeObj, window.x, window.y, window.width, window.height, window_out, criteria.type, criteria.maxCount, criteria.epsilon)); + if(window!=null){ window.x = (int)window_out[0]; window.y = (int)window_out[1]; window.width = (int)window_out[2]; window.height = (int)window_out[3]; } + return retVal; + } + + + // + // C++: int buildOpticalFlowPyramid(Mat img, vector_Mat& pyramid, Size winSize, int maxLevel, bool withDerivatives = true, int pyrBorder = BORDER_REFLECT_101, int derivBorder = BORDER_CONSTANT, bool tryReuseInputImage = true) + // + +/** + *

Constructs the image pyramid which can be passed to "calcOpticalFlowPyrLK".

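A minimal sketch (editor's illustration; grayFrame stands for an 8-bit input frame, and java.util.ArrayList is assumed to be imported):

    List<Mat> pyramid = new ArrayList<Mat>();
    int levels = Video.buildOpticalFlowPyramid(
            grayFrame, pyramid, new Size(21, 21), 3);
    // levels may come back smaller than the requested maxLevel of 3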
+ * + * @param img 8-bit input image. + * @param pyramid output pyramid. + * @param winSize window size of optical flow algorithm. Must be not less than + * winSize argument of "calcOpticalFlowPyrLK". It is needed to + * calculate required padding for pyramid levels. + * @param maxLevel 0-based maximal pyramid level number. + * @param withDerivatives set to precompute gradients for the every pyramid + * level. If pyramid is constructed without the gradients then "calcOpticalFlowPyrLK" + * will calculate them internally. + * @param pyrBorder the border mode for pyramid layers. + * @param derivBorder the border mode for gradients. + * @param tryReuseInputImage put ROI of input image into the pyramid if + * possible. You can pass false to force data copying. + * + *

:return: number of levels in constructed pyramid. Can be less than + * maxLevel.

+ * + * @see org.opencv.video.Video.buildOpticalFlowPyramid + */ + public static int buildOpticalFlowPyramid(Mat img, List pyramid, Size winSize, int maxLevel, boolean withDerivatives, int pyrBorder, int derivBorder, boolean tryReuseInputImage) + { + Mat pyramid_mat = new Mat(); + int retVal = buildOpticalFlowPyramid_0(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel, withDerivatives, pyrBorder, derivBorder, tryReuseInputImage); + Converters.Mat_to_vector_Mat(pyramid_mat, pyramid); + return retVal; + } + +/** + *

Constructs the image pyramid which can be passed to "calcOpticalFlowPyrLK".

+ * + * @param img 8-bit input image. + * @param pyramid output pyramid. + * @param winSize window size of optical flow algorithm. Must be not less than + * winSize argument of "calcOpticalFlowPyrLK". It is needed to + * calculate required padding for pyramid levels. + * @param maxLevel 0-based maximal pyramid level number. + * + * @see org.opencv.video.Video.buildOpticalFlowPyramid + */ + public static int buildOpticalFlowPyramid(Mat img, List pyramid, Size winSize, int maxLevel) + { + Mat pyramid_mat = new Mat(); + int retVal = buildOpticalFlowPyramid_1(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel); + Converters.Mat_to_vector_Mat(pyramid_mat, pyramid); + return retVal; + } + + + // + // C++: double calcGlobalOrientation(Mat orientation, Mat mask, Mat mhi, double timestamp, double duration) + // + +/** + *

Calculates a global motion orientation in a selected region.

+ * + *

The function calculates an average motion direction in the selected region + * and returns the angle between 0 degrees and 360 degrees. The average + * direction is computed from the weighted orientation histogram, where recent + * motion has a larger weight and motion that occurred in the past has a smaller + * weight, as recorded in mhi.

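A sketch of the usual pairing (editor's illustration; mhi stands for a motion history image maintained elsewhere with updateMotionHistory, timestamp for the value last passed to it, and the delta values are illustrative):

    Mat mask = new Mat(), orientation = new Mat();
    Video.calcMotionGradient(mhi, mask, orientation, 0.25, 0.05, 3);
    double angleDeg = Video.calcGlobalOrientation(
            orientation, mask, mhi, timestamp, 500); // 500 ms track duration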
+ * + * @param orientation Motion gradient orientation image calculated by the + * function "calcMotionGradient". + * @param mask Mask image. It may be a conjunction of a valid gradient mask, + * also calculated by "calcMotionGradient", and the mask of a region whose + * direction needs to be calculated. + * @param mhi Motion history image calculated by "updateMotionHistory". + * @param timestamp Timestamp passed to "updateMotionHistory". + * @param duration Maximum duration of a motion track in milliseconds, passed to + * "updateMotionHistory". + * + * @see org.opencv.video.Video.calcGlobalOrientation + */ + public static double calcGlobalOrientation(Mat orientation, Mat mask, Mat mhi, double timestamp, double duration) + { + + double retVal = calcGlobalOrientation_0(orientation.nativeObj, mask.nativeObj, mhi.nativeObj, timestamp, duration); + + return retVal; + } + + + // + // C++: void calcMotionGradient(Mat mhi, Mat& mask, Mat& orientation, double delta1, double delta2, int apertureSize = 3) + // + +/** + *

Calculates a gradient orientation of a motion history image.

+ * + *

The function calculates a gradient orientation at each pixel (x, y) + * as:

+ * + *

orientation(x,y)= arctan((dmhi/dy)/(dmhi/dx))

+ * + *

In fact, "fastAtan2" and "phase" are used so that the computed angle is + * measured in degrees and covers the full range 0..360. Also, the + * mask is filled to indicate pixels where the computed angle is + * valid.

+ * + * @param mhi Motion history single-channel floating-point image. + * @param mask Output mask image that has the type CV_8UC1 and the + * same size as mhi. Its non-zero elements mark pixels where the + * motion gradient data is correct. + * @param orientation Output motion gradient orientation image that has the same + * type and the same size as mhi. Each pixel of the image is a + * motion orientation, from 0 to 360 degrees. + * @param delta1 Minimal (or maximal) allowed difference between + * mhi values within a pixel neighborhood. + * @param delta2 Maximal (or minimal) allowed difference between + * mhi values within a pixel neighborhood. That is, the function + * finds the minimum (m(x,y)) and maximum (M(x,y)) + * mhi values over 3 x 3 neighborhood of each pixel and + * marks the motion orientation at (x, y) as valid only if + * + *

min(delta1, delta2) <= M(x,y)-m(x,y) <= max(delta1, delta2).

+ * @param apertureSize Aperture size of the "Sobel" operator. + * + * @see org.opencv.video.Video.calcMotionGradient + */ + public static void calcMotionGradient(Mat mhi, Mat mask, Mat orientation, double delta1, double delta2, int apertureSize) + { + + calcMotionGradient_0(mhi.nativeObj, mask.nativeObj, orientation.nativeObj, delta1, delta2, apertureSize); + + return; + } + +/** + *

Calculates a gradient orientation of a motion history image.

+ * + *

The function calculates a gradient orientation at each pixel (x, y) + * as:

+ * + *

orientation(x,y)= arctan((dmhi/dy)/(dmhi/dx))

+ * + *

In fact, "fastAtan2" and "phase" are used so that the computed angle is + * measured in degrees and covers the full range 0..360. Also, the + * mask is filled to indicate pixels where the computed angle is + * valid.

+ * + * @param mhi Motion history single-channel floating-point image. + * @param mask Output mask image that has the type CV_8UC1 and the + * same size as mhi. Its non-zero elements mark pixels where the + * motion gradient data is correct. + * @param orientation Output motion gradient orientation image that has the same + * type and the same size as mhi. Each pixel of the image is a + * motion orientation, from 0 to 360 degrees. + * @param delta1 Minimal (or maximal) allowed difference between + * mhi values within a pixel neighborhood. + * @param delta2 Maximal (or minimal) allowed difference between + * mhi values within a pixel neighborhood. That is, the function + * finds the minimum (m(x,y)) and maximum (M(x,y)) + * mhi values over 3 x 3 neighborhood of each pixel and + * marks the motion orientation at (x, y) as valid only if + * + *

min(delta1, delta2) <= M(x,y)-m(x,y) <= max(delta1, delta2).

+ * + * @see org.opencv.video.Video.calcMotionGradient + */ + public static void calcMotionGradient(Mat mhi, Mat mask, Mat orientation, double delta1, double delta2) + { + + calcMotionGradient_1(mhi.nativeObj, mask.nativeObj, orientation.nativeObj, delta1, delta2); + + return; + } + + + // + // C++: void calcOpticalFlowFarneback(Mat prev, Mat next, Mat& flow, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags) + // + +/** + *

Computes a dense optical flow using Gunnar Farneback's algorithm.

+ * + *

The function finds an optical flow for each prev pixel using the + * [Farneback2003] algorithm so that

+ * + *

prev(y,x) ~ next(y + flow(y,x)[1], x + flow(y,x)[0])
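A minimal usage sketch (illustrative, not from the original docs; prevGray and nextGray are assumed to be pre-loaded 8-bit single-channel frames, and the parameter values are commonly seen choices rather than prescribed defaults):

Mat flow = new Mat();
Video.calcOpticalFlowFarneback(prevGray, nextGray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
double[] d = flow.get(120, 160); // {dx, dy} displacement at pixel (x=160, y=120)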

+ * + * @param prev first 8-bit single-channel input image. + * @param next second input image of the same size and the same type as + * prev. + * @param flow computed flow image that has the same size as prev + * and type CV_32FC2. + * @param pyr_scale parameter specifying the image scale (<1) to build pyramids + * for each image; pyr_scale=0.5 means a classical pyramid, where + * each subsequent layer is half the size of the previous one. + * @param levels number of pyramid layers including the initial image; + * levels=1 means that no extra layers are created and only the + * original images are used. + * @param winsize averaging window size; larger values increase the algorithm's + * robustness to image noise and improve the chances of fast motion detection, + * but yield a more blurred motion field. + * @param iterations number of iterations the algorithm does at each pyramid + * level. + * @param poly_n size of the pixel neighborhood used to find polynomial + * expansion in each pixel; larger values mean that the image will be + * approximated with smoother surfaces, yielding a more robust algorithm and a + * more blurred motion field, typically poly_n = 5 or 7. + * @param poly_sigma standard deviation of the Gaussian that is used to smooth + * derivatives used as a basis for the polynomial expansion; for + * poly_n=5, you can set poly_sigma=1.1, for + * poly_n=7, a good value would be poly_sigma=1.5. + * @param flags operation flags that can be a combination of the following: + *
    + *
  • OPTFLOW_USE_INITIAL_FLOW uses the input flow as an + * initial flow approximation. + *
  • OPTFLOW_FARNEBACK_GAUSSIAN uses the Gaussian winsize x winsize + * filter instead of a box filter of the same size for optical flow estimation; + * usually, this option gives a more accurate flow than with a box filter, at + * the cost of lower speed; normally, winsize for a Gaussian window + * should be set to a larger value to achieve the same level of robustness. + *
+ * + * @see org.opencv.video.Video.calcOpticalFlowFarneback + */ + public static void calcOpticalFlowFarneback(Mat prev, Mat next, Mat flow, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags) + { + + calcOpticalFlowFarneback_0(prev.nativeObj, next.nativeObj, flow.nativeObj, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags); + + return; + } + + + // + // C++: void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, vector_Point2f prevPts, vector_Point2f& nextPts, vector_uchar& status, vector_float& err, Size winSize = Size(21,21), int maxLevel = 3, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), int flags = 0, double minEigThreshold = 1e-4) + // + +/** + *

Calculates an optical flow for a sparse feature set using the iterative + * Lucas-Kanade method with pyramids.

+ * + *

The function implements a sparse iterative version of the Lucas-Kanade + * optical flow in pyramids. See [Bouguet00]. The function is parallelized with + * the TBB library.
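A minimal usage sketch (illustrative; prevGray and nextGray are assumed 8-bit single-channel frames, and the tracked points are seeded here with Imgproc.goodFeaturesToTrack):

MatOfPoint corners = new MatOfPoint();
Imgproc.goodFeaturesToTrack(prevGray, corners, 100, 0.01, 10);
MatOfPoint2f prevPts = new MatOfPoint2f(corners.toArray());
MatOfPoint2f nextPts = new MatOfPoint2f();
MatOfByte status = new MatOfByte();
MatOfFloat err = new MatOfFloat();
Video.calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err);
// status.toArray()[i] == 1 where the i-th feature was tracked successfully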

+ * + * @param prevImg first 8-bit input image or pyramid constructed by + * "buildOpticalFlowPyramid". + * @param nextImg second input image or pyramid of the same size and the same + * type as prevImg. + * @param prevPts vector of 2D points for which the flow needs to be found; + * point coordinates must be single-precision floating-point numbers. + * @param nextPts output vector of 2D points (with single-precision + * floating-point coordinates) containing the calculated new positions of input + * features in the second image; when the OPTFLOW_USE_INITIAL_FLOW flag + * is passed, the vector must have the same size as in the input. + * @param status output status vector (of unsigned chars); each element of the + * vector is set to 1 if the flow for the corresponding features has been found; + * otherwise, it is set to 0. + * @param err output vector of errors; each element of the vector is set to an + * error for the corresponding feature; the type of the error measure can be set + * in the flags parameter; if the flow wasn't found, then the error is not + * defined (use the status parameter to find such cases). + * @param winSize size of the search window at each pyramid level. + * @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids + * are not used (single level), if set to 1, two levels are used, and so on; if + * pyramids are passed as input, then the algorithm will use as many levels as + * the pyramids have, but no more than maxLevel. + * @param criteria parameter specifying the termination criteria of the + * iterative search algorithm (after the specified maximum number of iterations + * criteria.maxCount or when the search window moves by less than + * criteria.epsilon). + * @param flags operation flags: + *
    + *
  • OPTFLOW_USE_INITIAL_FLOW uses initial estimations, stored in + * nextPts; if the flag is not set, then prevPts is + * copied to nextPts and is considered the initial estimate. + *
  • OPTFLOW_LK_GET_MIN_EIGENVALS uses minimum eigenvalues as an error + * measure (see minEigThreshold description); if the flag is not + * set, then the L1 distance between patches around the original and a moved + * point, divided by the number of pixels in a window, is used as an error + * measure. + *
+ * @param minEigThreshold the algorithm calculates the minimum eigenvalue of a + * 2x2 normal matrix of optical flow equations (this matrix is called a spatial + * gradient matrix in [Bouguet00]), divided by the number of pixels in a window; + * if this value is less than minEigThreshold, then the corresponding + * feature is filtered out and its flow is not processed; this allows bad + * points to be removed, giving a performance boost. + * + * @see org.opencv.video.Video.calcOpticalFlowPyrLK + */ + public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err, Size winSize, int maxLevel, TermCriteria criteria, int flags, double minEigThreshold) + { + Mat prevPts_mat = prevPts; + Mat nextPts_mat = nextPts; + Mat status_mat = status; + Mat err_mat = err; + calcOpticalFlowPyrLK_0(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj, winSize.width, winSize.height, maxLevel, criteria.type, criteria.maxCount, criteria.epsilon, flags, minEigThreshold); + + return; + } + +/** + *

Calculates an optical flow for a sparse feature set using the iterative + * Lucas-Kanade method with pyramids.

+ * + *

The function implements a sparse iterative version of the Lucas-Kanade + * optical flow in pyramids. See [Bouguet00]. The function is parallelized with + * the TBB library.

+ * + * @param prevImg first 8-bit input image or pyramid constructed by + * "buildOpticalFlowPyramid". + * @param nextImg second input image or pyramid of the same size and the same + * type as prevImg. + * @param prevPts vector of 2D points for which the flow needs to be found; + * point coordinates must be single-precision floating-point numbers. + * @param nextPts output vector of 2D points (with single-precision + * floating-point coordinates) containing the calculated new positions of input + * features in the second image; when the OPTFLOW_USE_INITIAL_FLOW flag + * is passed, the vector must have the same size as in the input. + * @param status output status vector (of unsigned chars); each element of the + * vector is set to 1 if the flow for the corresponding features has been found; + * otherwise, it is set to 0. + * @param err output vector of errors; each element of the vector is set to an + * error for the corresponding feature; the type of the error measure can be set + * in the flags parameter; if the flow wasn't found, then the error is not + * defined (use the status parameter to find such cases). + * @param winSize size of the search window at each pyramid level. + * @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids + * are not used (single level), if set to 1, two levels are used, and so on; if + * pyramids are passed as input, then the algorithm will use as many levels as + * the pyramids have, but no more than maxLevel. + * + * @see org.opencv.video.Video.calcOpticalFlowPyrLK + */ + public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err, Size winSize, int maxLevel) + { + Mat prevPts_mat = prevPts; + Mat nextPts_mat = nextPts; + Mat status_mat = status; + Mat err_mat = err; + calcOpticalFlowPyrLK_1(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj, winSize.width, winSize.height, maxLevel); + + return; + } + +/** + *

Calculates an optical flow for a sparse feature set using the iterative + * Lucas-Kanade method with pyramids.

+ * + *

The function implements a sparse iterative version of the Lucas-Kanade + * optical flow in pyramids. See [Bouguet00]. The function is parallelized with + * the TBB library.

+ * + * @param prevImg first 8-bit input image or pyramid constructed by + * "buildOpticalFlowPyramid". + * @param nextImg second input image or pyramid of the same size and the same + * type as prevImg. + * @param prevPts vector of 2D points for which the flow needs to be found; + * point coordinates must be single-precision floating-point numbers. + * @param nextPts output vector of 2D points (with single-precision + * floating-point coordinates) containing the calculated new positions of input + * features in the second image; when the OPTFLOW_USE_INITIAL_FLOW flag + * is passed, the vector must have the same size as in the input. + * @param status output status vector (of unsigned chars); each element of the + * vector is set to 1 if the flow for the corresponding features has been found; + * otherwise, it is set to 0. + * @param err output vector of errors; each element of the vector is set to an + * error for the corresponding feature; the type of the error measure can be set + * in the flags parameter; if the flow wasn't found, then the error is not + * defined (use the status parameter to find such cases). + * + * @see org.opencv.video.Video.calcOpticalFlowPyrLK + */ + public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err) + { + Mat prevPts_mat = prevPts; + Mat nextPts_mat = nextPts; + Mat status_mat = status; + Mat err_mat = err; + calcOpticalFlowPyrLK_2(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj); + + return; + } + + + // + // C++: void calcOpticalFlowSF(Mat from, Mat to, Mat flow, int layers, int averaging_block_size, int max_flow) + // + +/** + *

Calculates an optical flow using the "SimpleFlow" algorithm.

+ * + *

See [Tao2012] and the project site: http://graphics.berkeley.edu/papers/Tao-SAN-2012-05/.
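A minimal usage sketch (illustrative; from and to are assumed to be two consecutive 8-bit color frames, and the layer/block/flow values follow a common demo configuration rather than mandated defaults):

Mat flow = new Mat();
Video.calcOpticalFlowSF(from, to, flow, 3, 2, 4); // 3 layers, 2-px blocks, max flow 4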

+ * + * @param from First input image. + * @param to Second input image of the same size and type as from. + * @param flow Computed flow image that has the same size as from. + * @param layers Number of layers + * @param averaging_block_size Size of the block over which the cost function + * is summed up for each pixel + * @param max_flow maximal flow that the algorithm searches for at each level + * + * @see org.opencv.video.Video.calcOpticalFlowSF + */ + public static void calcOpticalFlowSF(Mat from, Mat to, Mat flow, int layers, int averaging_block_size, int max_flow) + { + + calcOpticalFlowSF_0(from.nativeObj, to.nativeObj, flow.nativeObj, layers, averaging_block_size, max_flow); + + return; + } + + + // + // C++: void calcOpticalFlowSF(Mat from, Mat to, Mat flow, int layers, int averaging_block_size, int max_flow, double sigma_dist, double sigma_color, int postprocess_window, double sigma_dist_fix, double sigma_color_fix, double occ_thr, int upscale_averaging_radius, double upscale_sigma_dist, double upscale_sigma_color, double speed_up_thr) + // + +/** + *

Calculates an optical flow using the "SimpleFlow" algorithm.

+ * + *

See [Tao2012] and the project site: http://graphics.berkeley.edu/papers/Tao-SAN-2012-05/.

+ * + * @param from First input image. + * @param to Second input image of the same size and type as from. + * @param flow Computed flow image that has the same size as from. + * @param layers Number of layers + * @param averaging_block_size Size of the block over which the cost function + * is summed up for each pixel + * @param max_flow maximal flow that the algorithm searches for at each level + * @param sigma_dist vector smooth spatial sigma parameter + * @param sigma_color vector smooth color sigma parameter + * @param postprocess_window window size for postprocess cross bilateral filter + * @param sigma_dist_fix spatial sigma for postprocess cross bilateral filter + * @param sigma_color_fix color sigma for postprocess cross bilateral filter + * @param occ_thr threshold for detecting occlusions + * @param upscale_averaging_radius window size for the bilateral upscale operation + * @param upscale_sigma_dist spatial sigma for bilateral upscale operation + * @param upscale_sigma_color color sigma for bilateral upscale operation + * @param speed_up_thr threshold for detecting points with irregular flow, where + * the flow should be recalculated after upscale + * + * @see org.opencv.video.Video.calcOpticalFlowSF + */ + public static void calcOpticalFlowSF(Mat from, Mat to, Mat flow, int layers, int averaging_block_size, int max_flow, double sigma_dist, double sigma_color, int postprocess_window, double sigma_dist_fix, double sigma_color_fix, double occ_thr, int upscale_averaging_radius, double upscale_sigma_dist, double upscale_sigma_color, double speed_up_thr) + { + + calcOpticalFlowSF_1(from.nativeObj, to.nativeObj, flow.nativeObj, layers, averaging_block_size, max_flow, sigma_dist, sigma_color, postprocess_window, sigma_dist_fix, sigma_color_fix, occ_thr, upscale_averaging_radius, upscale_sigma_dist, upscale_sigma_color, speed_up_thr); + + return; + } + + + // + // C++: Mat estimateRigidTransform(Mat src, Mat dst, bool fullAffine) + // + +/** + *

Computes an optimal affine transformation between two 2D point sets.

+ * + *

The function finds an optimal affine transform *[A|b]* (a 2 x 3 + * floating-point matrix) that best approximates the affine transformation + * between:

+ *
    + *
  • Two point sets + *
  • Two raster images. In this case, the function first finds some + * features in the src image and finds the corresponding features + * in the dst image. After that, the problem is reduced to the first + * case. + *
+ * + *

In the case of point sets, the problem is formulated as follows: you need to find + * a 2x2 matrix *A* and a 2x1 vector *b* such that:

+ * + *

[A*|b*] = argmin_([A|b]) sum_i ||dst[i] - A*src[i]^T - b||^2

+ * + *

where src[i] and dst[i] are the i-th points in + * src and dst, respectively.

+ * + *

[A|b] can be either arbitrary (when fullAffine=true) or + * have the form

+ * + *

 a_11  a_12  b_1
-a_12  a_11  b_2

+ * + *

when fullAffine=false.
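A minimal usage sketch (the point values are purely illustrative; any pair of same-sized point sets, or a pair of images, can be passed):

MatOfPoint2f srcPts = new MatOfPoint2f(new Point(0, 0), new Point(100, 0), new Point(0, 100));
MatOfPoint2f dstPts = new MatOfPoint2f(new Point(10, 5), new Point(109, 7), new Point(8, 104));
Mat T = Video.estimateRigidTransform(srcPts, dstPts, false);
// T is a 2 x 3 transform matrix; it may be empty if estimation fails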

+ * + * @param src First input 2D point set stored in std.vector or + * Mat, or an image stored in Mat. + * @param dst Second input 2D point set of the same size and the same type as + * src, or another image. + * @param fullAffine If true, the function finds an optimal affine + * transformation with no additional restrictions (6 degrees of freedom). + * Otherwise, the class of transformations to choose from is limited to + * combinations of translation, rotation, and uniform scaling (4 degrees of + * freedom). + * + * @see org.opencv.video.Video.estimateRigidTransform + * @see org.opencv.calib3d.Calib3d#findHomography + * @see org.opencv.imgproc.Imgproc#getAffineTransform + * @see org.opencv.imgproc.Imgproc#getPerspectiveTransform + */ + public static Mat estimateRigidTransform(Mat src, Mat dst, boolean fullAffine) + { + + Mat retVal = new Mat(estimateRigidTransform_0(src.nativeObj, dst.nativeObj, fullAffine)); + + return retVal; + } + + + // + // C++: int meanShift(Mat probImage, Rect& window, TermCriteria criteria) + // + +/** + *

Finds an object on a back projection image.

+ * + *

The function implements the iterative object search algorithm. It takes the + * input back projection of an object and the initial position. The mass center + * in window of the back projection image is computed and the + * search window center shifts to the mass center. The procedure is repeated + * until the specified number of iterations criteria.maxCount is + * reached or until the window center shifts by less than criteria.epsilon. + * The algorithm is used inside "CamShift" and, unlike "CamShift", the search + * window size and orientation do not change during the search. You can simply + * pass the output of "calcBackProject" to this function, but better results can + * be obtained if you pre-filter the back projection and remove the noise. For + * example, you can do this by retrieving connected components with + * "findContours", throwing away contours with small area ("contourArea"), and + * rendering the remaining contours with "drawContours".
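A minimal usage sketch (illustrative; backProj is assumed to be a back projection computed beforehand with Imgproc.calcBackProject, and the initial window is a guess):

Rect window = new Rect(50, 50, 80, 80); // initial search window
TermCriteria criteria = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 10, 1.0);
int iters = Video.meanShift(backProj, window, criteria);
// window has been shifted in place to the object's new location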

+ * + * @param probImage Back projection of the object histogram. See + * "calcBackProject" for details. + * @param window Initial search window. + * @param criteria Stop criteria for the iterative search algorithm. + * + *

@return Number of iterations meanShift took to converge.

+ * + * @see org.opencv.video.Video.meanShift + */ + public static int meanShift(Mat probImage, Rect window, TermCriteria criteria) + { + double[] window_out = new double[4]; + int retVal = meanShift_0(probImage.nativeObj, window.x, window.y, window.width, window.height, window_out, criteria.type, criteria.maxCount, criteria.epsilon); + if(window!=null){ window.x = (int)window_out[0]; window.y = (int)window_out[1]; window.width = (int)window_out[2]; window.height = (int)window_out[3]; } + return retVal; + } + + + // + // C++: void segmentMotion(Mat mhi, Mat& segmask, vector_Rect& boundingRects, double timestamp, double segThresh) + // + +/** + *

Splits a motion history image into a few parts corresponding to separate + * independent motions (for example, left hand, right hand).

+ * + *

The function finds all of the motion segments and marks them in + * segmask with individual values (1,2,...). It also computes a + * vector with ROIs of motion connected components. After that the motion + * direction for every component can be calculated with "calcGlobalOrientation" + * using the extracted mask of the particular component.
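A minimal usage sketch (illustrative; mhi and timestamp are assumed to come from an "updateMotionHistory" loop, and the 0.25 threshold is an example value only):

Mat segmask = new Mat();
MatOfRect boundingRects = new MatOfRect();
Video.segmentMotion(mhi, segmask, boundingRects, timestamp, 0.25);
for (Rect roi : boundingRects.toArray()) {
    System.out.println("moving component: " + roi); // one ROI per independent motion
}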

+ * + * @param mhi Motion history image. + * @param segmask Image where the found mask should be stored, single-channel, + * 32-bit floating-point. + * @param boundingRects Vector containing ROIs of motion connected components. + * @param timestamp Current time in milliseconds or other units. + * @param segThresh Segmentation threshold that is recommended to be equal to + * the interval between motion history "steps" or greater. + * + * @see org.opencv.video.Video.segmentMotion + */ + public static void segmentMotion(Mat mhi, Mat segmask, MatOfRect boundingRects, double timestamp, double segThresh) + { + Mat boundingRects_mat = boundingRects; + segmentMotion_0(mhi.nativeObj, segmask.nativeObj, boundingRects_mat.nativeObj, timestamp, segThresh); + + return; + } + + + // + // C++: void updateMotionHistory(Mat silhouette, Mat& mhi, double timestamp, double duration) + // + +/** + *

Updates the motion history image by a moving silhouette.

+ * + *

The function updates the motion history image as follows:

+ * + *

mhi(x,y) = timestamp    if silhouette(x,y) != 0
mhi(x,y) = 0            if silhouette(x,y) = 0 and mhi(x,y) < (timestamp - duration)
mhi(x,y) = mhi(x,y)     otherwise

+ * + *

That is, MHI pixels where motion occurs are set to the current + * timestamp, while pixels where motion last happened long ago are cleared.

+ * + *

The function, together with "calcMotionGradient" and "calcGlobalOrientation", + * implements a motion templates technique described in [Davis97] and + * [Bradski00]. + * See also the OpenCV sample motempl.c that demonstrates the use + * of all the motion template functions.
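A per-frame sketch of that pipeline (illustrative; prevGray and currGray are assumed 8-bit grayscale frames, mhi a pre-allocated CV_32FC1 image of the same size, and timestamp a clock value whose units match MHI_DURATION):

double MHI_DURATION = 1.0; // assumed history length, in the same units as timestamp
Mat silhouette = new Mat();
Core.absdiff(prevGray, currGray, silhouette); // crude silhouette from frame differencing
Imgproc.threshold(silhouette, silhouette, 30, 1, Imgproc.THRESH_BINARY);
Video.updateMotionHistory(silhouette, mhi, timestamp, MHI_DURATION);
Mat mask = new Mat(), orientation = new Mat();
Video.calcMotionGradient(mhi, mask, orientation, 0.5, 0.05);
double angle = Video.calcGlobalOrientation(orientation, mask, mhi, timestamp, MHI_DURATION);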

+ * + * @param silhouette Silhouette mask that has non-zero pixels where the motion + * occurs. + * @param mhi Motion history image that is updated by the function + * (single-channel, 32-bit floating-point). + * @param timestamp Current time in milliseconds or other units. + * @param duration Maximal duration of the motion track in the same units as + * timestamp. + * + * @see org.opencv.video.Video.updateMotionHistory + */ + public static void updateMotionHistory(Mat silhouette, Mat mhi, double timestamp, double duration) + { + + updateMotionHistory_0(silhouette.nativeObj, mhi.nativeObj, timestamp, duration); + + return; + } + + + + + // C++: RotatedRect CamShift(Mat probImage, Rect& window, TermCriteria criteria) + private static native double[] CamShift_0(long probImage_nativeObj, int window_x, int window_y, int window_width, int window_height, double[] window_out, int criteria_type, int criteria_maxCount, double criteria_epsilon); + + // C++: int buildOpticalFlowPyramid(Mat img, vector_Mat& pyramid, Size winSize, int maxLevel, bool withDerivatives = true, int pyrBorder = BORDER_REFLECT_101, int derivBorder = BORDER_CONSTANT, bool tryReuseInputImage = true) + private static native int buildOpticalFlowPyramid_0(long img_nativeObj, long pyramid_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel, boolean withDerivatives, int pyrBorder, int derivBorder, boolean tryReuseInputImage); + private static native int buildOpticalFlowPyramid_1(long img_nativeObj, long pyramid_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel); + + // C++: double calcGlobalOrientation(Mat orientation, Mat mask, Mat mhi, double timestamp, double duration) + private static native double calcGlobalOrientation_0(long orientation_nativeObj, long mask_nativeObj, long mhi_nativeObj, double timestamp, double duration); + + // C++: void calcMotionGradient(Mat mhi, Mat& mask, Mat& orientation, double delta1, double delta2, int apertureSize = 3) + private static native void calcMotionGradient_0(long mhi_nativeObj, long mask_nativeObj, long orientation_nativeObj, double delta1, double delta2, int apertureSize); + private static native void calcMotionGradient_1(long mhi_nativeObj, long mask_nativeObj, long orientation_nativeObj, double delta1, double delta2); + + // C++: void calcOpticalFlowFarneback(Mat prev, Mat next, Mat& flow, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags) + private static native void calcOpticalFlowFarneback_0(long prev_nativeObj, long next_nativeObj, long flow_nativeObj, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags); + + // C++: void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, vector_Point2f prevPts, vector_Point2f& nextPts, vector_uchar& status, vector_float& err, Size winSize = Size(21,21), int maxLevel = 3, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), int flags = 0, double minEigThreshold = 1e-4) + private static native void calcOpticalFlowPyrLK_0(long prevImg_nativeObj, long nextImg_nativeObj, long prevPts_mat_nativeObj, long nextPts_mat_nativeObj, long status_mat_nativeObj, long err_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel, int criteria_type, int criteria_maxCount, double criteria_epsilon, int flags, double minEigThreshold); + private static native void calcOpticalFlowPyrLK_1(long prevImg_nativeObj, long nextImg_nativeObj, long prevPts_mat_nativeObj, long 
nextPts_mat_nativeObj, long status_mat_nativeObj, long err_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel); + private static native void calcOpticalFlowPyrLK_2(long prevImg_nativeObj, long nextImg_nativeObj, long prevPts_mat_nativeObj, long nextPts_mat_nativeObj, long status_mat_nativeObj, long err_mat_nativeObj); + + // C++: void calcOpticalFlowSF(Mat from, Mat to, Mat flow, int layers, int averaging_block_size, int max_flow) + private static native void calcOpticalFlowSF_0(long from_nativeObj, long to_nativeObj, long flow_nativeObj, int layers, int averaging_block_size, int max_flow); + + // C++: void calcOpticalFlowSF(Mat from, Mat to, Mat flow, int layers, int averaging_block_size, int max_flow, double sigma_dist, double sigma_color, int postprocess_window, double sigma_dist_fix, double sigma_color_fix, double occ_thr, int upscale_averaging_radius, double upscale_sigma_dist, double upscale_sigma_color, double speed_up_thr) + private static native void calcOpticalFlowSF_1(long from_nativeObj, long to_nativeObj, long flow_nativeObj, int layers, int averaging_block_size, int max_flow, double sigma_dist, double sigma_color, int postprocess_window, double sigma_dist_fix, double sigma_color_fix, double occ_thr, int upscale_averaging_radius, double upscale_sigma_dist, double upscale_sigma_color, double speed_up_thr); + + // C++: Mat estimateRigidTransform(Mat src, Mat dst, bool fullAffine) + private static native long estimateRigidTransform_0(long src_nativeObj, long dst_nativeObj, boolean fullAffine); + + // C++: int meanShift(Mat probImage, Rect& window, TermCriteria criteria) + private static native int meanShift_0(long probImage_nativeObj, int window_x, int window_y, int window_width, int window_height, double[] window_out, int criteria_type, int criteria_maxCount, double criteria_epsilon); + + // C++: void segmentMotion(Mat mhi, Mat& segmask, vector_Rect& boundingRects, double timestamp, double segThresh) + private static native void segmentMotion_0(long mhi_nativeObj, long segmask_nativeObj, long boundingRects_mat_nativeObj, double timestamp, double segThresh); + + // C++: void updateMotionHistory(Mat silhouette, Mat& mhi, double timestamp, double duration) + private static native void updateMotionHistory_0(long silhouette_nativeObj, long mhi_nativeObj, double timestamp, double duration); + +} diff --git a/src/org/opencv/video/package.bluej b/src/org/opencv/video/package.bluej new file mode 100644 index 0000000..e69de29 diff --git a/src/org/package.bluej b/src/org/package.bluej new file mode 100644 index 0000000..6c644ae --- /dev/null +++ b/src/org/package.bluej @@ -0,0 +1,15 @@ +#BlueJ package file +package.editor.height=400 +package.editor.width=560 +package.editor.x=285 +package.editor.y=141 +package.numDependencies=0 +package.numTargets=1 +package.showExtends=true +package.showUses=true +target1.height=62 +target1.name=opencv +target1.type=PackageTarget +target1.width=80 +target1.x=160 +target1.y=10 diff --git a/src/package.bluej b/src/package.bluej new file mode 100644 index 0000000..0c9a2f4 --- /dev/null +++ b/src/package.bluej @@ -0,0 +1,22 @@ +#BlueJ package file +package.editor.height=400 +package.editor.width=560 +package.editor.x=265 +package.editor.y=121 +package.numDependencies=0 +package.numTargets=2 +package.showExtends=true +package.showUses=true +project.charset=windows-1252 +target1.height=62 +target1.name=gab +target1.type=PackageTarget +target1.width=80 +target1.x=90 +target1.y=10 +target2.height=62 +target2.name=org 
+target2.type=PackageTarget +target2.width=80 +target2.x=180 +target2.y=10