diff --git a/exe/FaceLandmarkImg/FaceLandmarkImg.vcxproj b/exe/FaceLandmarkImg/FaceLandmarkImg.vcxproj
index 38bc8fdb..ad1383da 100644
--- a/exe/FaceLandmarkImg/FaceLandmarkImg.vcxproj
+++ b/exe/FaceLandmarkImg/FaceLandmarkImg.vcxproj
@@ -58,34 +58,34 @@
-
+
-
+
-
+
-
+
diff --git a/exe/FaceLandmarkVid/FaceLandmarkVid.vcxproj b/exe/FaceLandmarkVid/FaceLandmarkVid.vcxproj
index 98ad4bae..6b1a0fb2 100644
--- a/exe/FaceLandmarkVid/FaceLandmarkVid.vcxproj
+++ b/exe/FaceLandmarkVid/FaceLandmarkVid.vcxproj
@@ -58,34 +58,34 @@
-
+
-
+
-
+
-
+
diff --git a/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.vcxproj b/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.vcxproj
index d82f3ddf..0eec0218 100644
--- a/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.vcxproj
+++ b/exe/FaceLandmarkVidMulti/FaceLandmarkVidMulti.vcxproj
@@ -57,34 +57,34 @@
-
+
-
+
-
+
-
+
diff --git a/exe/FeatureExtraction/FeatureExtraction.vcxproj b/exe/FeatureExtraction/FeatureExtraction.vcxproj
index cc9be52b..623927c5 100644
--- a/exe/FeatureExtraction/FeatureExtraction.vcxproj
+++ b/exe/FeatureExtraction/FeatureExtraction.vcxproj
@@ -57,34 +57,34 @@
-
+
-
+
-
+
-
+
diff --git a/exe/Recording/Recording.vcxproj b/exe/Recording/Recording.vcxproj
index ffd4bbd5..4cb2e17c 100644
--- a/exe/Recording/Recording.vcxproj
+++ b/exe/Recording/Recording.vcxproj
@@ -55,23 +55,23 @@
-
+
-
+
-
+
-
+
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/interface.h b/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/interface.h
deleted file mode 100644
index 51f76061..00000000
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/interface.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#ifndef _HAL_INTERFACE_HPP_INCLUDED_
-#define _HAL_INTERFACE_HPP_INCLUDED_
-
-//! @addtogroup core_hal_interface
-//! @{
-
-#define CV_HAL_ERROR_OK 0
-#define CV_HAL_ERROR_NOT_IMPLEMENTED 1
-#define CV_HAL_ERROR_UNKNOWN -1
-
-#define CV_HAL_CMP_EQ 0
-#define CV_HAL_CMP_GT 1
-#define CV_HAL_CMP_GE 2
-#define CV_HAL_CMP_LT 3
-#define CV_HAL_CMP_LE 4
-#define CV_HAL_CMP_NE 5
-
-#ifdef __cplusplus
-#include
-#else
-#include
-#endif
-
-/* primitive types */
-/*
- schar - signed 1 byte integer
- uchar - unsigned 1 byte integer
- short - signed 2 byte integer
- ushort - unsigned 2 byte integer
- int - signed 4 byte integer
- uint - unsigned 4 byte integer
- int64 - signed 8 byte integer
- uint64 - unsigned 8 byte integer
-*/
-
-#if !defined _MSC_VER && !defined __BORLANDC__
-# if defined __cplusplus && __cplusplus >= 201103L && !defined __APPLE__
-# include
- typedef std::uint32_t uint;
-# else
-# include
- typedef uint32_t uint;
-# endif
-#else
- typedef unsigned uint;
-#endif
-
-typedef signed char schar;
-
-#ifndef __IPL_H__
- typedef unsigned char uchar;
- typedef unsigned short ushort;
-#endif
-
-#if defined _MSC_VER || defined __BORLANDC__
- typedef __int64 int64;
- typedef unsigned __int64 uint64;
-# define CV_BIG_INT(n) n##I64
-# define CV_BIG_UINT(n) n##UI64
-#else
- typedef int64_t int64;
- typedef uint64_t uint64;
-# define CV_BIG_INT(n) n##LL
-# define CV_BIG_UINT(n) n##ULL
-#endif
-
-//! @}
-
-#endif
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/private.cuda.hpp b/lib/3rdParty/OpenCV3.1/include/opencv2/core/private.cuda.hpp
deleted file mode 100644
index d676ce85..00000000
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/private.cuda.hpp
+++ /dev/null
@@ -1,172 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef __OPENCV_CORE_PRIVATE_CUDA_HPP__
-#define __OPENCV_CORE_PRIVATE_CUDA_HPP__
-
-#ifndef __OPENCV_BUILD
-# error this is a private header which should not be used from outside of the OpenCV library
-#endif
-
-#include "cvconfig.h"
-
-#include "opencv2/core/cvdef.h"
-#include "opencv2/core/base.hpp"
-
-#include "opencv2/core/cuda.hpp"
-
-#ifdef HAVE_CUDA
-# include
-# include
-# include
-# include "opencv2/core/cuda_stream_accessor.hpp"
-# include "opencv2/core/cuda/common.hpp"
-
-# define NPP_VERSION (NPP_VERSION_MAJOR * 1000 + NPP_VERSION_MINOR * 100 + NPP_VERSION_BUILD)
-
-# define CUDART_MINIMUM_REQUIRED_VERSION 4020
-
-# if (CUDART_VERSION < CUDART_MINIMUM_REQUIRED_VERSION)
-# error "Insufficient Cuda Runtime library version, please update it."
-# endif
-
-# if defined(CUDA_ARCH_BIN_OR_PTX_10)
-# error "OpenCV CUDA module doesn't support NVIDIA compute capability 1.0"
-# endif
-#endif
-
-//! @cond IGNORED
-
-namespace cv { namespace cuda {
- CV_EXPORTS cv::String getNppErrorMessage(int code);
- CV_EXPORTS cv::String getCudaDriverApiErrorMessage(int code);
-
- CV_EXPORTS GpuMat getInputMat(InputArray _src, Stream& stream);
-
- CV_EXPORTS GpuMat getOutputMat(OutputArray _dst, int rows, int cols, int type, Stream& stream);
- static inline GpuMat getOutputMat(OutputArray _dst, Size size, int type, Stream& stream)
- {
- return getOutputMat(_dst, size.height, size.width, type, stream);
- }
-
- CV_EXPORTS void syncOutput(const GpuMat& dst, OutputArray _dst, Stream& stream);
-}}
-
-#ifndef HAVE_CUDA
-
-static inline void throw_no_cuda() { CV_Error(cv::Error::GpuNotSupported, "The library is compiled without CUDA support"); }
-
-#else // HAVE_CUDA
-
-static inline void throw_no_cuda() { CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform"); }
-
-namespace cv { namespace cuda
-{
- class CV_EXPORTS BufferPool
- {
- public:
- explicit BufferPool(Stream& stream);
-
- GpuMat getBuffer(int rows, int cols, int type);
- GpuMat getBuffer(Size size, int type) { return getBuffer(size.height, size.width, type); }
-
- GpuMat::Allocator* getAllocator() const { return allocator_; }
-
- private:
- GpuMat::Allocator* allocator_;
- };
-
- static inline void checkNppError(int code, const char* file, const int line, const char* func)
- {
- if (code < 0)
- cv::error(cv::Error::GpuApiCallError, getNppErrorMessage(code), func, file, line);
- }
-
- static inline void checkCudaDriverApiError(int code, const char* file, const int line, const char* func)
- {
- if (code != CUDA_SUCCESS)
- cv::error(cv::Error::GpuApiCallError, getCudaDriverApiErrorMessage(code), func, file, line);
- }
-
- template struct NPPTypeTraits;
- template<> struct NPPTypeTraits { typedef Npp8u npp_type; };
- template<> struct NPPTypeTraits { typedef Npp8s npp_type; };
- template<> struct NPPTypeTraits { typedef Npp16u npp_type; };
- template<> struct NPPTypeTraits { typedef Npp16s npp_type; };
- template<> struct NPPTypeTraits { typedef Npp32s npp_type; };
- template<> struct NPPTypeTraits { typedef Npp32f npp_type; };
- template<> struct NPPTypeTraits { typedef Npp64f npp_type; };
-
- class NppStreamHandler
- {
- public:
- inline explicit NppStreamHandler(Stream& newStream)
- {
- oldStream = nppGetStream();
- nppSetStream(StreamAccessor::getStream(newStream));
- }
-
- inline explicit NppStreamHandler(cudaStream_t newStream)
- {
- oldStream = nppGetStream();
- nppSetStream(newStream);
- }
-
- inline ~NppStreamHandler()
- {
- nppSetStream(oldStream);
- }
-
- private:
- cudaStream_t oldStream;
- };
-}}
-
-#define nppSafeCall(expr) cv::cuda::checkNppError(expr, __FILE__, __LINE__, CV_Func)
-#define cuSafeCall(expr) cv::cuda::checkCudaDriverApiError(expr, __FILE__, __LINE__, CV_Func)
-
-#endif // HAVE_CUDA
-
-//! @endcond
-
-#endif // __OPENCV_CORE_CUDA_PRIVATE_HPP__
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/private.hpp b/lib/3rdParty/OpenCV3.1/include/opencv2/core/private.hpp
deleted file mode 100644
index c71ec626..00000000
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/private.hpp
+++ /dev/null
@@ -1,425 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef __OPENCV_CORE_PRIVATE_HPP__
-#define __OPENCV_CORE_PRIVATE_HPP__
-
-#ifndef __OPENCV_BUILD
-# error this is a private header which should not be used from outside of the OpenCV library
-#endif
-
-#include "opencv2/core.hpp"
-#include "cvconfig.h"
-
-#ifdef HAVE_EIGEN
-# if defined __GNUC__ && defined __APPLE__
-# pragma GCC diagnostic ignored "-Wshadow"
-# endif
-# include
-# include "opencv2/core/eigen.hpp"
-#endif
-
-#ifdef HAVE_TBB
-# include "tbb/tbb_stddef.h"
-# if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202
-# include "tbb/tbb.h"
-# include "tbb/task.h"
-# undef min
-# undef max
-# else
-# undef HAVE_TBB
-# endif
-#endif
-
-//! @cond IGNORED
-
-namespace cv
-{
-#ifdef HAVE_TBB
-
- typedef tbb::blocked_range BlockedRange;
-
- template static inline
- void parallel_for( const BlockedRange& range, const Body& body )
- {
- tbb::parallel_for(range, body);
- }
-
- typedef tbb::split Split;
-
- template static inline
- void parallel_reduce( const BlockedRange& range, Body& body )
- {
- tbb::parallel_reduce(range, body);
- }
-
- typedef tbb::concurrent_vector ConcurrentRectVector;
-#else
- class BlockedRange
- {
- public:
- BlockedRange() : _begin(0), _end(0), _grainsize(0) {}
- BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {}
- int begin() const { return _begin; }
- int end() const { return _end; }
- int grainsize() const { return _grainsize; }
-
- protected:
- int _begin, _end, _grainsize;
- };
-
- template static inline
- void parallel_for( const BlockedRange& range, const Body& body )
- {
- body(range);
- }
- typedef std::vector ConcurrentRectVector;
-
- class Split {};
-
- template static inline
- void parallel_reduce( const BlockedRange& range, Body& body )
- {
- body(range);
- }
-#endif
-
- // Returns a static string if there is a parallel framework,
- // NULL otherwise.
- CV_EXPORTS const char* currentParallelFramework();
-} //namespace cv
-
-/****************************************************************************************\
-* Common declarations *
-\****************************************************************************************/
-
-/* the alignment of all the allocated buffers */
-#define CV_MALLOC_ALIGN 16
-
-/* IEEE754 constants and macros */
-#define CV_TOGGLE_FLT(x) ((x)^((int)(x) < 0 ? 0x7fffffff : 0))
-#define CV_TOGGLE_DBL(x) ((x)^((int64)(x) < 0 ? CV_BIG_INT(0x7fffffffffffffff) : 0))
-
-static inline void* cvAlignPtr( const void* ptr, int align = 32 )
-{
- CV_DbgAssert ( (align & (align-1)) == 0 );
- return (void*)( ((size_t)ptr + align - 1) & ~(size_t)(align-1) );
-}
-
-static inline int cvAlign( int size, int align )
-{
- CV_DbgAssert( (align & (align-1)) == 0 && size < INT_MAX );
- return (size + align - 1) & -align;
-}
-
-#ifdef IPL_DEPTH_8U
-static inline cv::Size cvGetMatSize( const CvMat* mat )
-{
- return cv::Size(mat->cols, mat->rows);
-}
-#endif
-
-namespace cv
-{
-CV_EXPORTS void scalarToRawData(const cv::Scalar& s, void* buf, int type, int unroll_to = 0);
-}
-
-// property implementation macros
-
-#define CV_IMPL_PROPERTY_RO(type, name, member) \
- inline type get##name() const { return member; }
-
-#define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \
- CV_IMPL_PROPERTY_RO(r_type, name, member) \
- inline void set##name(w_type val) { member = val; }
-
-#define CV_HELP_WRAP_PROPERTY(r_type, w_type, name, internal_name, internal_obj) \
- r_type get##name() const { return internal_obj.get##internal_name(); } \
- void set##name(w_type val) { internal_obj.set##internal_name(val); }
-
-#define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member)
-#define CV_IMPL_PROPERTY_S(type, name, member) CV_HELP_IMPL_PROPERTY(type, const type &, name, member)
-
-#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, type, name, internal_name, internal_obj)
-#define CV_WRAP_PROPERTY_S(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, const type &, name, internal_name, internal_obj)
-
-#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)
-#define CV_WRAP_SAME_PROPERTY_S(type, name, internal_obj) CV_WRAP_PROPERTY_S(type, name, name, internal_obj)
-
-/****************************************************************************************\
-* Structures and macros for integration with IPP *
-\****************************************************************************************/
-
-#ifdef HAVE_IPP
-#include "ipp.h"
-
-#ifndef IPP_VERSION_UPDATE // prior to 7.1
-#define IPP_VERSION_UPDATE 0
-#endif
-
-#define IPP_VERSION_X100 (IPP_VERSION_MAJOR * 100 + IPP_VERSION_MINOR*10 + IPP_VERSION_UPDATE)
-
-// General define for ipp function disabling
-#define IPP_DISABLE_BLOCK 0
-
-#ifdef CV_MALLOC_ALIGN
-#undef CV_MALLOC_ALIGN
-#endif
-#define CV_MALLOC_ALIGN 32 // required for AVX optimization
-
-#define setIppErrorStatus() cv::ipp::setIppStatus(-1, CV_Func, __FILE__, __LINE__)
-
-static inline IppiSize ippiSize(int width, int height)
-{
- IppiSize size = { width, height };
- return size;
-}
-
-static inline IppiSize ippiSize(const cv::Size & _size)
-{
- IppiSize size = { _size.width, _size.height };
- return size;
-}
-
-static inline IppiBorderType ippiGetBorderType(int borderTypeNI)
-{
- return borderTypeNI == cv::BORDER_CONSTANT ? ippBorderConst :
- borderTypeNI == cv::BORDER_WRAP ? ippBorderWrap :
- borderTypeNI == cv::BORDER_REPLICATE ? ippBorderRepl :
- borderTypeNI == cv::BORDER_REFLECT_101 ? ippBorderMirror :
- borderTypeNI == cv::BORDER_REFLECT ? ippBorderMirrorR : (IppiBorderType)-1;
-}
-
-static inline IppDataType ippiGetDataType(int depth)
-{
- return depth == CV_8U ? ipp8u :
- depth == CV_8S ? ipp8s :
- depth == CV_16U ? ipp16u :
- depth == CV_16S ? ipp16s :
- depth == CV_32S ? ipp32s :
- depth == CV_32F ? ipp32f :
- depth == CV_64F ? ipp64f : (IppDataType)-1;
-}
-
-// IPP temporary buffer hepler
-template
-class IppAutoBuffer
-{
-public:
- IppAutoBuffer() { m_pBuffer = NULL; }
- IppAutoBuffer(int size) { Alloc(size); }
- ~IppAutoBuffer() { Release(); }
- T* Alloc(int size) { m_pBuffer = (T*)ippMalloc(size); return m_pBuffer; }
- void Release() { if(m_pBuffer) ippFree(m_pBuffer); }
- inline operator T* () { return (T*)m_pBuffer;}
- inline operator const T* () const { return (const T*)m_pBuffer;}
-private:
- // Disable copy operations
- IppAutoBuffer(IppAutoBuffer &) {};
- IppAutoBuffer& operator =(const IppAutoBuffer &) {return *this;};
-
- T* m_pBuffer;
-};
-
-#else
-#define IPP_VERSION_X100 0
-#endif
-
-// There shoud be no API difference in OpenCV between ICV and IPP since 9.0
-#if (defined HAVE_IPP_ICV_ONLY) && IPP_VERSION_X100 >= 900
-#undef HAVE_IPP_ICV_ONLY
-#endif
-
-#ifdef HAVE_IPP_ICV_ONLY
-#define HAVE_ICV 1
-#else
-#define HAVE_ICV 0
-#endif
-
-#if defined HAVE_IPP
-#if IPP_VERSION_X100 >= 900
-#define IPP_INITIALIZER(FEAT) \
-{ \
- if(FEAT) \
- ippSetCpuFeatures(FEAT); \
- else \
- ippInit(); \
-}
-#elif IPP_VERSION_X100 >= 800
-#define IPP_INITIALIZER(FEAT) \
-{ \
- ippInit(); \
-}
-#else
-#define IPP_INITIALIZER(FEAT) \
-{ \
- ippStaticInit(); \
-}
-#endif
-
-#ifdef CVAPI_EXPORTS
-#define IPP_INITIALIZER_AUTO \
-struct __IppInitializer__ \
-{ \
- __IppInitializer__() \
- {IPP_INITIALIZER(cv::ipp::getIppFeatures())} \
-}; \
-static struct __IppInitializer__ __ipp_initializer__;
-#else
-#define IPP_INITIALIZER_AUTO
-#endif
-#else
-#define IPP_INITIALIZER
-#define IPP_INITIALIZER_AUTO
-#endif
-
-#define CV_IPP_CHECK_COND (cv::ipp::useIPP())
-#define CV_IPP_CHECK() if(CV_IPP_CHECK_COND)
-
-#ifdef HAVE_IPP
-
-#ifdef CV_IPP_RUN_VERBOSE
-#define CV_IPP_RUN_(condition, func, ...) \
- { \
- if (cv::ipp::useIPP() && (condition) && func) \
- { \
- printf("%s: IPP implementation is running\n", CV_Func); \
- fflush(stdout); \
- CV_IMPL_ADD(CV_IMPL_IPP); \
- return __VA_ARGS__; \
- } \
- else \
- { \
- printf("%s: Plain implementation is running\n", CV_Func); \
- fflush(stdout); \
- } \
- }
-#elif defined CV_IPP_RUN_ASSERT
-#define CV_IPP_RUN_(condition, func, ...) \
- { \
- if (cv::ipp::useIPP() && (condition)) \
- { \
- if(func) \
- { \
- CV_IMPL_ADD(CV_IMPL_IPP); \
- } \
- else \
- { \
- setIppErrorStatus(); \
- CV_Error(cv::Error::StsAssert, #func); \
- } \
- return __VA_ARGS__; \
- } \
- }
-#else
-#define CV_IPP_RUN_(condition, func, ...) \
- if (cv::ipp::useIPP() && (condition) && func) \
- { \
- CV_IMPL_ADD(CV_IMPL_IPP); \
- return __VA_ARGS__; \
- }
-#endif
-
-#else
-#define CV_IPP_RUN_(condition, func, ...)
-#endif
-
-#define CV_IPP_RUN(condition, func, ...) CV_IPP_RUN_(condition, func, __VA_ARGS__)
-
-
-#ifndef IPPI_CALL
-# define IPPI_CALL(func) CV_Assert((func) >= 0)
-#endif
-
-/* IPP-compatible return codes */
-typedef enum CvStatus
-{
- CV_BADMEMBLOCK_ERR = -113,
- CV_INPLACE_NOT_SUPPORTED_ERR= -112,
- CV_UNMATCHED_ROI_ERR = -111,
- CV_NOTFOUND_ERR = -110,
- CV_BADCONVERGENCE_ERR = -109,
-
- CV_BADDEPTH_ERR = -107,
- CV_BADROI_ERR = -106,
- CV_BADHEADER_ERR = -105,
- CV_UNMATCHED_FORMATS_ERR = -104,
- CV_UNSUPPORTED_COI_ERR = -103,
- CV_UNSUPPORTED_CHANNELS_ERR = -102,
- CV_UNSUPPORTED_DEPTH_ERR = -101,
- CV_UNSUPPORTED_FORMAT_ERR = -100,
-
- CV_BADARG_ERR = -49, //ipp comp
- CV_NOTDEFINED_ERR = -48, //ipp comp
-
- CV_BADCHANNELS_ERR = -47, //ipp comp
- CV_BADRANGE_ERR = -44, //ipp comp
- CV_BADSTEP_ERR = -29, //ipp comp
-
- CV_BADFLAG_ERR = -12,
- CV_DIV_BY_ZERO_ERR = -11, //ipp comp
- CV_BADCOEF_ERR = -10,
-
- CV_BADFACTOR_ERR = -7,
- CV_BADPOINT_ERR = -6,
- CV_BADSCALE_ERR = -4,
- CV_OUTOFMEM_ERR = -3,
- CV_NULLPTR_ERR = -2,
- CV_BADSIZE_ERR = -1,
- CV_NO_ERR = 0,
- CV_OK = CV_NO_ERR
-}
-CvStatus;
-
-#ifdef HAVE_TEGRA_OPTIMIZATION
-namespace tegra {
-
-CV_EXPORTS bool useTegra();
-CV_EXPORTS void setUseTegra(bool flag);
-
-}
-#endif
-
-//! @endcond
-
-#endif // __OPENCV_CORE_PRIVATE_HPP__
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/flann/dummy.h b/lib/3rdParty/OpenCV3.1/include/opencv2/flann/dummy.h
deleted file mode 100644
index 26bd3fa5..00000000
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/flann/dummy.h
+++ /dev/null
@@ -1,16 +0,0 @@
-
-#ifndef OPENCV_FLANN_DUMMY_H_
-#define OPENCV_FLANN_DUMMY_H_
-
-namespace cvflann
-{
-
-#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS
-__declspec(dllexport)
-#endif
-void dummyfunc();
-
-}
-
-
-#endif /* OPENCV_FLANN_DUMMY_H_ */
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/videoio.hpp b/lib/3rdParty/OpenCV3.1/include/opencv2/videoio.hpp
deleted file mode 100644
index ccd6c063..00000000
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/videoio.hpp
+++ /dev/null
@@ -1,680 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef __OPENCV_VIDEOIO_HPP__
-#define __OPENCV_VIDEOIO_HPP__
-
-#include "opencv2/core.hpp"
-
-/**
- @defgroup videoio Media I/O
- @{
- @defgroup videoio_c C API
- @defgroup videoio_ios iOS glue
- @defgroup videoio_winrt WinRT glue
- @}
-*/
-
-////////////////////////////////// video io /////////////////////////////////
-
-typedef struct CvCapture CvCapture;
-typedef struct CvVideoWriter CvVideoWriter;
-
-namespace cv
-{
-
-//! @addtogroup videoio
-//! @{
-
-// Camera API
-enum { CAP_ANY = 0, // autodetect
- CAP_VFW = 200, // platform native
- CAP_V4L = 200,
- CAP_V4L2 = CAP_V4L,
- CAP_FIREWARE = 300, // IEEE 1394 drivers
- CAP_FIREWIRE = CAP_FIREWARE,
- CAP_IEEE1394 = CAP_FIREWARE,
- CAP_DC1394 = CAP_FIREWARE,
- CAP_CMU1394 = CAP_FIREWARE,
- CAP_QT = 500, // QuickTime
- CAP_UNICAP = 600, // Unicap drivers
- CAP_DSHOW = 700, // DirectShow (via videoInput)
- CAP_PVAPI = 800, // PvAPI, Prosilica GigE SDK
- CAP_OPENNI = 900, // OpenNI (for Kinect)
- CAP_OPENNI_ASUS = 910, // OpenNI (for Asus Xtion)
- CAP_ANDROID = 1000, // Android - not used
- CAP_XIAPI = 1100, // XIMEA Camera API
- CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API)
- CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK
- CAP_MSMF = 1400, // Microsoft Media Foundation (via videoInput)
- CAP_WINRT = 1410, // Microsoft Windows Runtime using Media Foundation
- CAP_INTELPERC = 1500, // Intel Perceptual Computing SDK
- CAP_OPENNI2 = 1600, // OpenNI2 (for Kinect)
- CAP_OPENNI2_ASUS = 1610, // OpenNI2 (for Asus Xtion and Occipital Structure sensors)
- CAP_GPHOTO2 = 1700, // gPhoto2 connection
- CAP_GSTREAMER = 1800, // GStreamer
- CAP_FFMPEG = 1900, // FFMPEG
- CAP_IMAGES = 2000 // OpenCV Image Sequence (e.g. img_%02d.jpg)
- };
-
-// generic properties (based on DC1394 properties)
-enum { CAP_PROP_POS_MSEC =0,
- CAP_PROP_POS_FRAMES =1,
- CAP_PROP_POS_AVI_RATIO =2,
- CAP_PROP_FRAME_WIDTH =3,
- CAP_PROP_FRAME_HEIGHT =4,
- CAP_PROP_FPS =5,
- CAP_PROP_FOURCC =6,
- CAP_PROP_FRAME_COUNT =7,
- CAP_PROP_FORMAT =8,
- CAP_PROP_MODE =9,
- CAP_PROP_BRIGHTNESS =10,
- CAP_PROP_CONTRAST =11,
- CAP_PROP_SATURATION =12,
- CAP_PROP_HUE =13,
- CAP_PROP_GAIN =14,
- CAP_PROP_EXPOSURE =15,
- CAP_PROP_CONVERT_RGB =16,
- CAP_PROP_WHITE_BALANCE_BLUE_U =17,
- CAP_PROP_RECTIFICATION =18,
- CAP_PROP_MONOCHROME =19,
- CAP_PROP_SHARPNESS =20,
- CAP_PROP_AUTO_EXPOSURE =21, // DC1394: exposure control done by camera, user can adjust refernce level using this feature
- CAP_PROP_GAMMA =22,
- CAP_PROP_TEMPERATURE =23,
- CAP_PROP_TRIGGER =24,
- CAP_PROP_TRIGGER_DELAY =25,
- CAP_PROP_WHITE_BALANCE_RED_V =26,
- CAP_PROP_ZOOM =27,
- CAP_PROP_FOCUS =28,
- CAP_PROP_GUID =29,
- CAP_PROP_ISO_SPEED =30,
- CAP_PROP_BACKLIGHT =32,
- CAP_PROP_PAN =33,
- CAP_PROP_TILT =34,
- CAP_PROP_ROLL =35,
- CAP_PROP_IRIS =36,
- CAP_PROP_SETTINGS =37,
- CAP_PROP_BUFFERSIZE =38,
- CAP_PROP_AUTOFOCUS =39
- };
-
-
-// Generic camera output modes.
-// Currently, these are supported through the libv4l interface only.
-enum { CAP_MODE_BGR = 0, // BGR24 (default)
- CAP_MODE_RGB = 1, // RGB24
- CAP_MODE_GRAY = 2, // Y8
- CAP_MODE_YUYV = 3 // YUYV
- };
-
-
-// DC1394 only
-// modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode)
-// every feature can have only one mode turned on at a time
-enum { CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically)
- CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user
- CAP_PROP_DC1394_MODE_AUTO = -2,
- CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,
- CAP_PROP_DC1394_MAX = 31
- };
-
-
-// OpenNI map generators
-enum { CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,
- CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,
- CAP_OPENNI_GENERATORS_MASK = CAP_OPENNI_DEPTH_GENERATOR + CAP_OPENNI_IMAGE_GENERATOR
- };
-
-// Properties of cameras available through OpenNI interfaces
-enum { CAP_PROP_OPENNI_OUTPUT_MODE = 100,
- CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm
- CAP_PROP_OPENNI_BASELINE = 102, // in mm
- CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels
- CAP_PROP_OPENNI_REGISTRATION = 104, // flag that synchronizes the remapping depth map to image map
- // by changing depth generator's view point (if the flag is "on") or
- // sets this view point to its normal one (if the flag is "off").
- CAP_PROP_OPENNI_REGISTRATION_ON = CAP_PROP_OPENNI_REGISTRATION,
- CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,
- CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106,
- CAP_PROP_OPENNI_CIRCLE_BUFFER = 107,
- CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,
- CAP_PROP_OPENNI_GENERATOR_PRESENT = 109,
- CAP_PROP_OPENNI2_SYNC = 110,
- CAP_PROP_OPENNI2_MIRROR = 111
- };
-
-// OpenNI shortcats
-enum { CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT,
- CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_OUTPUT_MODE,
- CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_BASELINE,
- CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_FOCAL_LENGTH,
- CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION,
- CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION
- };
-
-// OpenNI data given from depth generator
-enum { CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
- CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
- CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
- CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
- CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
-
- // Data given from RGB image generator
- CAP_OPENNI_BGR_IMAGE = 5,
- CAP_OPENNI_GRAY_IMAGE = 6
- };
-
-// Supported output modes of OpenNI image generator
-enum { CAP_OPENNI_VGA_30HZ = 0,
- CAP_OPENNI_SXGA_15HZ = 1,
- CAP_OPENNI_SXGA_30HZ = 2,
- CAP_OPENNI_QVGA_30HZ = 3,
- CAP_OPENNI_QVGA_60HZ = 4
- };
-
-
-// GStreamer
-enum { CAP_PROP_GSTREAMER_QUEUE_LENGTH = 200 // default is 1
- };
-
-
-// PVAPI
-enum { CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast master mode. 0 for disable multicast
- CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, // FrameStartTriggerMode: Determines how a frame is initiated
- CAP_PROP_PVAPI_DECIMATIONHORIZONTAL = 302, // Horizontal sub-sampling of the image
- CAP_PROP_PVAPI_DECIMATIONVERTICAL = 303, // Vertical sub-sampling of the image
- CAP_PROP_PVAPI_BINNINGX = 304, // Horizontal binning factor
- CAP_PROP_PVAPI_BINNINGY = 305, // Vertical binning factor
- CAP_PROP_PVAPI_PIXELFORMAT = 306 // Pixel format
- };
-
-// PVAPI: FrameStartTriggerMode
-enum { CAP_PVAPI_FSTRIGMODE_FREERUN = 0, // Freerun
- CAP_PVAPI_FSTRIGMODE_SYNCIN1 = 1, // SyncIn1
- CAP_PVAPI_FSTRIGMODE_SYNCIN2 = 2, // SyncIn2
- CAP_PVAPI_FSTRIGMODE_FIXEDRATE = 3, // FixedRate
- CAP_PVAPI_FSTRIGMODE_SOFTWARE = 4 // Software
- };
-
-// PVAPI: DecimationHorizontal, DecimationVertical
-enum { CAP_PVAPI_DECIMATION_OFF = 1, // Off
- CAP_PVAPI_DECIMATION_2OUTOF4 = 2, // 2 out of 4 decimation
- CAP_PVAPI_DECIMATION_2OUTOF8 = 4, // 2 out of 8 decimation
- CAP_PVAPI_DECIMATION_2OUTOF16 = 8 // 2 out of 16 decimation
- };
-
-// PVAPI: PixelFormat
-enum { CAP_PVAPI_PIXELFORMAT_MONO8 = 1, // Mono8
- CAP_PVAPI_PIXELFORMAT_MONO16 = 2, // Mono16
- CAP_PVAPI_PIXELFORMAT_BAYER8 = 3, // Bayer8
- CAP_PVAPI_PIXELFORMAT_BAYER16 = 4, // Bayer16
- CAP_PVAPI_PIXELFORMAT_RGB24 = 5, // Rgb24
- CAP_PVAPI_PIXELFORMAT_BGR24 = 6, // Bgr24
- CAP_PVAPI_PIXELFORMAT_RGBA32 = 7, // Rgba32
- CAP_PVAPI_PIXELFORMAT_BGRA32 = 8, // Bgra32
- };
-
-// Properties of cameras available through XIMEA SDK interface
-enum { CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
- CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
- CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
- CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
- CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
- CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
- CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
- CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
- CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
- CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
- CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
- CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
- CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
- CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
- CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
- CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain
- CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).
- CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure
- CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
- CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
- CAP_PROP_XI_TIMEOUT = 420 // Image capture timeout in milliseconds
- };
-
-// Properties of cameras available through AVFOUNDATION interface
-enum { CAP_PROP_IOS_DEVICE_FOCUS = 9001,
- CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,
- CAP_PROP_IOS_DEVICE_FLASH = 9003,
- CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,
- CAP_PROP_IOS_DEVICE_TORCH = 9005
- };
-
-
-// Properties of cameras available through Smartek Giganetix Ethernet Vision interface
-/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */
-enum { CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,
- CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,
- CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,
- CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,
- CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,
- CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006
- };
-
-enum { CAP_PROP_INTELPERC_PROFILE_COUNT = 11001,
- CAP_PROP_INTELPERC_PROFILE_IDX = 11002,
- CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003,
- CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004,
- CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005,
- CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006,
- CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007
- };
-
-// Intel PerC streams
-enum { CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29,
- CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28,
- CAP_INTELPERC_GENERATORS_MASK = CAP_INTELPERC_DEPTH_GENERATOR + CAP_INTELPERC_IMAGE_GENERATOR
- };
-
-enum { CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth.
- CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates.
- CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam.
- CAP_INTELPERC_IMAGE = 3
- };
-
-enum { VIDEOWRITER_PROP_QUALITY = 1, // Quality (0..100%) of the videostream encoded
- VIDEOWRITER_PROP_FRAMEBYTES = 2, // (Read-only): Size of just encoded video frame
- VIDEOWRITER_PROP_NSTRIPES = 3 // Number of stripes for parallel encoding. -1 for auto detection
- };
-
-// gPhoto2 properties, if propertyId is less than 0 then work on widget with that __additive inversed__ camera setting ID
-// Get IDs by using CAP_PROP_GPHOTO2_WIDGET_ENUMERATE.
-// @see CvCaptureCAM_GPHOTO2 for more info
-enum { CAP_PROP_GPHOTO2_PREVIEW = 17001, // Capture only preview from liveview mode.
- CAP_PROP_GPHOTO2_WIDGET_ENUMERATE = 17002, // Readonly, returns (const char *).
- CAP_PROP_GPHOTO2_RELOAD_CONFIG = 17003, // Trigger, only by set. Reload camera settings.
- CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE = 17004, // Reload all settings on set.
- CAP_PROP_GPHOTO2_COLLECT_MSGS = 17005, // Collect messages with details.
- CAP_PROP_GPHOTO2_FLUSH_MSGS = 17006, // Readonly, returns (const char *).
- CAP_PROP_SPEED = 17007, // Exposure speed. Can be readonly, depends on camera program.
- CAP_PROP_APERTURE = 17008, // Aperture. Can be readonly, depends on camera program.
- CAP_PROP_EXPOSUREPROGRAM = 17009, // Camera exposure program.
- CAP_PROP_VIEWFINDER = 17010 // Enter liveview mode.
- };
-
-//enum {
-
-class IVideoCapture;
-
-/** @brief Class for video capturing from video files, image sequences or cameras. The class provides C++ API
-for capturing video from cameras or for reading video files and image sequences. Here is how the
-class can be used: :
-@code
- #include "opencv2/opencv.hpp"
-
- using namespace cv;
-
- int main(int, char**)
- {
- VideoCapture cap(0); // open the default camera
- if(!cap.isOpened()) // check if we succeeded
- return -1;
-
- Mat edges;
- namedWindow("edges",1);
- for(;;)
- {
- Mat frame;
- cap >> frame; // get a new frame from camera
- cvtColor(frame, edges, COLOR_BGR2GRAY);
- GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
- Canny(edges, edges, 0, 30, 3);
- imshow("edges", edges);
- if(waitKey(30) >= 0) break;
- }
- // the camera will be deinitialized automatically in VideoCapture destructor
- return 0;
- }
-@endcode
-@note In C API the black-box structure CvCapture is used instead of VideoCapture.
-
-@note
-- A basic sample on using the VideoCapture interface can be found at
- opencv_source_code/samples/cpp/starter_video.cpp
-- Another basic video processing sample can be found at
- opencv_source_code/samples/cpp/video_dmtx.cpp
-- (Python) A basic sample on using the VideoCapture interface can be found at
- opencv_source_code/samples/python/video.py
-- (Python) Another basic video processing sample can be found at
- opencv_source_code/samples/python/video_dmtx.py
-- (Python) A multi threaded video processing sample can be found at
- opencv_source_code/samples/python/video_threaded.py
- */
-class CV_EXPORTS_W VideoCapture
-{
-public:
- /** @brief
- @note In C API, when you finished working with video, release CvCapture structure with
- cvReleaseCapture(), or use Ptr\ that calls cvReleaseCapture() automatically in the
- destructor.
- */
- CV_WRAP VideoCapture();
-
- /** @overload
- @param filename name of the opened video file (eg. video.avi) or image sequence (eg.
- img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
- */
- CV_WRAP VideoCapture(const String& filename);
-
- /** @overload
- @param filename name of the opened video file (eg. video.avi) or image sequence (eg.
- img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
-
- @param apiPreference preferred Capture API to use. Can be used to enforce a specific reader
- implementation if multiple are available: e.g. CAP_FFMPEG or CAP_IMAGES
- */
- CV_WRAP VideoCapture(const String& filename, int apiPreference);
-
- /** @overload
- @param index = camera_id + domain_offset (CAP_*). id of the video capturing device to open. If there is a single
- camera connected, just pass 0. Advanced Usage: to open Camera 1 using the MS Media Foundation API: index = 1 + CAP_MSMF
- */
- CV_WRAP VideoCapture(int index);
-
- virtual ~VideoCapture();
-
- /** @brief Open video file or a capturing device for video capturing
-
- @param filename name of the opened video file (eg. video.avi) or image sequence (eg.
- img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
-
- The methods first call VideoCapture::release to close the already opened file or camera.
- */
- CV_WRAP virtual bool open(const String& filename);
-
- /** @overload
- @param index = camera_id + domain_offset (CAP_*). id of the video capturing device to open. If there is a single
- camera connected, just pass 0. Advanced Usage: to open Camera 1 using the MS Media Foundation API: index = 1 + CAP_MSMF
- */
- CV_WRAP virtual bool open(int index);
-
- /** @brief Returns true if video capturing has been initialized already.
-
- If the previous call to VideoCapture constructor or VideoCapture::open succeeded, the method returns
- true.
- */
- CV_WRAP virtual bool isOpened() const;
-
- /** @brief Closes video file or capturing device.
-
- The methods are automatically called by subsequent VideoCapture::open and by VideoCapture
- destructor.
-
- The C function also deallocates memory and clears \*capture pointer.
- */
- CV_WRAP virtual void release();
-
- /** @brief Grabs the next frame from video file or capturing device.
-
- The methods/functions grab the next frame from video file or camera and return true (non-zero) in
- the case of success.
-
- The primary use of the function is in multi-camera environments, especially when the cameras do not
- have hardware synchronization. That is, you call VideoCapture::grab() for each camera and after that
- call the slower method VideoCapture::retrieve() to decode and get frame from each camera. This way
- the overhead on demosaicing or motion jpeg decompression etc. is eliminated and the retrieved frames
- from different cameras will be closer in time.
-
- Also, when a connected camera is multi-head (for example, a stereo camera or a Kinect device), the
- correct way of retrieving data from it is to call VideoCapture::grab first and then call
- VideoCapture::retrieve one or more times with different values of the channel parameter. See
-
- */
- CV_WRAP virtual bool grab();
-
- /** @brief Decodes and returns the grabbed video frame.
-
- The methods/functions decode and return the just grabbed frame. If no frames has been grabbed
- (camera has been disconnected, or there are no more frames in video file), the methods return false
- and the functions return NULL pointer.
-
- @note OpenCV 1.x functions cvRetrieveFrame and cv.RetrieveFrame return image stored inside the video
- capturing structure. It is not allowed to modify or release the image! You can copy the frame using
- :ocvcvCloneImage and then do whatever you want with the copy.
- */
- CV_WRAP virtual bool retrieve(OutputArray image, int flag = 0);
- virtual VideoCapture& operator >> (CV_OUT Mat& image);
- virtual VideoCapture& operator >> (CV_OUT UMat& image);
-
- /** @brief Grabs, decodes and returns the next video frame.
-
- The methods/functions combine VideoCapture::grab and VideoCapture::retrieve in one call. This is the
- most convenient method for reading video files or capturing data from decode and return the just
- grabbed frame. If no frames has been grabbed (camera has been disconnected, or there are no more
- frames in video file), the methods return false and the functions return NULL pointer.
-
- @note OpenCV 1.x functions cvRetrieveFrame and cv.RetrieveFrame return image stored inside the video
- capturing structure. It is not allowed to modify or release the image! You can copy the frame using
- :ocvcvCloneImage and then do whatever you want with the copy.
- */
- CV_WRAP virtual bool read(OutputArray image);
-
- /** @brief Sets a property in the VideoCapture.
-
- @param propId Property identifier. It can be one of the following:
- - **CAP_PROP_POS_MSEC** Current position of the video file in milliseconds.
- - **CAP_PROP_POS_FRAMES** 0-based index of the frame to be decoded/captured next.
- - **CAP_PROP_POS_AVI_RATIO** Relative position of the video file: 0 - start of the
- film, 1 - end of the film.
- - **CAP_PROP_FRAME_WIDTH** Width of the frames in the video stream.
- - **CAP_PROP_FRAME_HEIGHT** Height of the frames in the video stream.
- - **CAP_PROP_FPS** Frame rate.
- - **CAP_PROP_FOURCC** 4-character code of codec.
- - **CAP_PROP_FRAME_COUNT** Number of frames in the video file.
- - **CAP_PROP_FORMAT** Format of the Mat objects returned by retrieve() .
- - **CAP_PROP_MODE** Backend-specific value indicating the current capture mode.
- - **CAP_PROP_BRIGHTNESS** Brightness of the image (only for cameras).
- - **CAP_PROP_CONTRAST** Contrast of the image (only for cameras).
- - **CAP_PROP_SATURATION** Saturation of the image (only for cameras).
- - **CAP_PROP_HUE** Hue of the image (only for cameras).
- - **CAP_PROP_GAIN** Gain of the image (only for cameras).
- - **CAP_PROP_EXPOSURE** Exposure (only for cameras).
- - **CAP_PROP_CONVERT_RGB** Boolean flags indicating whether images should be converted
- to RGB.
- - **CAP_PROP_WHITE_BALANCE** Currently unsupported
- - **CAP_PROP_RECTIFICATION** Rectification flag for stereo cameras (note: only supported
- by DC1394 v 2.x backend currently)
- @param value Value of the property.
- */
- CV_WRAP virtual bool set(int propId, double value);
-
- /** @brief Returns the specified VideoCapture property
-
- @param propId Property identifier. It can be one of the following:
- - **CAP_PROP_POS_MSEC** Current position of the video file in milliseconds or video
- capture timestamp.
- - **CAP_PROP_POS_FRAMES** 0-based index of the frame to be decoded/captured next.
- - **CAP_PROP_POS_AVI_RATIO** Relative position of the video file: 0 - start of the
- film, 1 - end of the film.
- - **CAP_PROP_FRAME_WIDTH** Width of the frames in the video stream.
- - **CAP_PROP_FRAME_HEIGHT** Height of the frames in the video stream.
- - **CAP_PROP_FPS** Frame rate.
- - **CAP_PROP_FOURCC** 4-character code of codec.
- - **CAP_PROP_FRAME_COUNT** Number of frames in the video file.
- - **CAP_PROP_FORMAT** Format of the Mat objects returned by retrieve() .
- - **CAP_PROP_MODE** Backend-specific value indicating the current capture mode.
- - **CAP_PROP_BRIGHTNESS** Brightness of the image (only for cameras).
- - **CAP_PROP_CONTRAST** Contrast of the image (only for cameras).
- - **CAP_PROP_SATURATION** Saturation of the image (only for cameras).
- - **CAP_PROP_HUE** Hue of the image (only for cameras).
- - **CAP_PROP_GAIN** Gain of the image (only for cameras).
- - **CAP_PROP_EXPOSURE** Exposure (only for cameras).
- - **CAP_PROP_CONVERT_RGB** Boolean flags indicating whether images should be converted
- to RGB.
- - **CAP_PROP_WHITE_BALANCE** Currently not supported
- - **CAP_PROP_RECTIFICATION** Rectification flag for stereo cameras (note: only supported
- by DC1394 v 2.x backend currently)
-
- @note When querying a property that is not supported by the backend used by the VideoCapture
- class, value 0 is returned.
- */
- CV_WRAP virtual double get(int propId) const;
-
- /** @overload
-
- @param filename name of the opened video file (eg. video.avi) or image sequence (eg.
- img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
-
- @param apiPreference preferred Capture API to use. Can be used to enforce a specific reader
- implementation if multiple are available: e.g. CAP_FFMPEG or CAP_IMAGES
-
- The methods first call VideoCapture::release to close the already opened file or camera.
- */
- CV_WRAP virtual bool open(const String& filename, int apiPreference);
-
-protected:
- Ptr cap;
- Ptr icap;
-};
-
-class IVideoWriter;
-
-/** @brief Video writer class.
- */
-class CV_EXPORTS_W VideoWriter
-{
-public:
- /** @brief VideoWriter constructors
-
- The constructors/functions initialize video writers. On Linux FFMPEG is used to write videos; on
- Windows FFMPEG or VFW is used; on MacOSX QTKit is used.
- */
- CV_WRAP VideoWriter();
-
- /** @overload
- @param filename Name of the output video file.
- @param fourcc 4-character code of codec used to compress the frames. For example,
- VideoWriter::fourcc('P','I','M','1') is a MPEG-1 codec, VideoWriter::fourcc('M','J','P','G') is a
- motion-jpeg codec etc. List of codes can be obtained at [Video Codecs by
- FOURCC](http://www.fourcc.org/codecs.php) page. FFMPEG backend with MP4 container natively uses
- other values as fourcc code: see [ObjectType](http://www.mp4ra.org/codecs.html),
- so you may receive a warning message from OpenCV about fourcc code conversion.
- @param fps Framerate of the created video stream.
- @param frameSize Size of the video frames.
- @param isColor If it is not zero, the encoder will expect and encode color frames, otherwise it
- will work with grayscale frames (the flag is currently supported on Windows only).
- */
- CV_WRAP VideoWriter(const String& filename, int fourcc, double fps,
- Size frameSize, bool isColor = true);
-
- virtual ~VideoWriter();
-
- /** @brief Initializes or reinitializes video writer.
-
- The method opens video writer. Parameters are the same as in the constructor
- VideoWriter::VideoWriter.
- */
- CV_WRAP virtual bool open(const String& filename, int fourcc, double fps,
- Size frameSize, bool isColor = true);
-
- /** @brief Returns true if video writer has been successfully initialized.
- */
- CV_WRAP virtual bool isOpened() const;
-
- /** @brief Closes the video writer.
-
- The methods are automatically called by subsequent VideoWriter::open and by the VideoWriter
- destructor.
- */
- CV_WRAP virtual void release();
- virtual VideoWriter& operator << (const Mat& image);
-
- /** @brief Writes the next video frame
-
- @param image The written frame
-
- The functions/methods write the specified image to video file. It must have the same size as has
- been specified when opening the video writer.
- */
- CV_WRAP virtual void write(const Mat& image);
-
- /** @brief Sets a property in the VideoWriter.
-
- @param propId Property identifier. It can be one of the following:
- - **VIDEOWRITER_PROP_QUALITY** Quality (0..100%) of the videostream encoded. Can be adjusted dynamically in some codecs.
- - **VIDEOWRITER_PROP_NSTRIPES** Number of stripes for parallel encoding
- @param value Value of the property.
- */
- CV_WRAP virtual bool set(int propId, double value);
-
- /** @brief Returns the specified VideoWriter property
-
- @param propId Property identifier. It can be one of the following:
- - **VIDEOWRITER_PROP_QUALITY** Current quality of the encoded videostream.
- - **VIDEOWRITER_PROP_FRAMEBYTES** (Read-only) Size of just encoded video frame; note that the encoding order may be different from representation order.
- - **VIDEOWRITER_PROP_NSTRIPES** Number of stripes for parallel encoding
-
- @note When querying a property that is not supported by the backend used by the VideoWriter
- class, value 0 is returned.
- */
- CV_WRAP virtual double get(int propId) const;
-
- /** @brief Concatenates 4 chars to a fourcc code
-
- This static method constructs the fourcc code of the codec to be used in the constructor
- VideoWriter::VideoWriter or VideoWriter::open.
- */
- CV_WRAP static int fourcc(char c1, char c2, char c3, char c4);
-
-protected:
- Ptr writer;
- Ptr iwriter;
-
- static Ptr create(const String& filename, int fourcc, double fps,
- Size frameSize, bool isColor = true);
-};
-
-template<> CV_EXPORTS void DefaultDeleter::operator ()(CvCapture* obj) const;
-template<> CV_EXPORTS void DefaultDeleter::operator ()(CvVideoWriter* obj) const;
-
-//! @} videoio
-
-} // cv
-
-#endif //__OPENCV_VIDEOIO_HPP__
diff --git a/lib/3rdParty/OpenCV3.1/x64/v140/lib/opencv_world310.lib b/lib/3rdParty/OpenCV3.1/x64/v140/lib/opencv_world310.lib
deleted file mode 100644
index 7a20501e..00000000
Binary files a/lib/3rdParty/OpenCV3.1/x64/v140/lib/opencv_world310.lib and /dev/null differ
diff --git a/lib/3rdParty/OpenCV3.1/x86/v140/bin/opencv_world310.dll b/lib/3rdParty/OpenCV3.1/x86/v140/bin/opencv_world310.dll
deleted file mode 100644
index 681f0c9f..00000000
Binary files a/lib/3rdParty/OpenCV3.1/x86/v140/bin/opencv_world310.dll and /dev/null differ
diff --git a/lib/3rdParty/OpenCV3.1/x86/v140/lib/opencv_world310.lib b/lib/3rdParty/OpenCV3.1/x86/v140/lib/opencv_world310.lib
deleted file mode 100644
index 17c517af..00000000
Binary files a/lib/3rdParty/OpenCV3.1/x86/v140/lib/opencv_world310.lib and /dev/null differ
diff --git a/lib/3rdParty/OpenCV3.1/bin/opencv_ffmpeg310_64.dll b/lib/3rdParty/OpenCV3.4/bin/opencv_ffmpeg340.dll
similarity index 54%
rename from lib/3rdParty/OpenCV3.1/bin/opencv_ffmpeg310_64.dll
rename to lib/3rdParty/OpenCV3.4/bin/opencv_ffmpeg340.dll
index 040198c5..2a9b7aa9 100644
Binary files a/lib/3rdParty/OpenCV3.1/bin/opencv_ffmpeg310_64.dll and b/lib/3rdParty/OpenCV3.4/bin/opencv_ffmpeg340.dll differ
diff --git a/lib/3rdParty/OpenCV3.1/bin/opencv_ffmpeg310.dll b/lib/3rdParty/OpenCV3.4/bin/opencv_ffmpeg340_64.dll
similarity index 51%
rename from lib/3rdParty/OpenCV3.1/bin/opencv_ffmpeg310.dll
rename to lib/3rdParty/OpenCV3.4/bin/opencv_ffmpeg340_64.dll
index 4abb8ca9..45dc8391 100644
Binary files a/lib/3rdParty/OpenCV3.1/bin/opencv_ffmpeg310.dll and b/lib/3rdParty/OpenCV3.4/bin/opencv_ffmpeg340_64.dll differ
diff --git a/lib/3rdParty/OpenCV3.1/classifiers/haarcascade_frontalface_alt.xml b/lib/3rdParty/OpenCV3.4/classifiers/haarcascade_frontalface_alt.xml
similarity index 100%
rename from lib/3rdParty/OpenCV3.1/classifiers/haarcascade_frontalface_alt.xml
rename to lib/3rdParty/OpenCV3.4/classifiers/haarcascade_frontalface_alt.xml
diff --git a/lib/3rdParty/OpenCV3.1/classifiers/haarcascade_frontalface_alt2.xml b/lib/3rdParty/OpenCV3.4/classifiers/haarcascade_frontalface_alt2.xml
similarity index 100%
rename from lib/3rdParty/OpenCV3.1/classifiers/haarcascade_frontalface_alt2.xml
rename to lib/3rdParty/OpenCV3.4/classifiers/haarcascade_frontalface_alt2.xml
diff --git a/lib/3rdParty/OpenCV3.1/classifiers/haarcascade_frontalface_alt_tree.xml b/lib/3rdParty/OpenCV3.4/classifiers/haarcascade_frontalface_alt_tree.xml
similarity index 100%
rename from lib/3rdParty/OpenCV3.1/classifiers/haarcascade_frontalface_alt_tree.xml
rename to lib/3rdParty/OpenCV3.4/classifiers/haarcascade_frontalface_alt_tree.xml
diff --git a/lib/3rdParty/OpenCV3.1/classifiers/haarcascade_frontalface_default.xml b/lib/3rdParty/OpenCV3.4/classifiers/haarcascade_frontalface_default.xml
similarity index 100%
rename from lib/3rdParty/OpenCV3.1/classifiers/haarcascade_frontalface_default.xml
rename to lib/3rdParty/OpenCV3.4/classifiers/haarcascade_frontalface_default.xml
diff --git a/lib/3rdParty/OpenCV3.1/classifiers/haarcascade_profileface.xml b/lib/3rdParty/OpenCV3.4/classifiers/haarcascade_profileface.xml
similarity index 100%
rename from lib/3rdParty/OpenCV3.1/classifiers/haarcascade_profileface.xml
rename to lib/3rdParty/OpenCV3.4/classifiers/haarcascade_profileface.xml
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv/cv.h b/lib/3rdParty/OpenCV3.4/include/opencv/cv.h
similarity index 98%
rename from lib/3rdParty/OpenCV3.1/include/opencv/cv.h
rename to lib/3rdParty/OpenCV3.4/include/opencv/cv.h
index 0aefc6d2..19a74e29 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv/cv.h
+++ b/lib/3rdParty/OpenCV3.4/include/opencv/cv.h
@@ -40,8 +40,8 @@
//
//M*/
-#ifndef __OPENCV_OLD_CV_H__
-#define __OPENCV_OLD_CV_H__
+#ifndef OPENCV_OLD_CV_H
+#define OPENCV_OLD_CV_H
#if defined(_MSC_VER)
#define CV_DO_PRAGMA(x) __pragma(x)
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv/cv.hpp b/lib/3rdParty/OpenCV3.4/include/opencv/cv.hpp
similarity index 97%
rename from lib/3rdParty/OpenCV3.1/include/opencv/cv.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv/cv.hpp
index e498d7ac..86739564 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv/cv.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv/cv.hpp
@@ -40,8 +40,8 @@
//
//M*/
-#ifndef __OPENCV_OLD_CV_HPP__
-#define __OPENCV_OLD_CV_HPP__
+#ifndef OPENCV_OLD_CV_HPP
+#define OPENCV_OLD_CV_HPP
//#if defined(__GNUC__)
//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv/cvaux.h b/lib/3rdParty/OpenCV3.4/include/opencv/cvaux.h
similarity index 97%
rename from lib/3rdParty/OpenCV3.1/include/opencv/cvaux.h
rename to lib/3rdParty/OpenCV3.4/include/opencv/cvaux.h
index fe86c5d9..c0367cc2 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv/cvaux.h
+++ b/lib/3rdParty/OpenCV3.4/include/opencv/cvaux.h
@@ -39,8 +39,8 @@
//
//M*/
-#ifndef __OPENCV_OLD_AUX_H__
-#define __OPENCV_OLD_AUX_H__
+#ifndef OPENCV_OLD_AUX_H
+#define OPENCV_OLD_AUX_H
//#if defined(__GNUC__)
//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv/cvaux.hpp b/lib/3rdParty/OpenCV3.4/include/opencv/cvaux.hpp
similarity index 97%
rename from lib/3rdParty/OpenCV3.1/include/opencv/cvaux.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv/cvaux.hpp
index b0e60a30..4888eef2 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv/cvaux.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv/cvaux.hpp
@@ -39,8 +39,8 @@
//
//M*/
-#ifndef __OPENCV_OLD_AUX_HPP__
-#define __OPENCV_OLD_AUX_HPP__
+#ifndef OPENCV_OLD_AUX_HPP
+#define OPENCV_OLD_AUX_HPP
//#if defined(__GNUC__)
//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv/cvwimage.h b/lib/3rdParty/OpenCV3.4/include/opencv/cvwimage.h
similarity index 96%
rename from lib/3rdParty/OpenCV3.1/include/opencv/cvwimage.h
rename to lib/3rdParty/OpenCV3.4/include/opencv/cvwimage.h
index de89c927..ec0ab141 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv/cvwimage.h
+++ b/lib/3rdParty/OpenCV3.4/include/opencv/cvwimage.h
@@ -38,8 +38,8 @@
// the use of this software, even if advised of the possibility of such damage.
-#ifndef __OPENCV_OLD_WIMAGE_HPP__
-#define __OPENCV_OLD_WIMAGE_HPP__
+#ifndef OPENCV_OLD_WIMAGE_HPP
+#define OPENCV_OLD_WIMAGE_HPP
#include "opencv2/core/wimage.hpp"
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv/cxcore.h b/lib/3rdParty/OpenCV3.4/include/opencv/cxcore.h
similarity index 97%
rename from lib/3rdParty/OpenCV3.1/include/opencv/cxcore.h
rename to lib/3rdParty/OpenCV3.4/include/opencv/cxcore.h
index 0982bd75..dc070c77 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv/cxcore.h
+++ b/lib/3rdParty/OpenCV3.4/include/opencv/cxcore.h
@@ -40,8 +40,8 @@
//
//M*/
-#ifndef __OPENCV_OLD_CXCORE_H__
-#define __OPENCV_OLD_CXCORE_H__
+#ifndef OPENCV_OLD_CXCORE_H
+#define OPENCV_OLD_CXCORE_H
//#if defined(__GNUC__)
//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv/cxcore.hpp b/lib/3rdParty/OpenCV3.4/include/opencv/cxcore.hpp
similarity index 97%
rename from lib/3rdParty/OpenCV3.1/include/opencv/cxcore.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv/cxcore.hpp
index 9af4ac74..c371677c 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv/cxcore.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv/cxcore.hpp
@@ -40,8 +40,8 @@
//
//M*/
-#ifndef __OPENCV_OLD_CXCORE_HPP__
-#define __OPENCV_OLD_CXCORE_HPP__
+#ifndef OPENCV_OLD_CXCORE_HPP
+#define OPENCV_OLD_CXCORE_HPP
//#if defined(__GNUC__)
//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module"
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv/cxeigen.hpp b/lib/3rdParty/OpenCV3.4/include/opencv/cxeigen.hpp
similarity index 97%
rename from lib/3rdParty/OpenCV3.1/include/opencv/cxeigen.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv/cxeigen.hpp
index 1f04d1a3..1d3df914 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv/cxeigen.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv/cxeigen.hpp
@@ -40,8 +40,8 @@
//
//M*/
-#ifndef __OPENCV_OLD_EIGEN_HPP__
-#define __OPENCV_OLD_EIGEN_HPP__
+#ifndef OPENCV_OLD_EIGEN_HPP
+#define OPENCV_OLD_EIGEN_HPP
#include "opencv2/core/eigen.hpp"
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv/cxmisc.h b/lib/3rdParty/OpenCV3.4/include/opencv/cxmisc.h
similarity index 53%
rename from lib/3rdParty/OpenCV3.1/include/opencv/cxmisc.h
rename to lib/3rdParty/OpenCV3.4/include/opencv/cxmisc.h
index 6c93a0cc..9b9bc820 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv/cxmisc.h
+++ b/lib/3rdParty/OpenCV3.4/include/opencv/cxmisc.h
@@ -1,5 +1,5 @@
-#ifndef __OPENCV_OLD_CXMISC_H__
-#define __OPENCV_OLD_CXMISC_H__
+#ifndef OPENCV_OLD_CXMISC_H
+#define OPENCV_OLD_CXMISC_H
#ifdef __cplusplus
# include "opencv2/core/utility.hpp"
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv/highgui.h b/lib/3rdParty/OpenCV3.4/include/opencv/highgui.h
similarity index 97%
rename from lib/3rdParty/OpenCV3.1/include/opencv/highgui.h
rename to lib/3rdParty/OpenCV3.4/include/opencv/highgui.h
index 0261029c..69b394e0 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv/highgui.h
+++ b/lib/3rdParty/OpenCV3.4/include/opencv/highgui.h
@@ -39,8 +39,8 @@
//
//M*/
-#ifndef __OPENCV_OLD_HIGHGUI_H__
-#define __OPENCV_OLD_HIGHGUI_H__
+#ifndef OPENCV_OLD_HIGHGUI_H
+#define OPENCV_OLD_HIGHGUI_H
#include "opencv2/core/core_c.h"
#include "opencv2/highgui/highgui_c.h"
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv/ml.h b/lib/3rdParty/OpenCV3.4/include/opencv/ml.h
similarity index 97%
rename from lib/3rdParty/OpenCV3.1/include/opencv/ml.h
rename to lib/3rdParty/OpenCV3.4/include/opencv/ml.h
index d8e967f8..0c376bac 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv/ml.h
+++ b/lib/3rdParty/OpenCV3.4/include/opencv/ml.h
@@ -38,8 +38,8 @@
//
//M*/
-#ifndef __OPENCV_OLD_ML_H__
-#define __OPENCV_OLD_ML_H__
+#ifndef OPENCV_OLD_ML_H
+#define OPENCV_OLD_ML_H
#include "opencv2/core/core_c.h"
#include "opencv2/ml.hpp"
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/calib3d.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/calib3d.hpp
similarity index 82%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/calib3d.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/calib3d.hpp
index ddffffec..74b63c6c 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/calib3d.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/calib3d.hpp
@@ -41,8 +41,8 @@
//
//M*/
-#ifndef __OPENCV_CALIB3D_HPP__
-#define __OPENCV_CALIB3D_HPP__
+#ifndef OPENCV_CALIB3D_HPP
+#define OPENCV_CALIB3D_HPP
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
@@ -96,6 +96,10 @@ u = f_x*x' + c_x \\
v = f_y*y' + c_y
\end{array}\f]
+The following figure illustrates the pinhole camera model.
+
+
+
Real lenses usually have some distortion, mostly radial distortion and slight tangential distortion.
So, the above model is extended as:
@@ -114,6 +118,10 @@ v = f_y*y'' + c_y
tangential distortion coefficients. \f$s_1\f$, \f$s_2\f$, \f$s_3\f$, and \f$s_4\f$, are the thin prism distortion
coefficients. Higher-order coefficients are not considered in OpenCV.
+The next figure shows two common types of radial distortion: barrel distortion (typically \f$ k_1 > 0 \f$ and pincushion distortion (typically \f$ k_1 < 0 \f$).
+
+
+
In some cases the image sensor may be tilted in order to focus an oblique plane in front of the
camera (Scheimpfug condition). This can be useful for particle image velocimetry (PIV) or
triangulation with a laser fan. The tilt causes a perspective distortion of \f$x''\f$ and
@@ -190,7 +198,7 @@ pattern (every view is described by several 3D-2D point correspondences).
\f[x = Xc_1 \\ y = Xc_2 \\ z = Xc_3\f]
- The pinehole projection coordinates of P is [a; b] where
+ The pinhole projection coordinates of P is [a; b] where
\f[a = x / z \ and \ b = y / z \\ r^2 = a^2 + b^2 \\ \theta = atan(r)\f]
@@ -200,12 +208,12 @@ pattern (every view is described by several 3D-2D point correspondences).
The distorted point coordinates are [x'; y'] where
- \f[x' = (\theta_d / r) x \\ y' = (\theta_d / r) y \f]
+ \f[x' = (\theta_d / r) a \\ y' = (\theta_d / r) b \f]
Finally, conversion into pixel coordinates: The final pixel coordinates vector [u; v] where:
\f[u = f_x (x' + \alpha y') + c_x \\
- v = f_y yy + c_y\f]
+ v = f_y y' + c_y\f]
@defgroup calib3d_c C API
@@ -228,8 +236,9 @@ enum { SOLVEPNP_ITERATIVE = 0,
SOLVEPNP_EPNP = 1, //!< EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp
SOLVEPNP_P3P = 2, //!< Complete Solution Classification for the Perspective-Three-Point Problem @cite gao2003complete
SOLVEPNP_DLS = 3, //!< A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct
- SOLVEPNP_UPNP = 4 //!< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive
-
+ SOLVEPNP_UPNP = 4, //!< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive
+ SOLVEPNP_AP3P = 5, //!< An Efficient Algebraic Solution to the Perspective-Three-Point Problem @cite Ke17
+ SOLVEPNP_MAX_COUNT //!< Used for count
};
enum { CALIB_CB_ADAPTIVE_THRESH = 1,
@@ -259,6 +268,8 @@ enum { CALIB_USE_INTRINSIC_GUESS = 0x00001,
CALIB_FIX_S1_S2_S3_S4 = 0x10000,
CALIB_TILTED_MODEL = 0x40000,
CALIB_FIX_TAUX_TAUY = 0x80000,
+ CALIB_USE_QR = 0x100000, //!< use QR instead of SVD decomposition for solving. Faster but potentially less precise
+ CALIB_FIX_TANGENT_DIST = 0x200000,
// only for stereo
CALIB_FIX_INTRINSIC = 0x00100,
CALIB_SAME_FOCAL_LENGTH = 0x00200,
@@ -295,6 +306,12 @@ optimization procedures like calibrateCamera, stereoCalibrate, or solvePnP .
*/
CV_EXPORTS_W void Rodrigues( InputArray src, OutputArray dst, OutputArray jacobian = noArray() );
+/** @example pose_from_homography.cpp
+ An example program about pose estimation from coplanar points
+
+ Check @ref tutorial_homography "the corresponding tutorial" for more details
+ */
+
/** @brief Finds a perspective transformation between two planes.
@param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
@@ -316,7 +333,7 @@ mask values are ignored.
@param maxIters The maximum number of RANSAC iterations, 2000 is the maximum it can be.
@param confidence Confidence level, between 0 and 1.
-The functions find and return the perspective transformation \f$H\f$ between the source and the
+The function finds and returns the perspective transformation \f$H\f$ between the source and the
destination planes:
\f[s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\f]
@@ -351,13 +368,8 @@ determined up to a scale. Thus, it is normalized so that \f$h_{33}=1\f$. Note th
cannot be estimated, an empty one will be returned.
@sa
- getAffineTransform, getPerspectiveTransform, estimateRigidTransform, warpPerspective,
- perspectiveTransform
-
-@note
- - A example on calculating a homography for image matching can be found at
- opencv_source_code/samples/cpp/video_homography.cpp
-
+getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
+perspectiveTransform
*/
CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
int method = 0, double ransacReprojThreshold = 3,
@@ -383,8 +395,8 @@ and a rotation matrix.
It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
-sequence of rotations about the three principle axes that results in the same orientation of an
-object, eg. see @cite Slabaugh . Returned tree rotation matrices and corresponding three Euler angules
+sequence of rotations about the three principal axes that results in the same orientation of an
+object, e.g. see @cite Slabaugh . Returned three rotation matrices and corresponding three Euler angles
are only one of the possible solutions.
*/
CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
@@ -409,8 +421,8 @@ matrix and the position of a camera.
It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
be used in OpenGL. Note, there is always more than one sequence of rotations about the three
-principle axes that results in the same orientation of an object, eg. see @cite Slabaugh . Returned
-tree rotation matrices and corresponding three Euler angules are only one of the possible solutions.
+principal axes that results in the same orientation of an object, e.g. see @cite Slabaugh . Returned
+three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
The function is based on RQDecomp3x3 .
*/
@@ -513,21 +525,27 @@ CV_EXPORTS_W void projectPoints( InputArray objectPoints,
OutputArray jacobian = noArray(),
double aspectRatio = 0 );
+/** @example homography_from_camera_displacement.cpp
+ An example program about homography from the camera displacement
+
+ Check @ref tutorial_homography "the corresponding tutorial" for more details
+ */
+
/** @brief Finds an object pose from 3D-2D point correspondences.
-@param objectPoints Array of object points in the object coordinate space, 3xN/Nx3 1-channel or
+@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector\<Point3f\> can be also passed here.
-@param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel,
+@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector\<Point2f\> can be also passed here.
@param cameraMatrix Input camera matrix \f$A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\f$ .
@param distCoeffs Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
-@param rvec Output rotation vector (see Rodrigues ) that, together with tvec , brings points from
+@param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec , brings points from
the model coordinate system to the camera coordinate system.
@param tvec Output translation vector.
-@param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses
+@param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
@param flags Method for solving a PnP problem:
@@ -536,20 +554,116 @@ this case the function finds such a pose that minimizes reprojection error, that
of squared distances between the observed projections imagePoints and the projected (using
projectPoints ) objectPoints .
- **SOLVEPNP_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
-"Complete Solution Classification for the Perspective-Three-Point Problem". In this case the
-function requires exactly four object and image points.
+"Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete).
+In this case the function requires exactly four object and image points.
+- **SOLVEPNP_AP3P** Method is based on the paper of T. Ke, S. Roumeliotis
+"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17).
+In this case the function requires exactly four object and image points.
- **SOLVEPNP_EPNP** Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
-paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation".
+paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (@cite lepetit2009epnp).
- **SOLVEPNP_DLS** Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
-"A Direct Least-Squares (DLS) Method for PnP".
+"A Direct Least-Squares (DLS) Method for PnP" (@cite hesch2011direct).
- **SOLVEPNP_UPNP** Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
-Estimation". In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$
+Estimation" (@cite penate2013exhaustive). In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$
assuming that both have the same value. Then the cameraMatrix is updated with the estimated
focal length.
+- **SOLVEPNP_AP3P** Method is based on the paper of Tong Ke and Stergios I. Roumeliotis.
+"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17). In this case the
+function requires exactly four object and image points.
The function estimates the object pose given a set of object points, their corresponding image
-projections, as well as the camera matrix and the distortion coefficients.
+projections, as well as the camera matrix and the distortion coefficients, see the figure below
+(more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
+and the Z-axis forward).
+
+
+
+Points expressed in the world frame \f$ \bf{X}_w \f$ are projected into the image plane \f$ \left[ u, v \right] \f$
+using the perspective projection model \f$ \Pi \f$ and the camera intrinsic parameters matrix \f$ \bf{A} \f$:
+
+\f[
+ \begin{align*}
+ \begin{bmatrix}
+ u \\
+ v \\
+ 1
+ \end{bmatrix} &=
+ \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{M}_w
+ \begin{bmatrix}
+ X_{w} \\
+ Y_{w} \\
+ Z_{w} \\
+ 1
+ \end{bmatrix} \\
+ \begin{bmatrix}
+ u \\
+ v \\
+ 1
+ \end{bmatrix} &=
+ \begin{bmatrix}
+ f_x & 0 & c_x \\
+ 0 & f_y & c_y \\
+ 0 & 0 & 1
+ \end{bmatrix}
+ \begin{bmatrix}
+ 1 & 0 & 0 & 0 \\
+ 0 & 1 & 0 & 0 \\
+ 0 & 0 & 1 & 0
+ \end{bmatrix}
+ \begin{bmatrix}
+ r_{11} & r_{12} & r_{13} & t_x \\
+ r_{21} & r_{22} & r_{23} & t_y \\
+ r_{31} & r_{32} & r_{33} & t_z \\
+ 0 & 0 & 0 & 1
+ \end{bmatrix}
+ \begin{bmatrix}
+ X_{w} \\
+ Y_{w} \\
+ Z_{w} \\
+ 1
+ \end{bmatrix}
+ \end{align*}
+\f]
+
+The estimated pose is thus the rotation (`rvec`) and the translation (`tvec`) vectors that allow to transform
+a 3D point expressed in the world frame into the camera frame:
+
+\f[
+ \begin{align*}
+ \begin{bmatrix}
+ X_c \\
+ Y_c \\
+ Z_c \\
+ 1
+ \end{bmatrix} &=
+ \hspace{0.2em} ^{c}\bf{M}_w
+ \begin{bmatrix}
+ X_{w} \\
+ Y_{w} \\
+ Z_{w} \\
+ 1
+ \end{bmatrix} \\
+ \begin{bmatrix}
+ X_c \\
+ Y_c \\
+ Z_c \\
+ 1
+ \end{bmatrix} &=
+ \begin{bmatrix}
+ r_{11} & r_{12} & r_{13} & t_x \\
+ r_{21} & r_{22} & r_{23} & t_y \\
+ r_{31} & r_{32} & r_{33} & t_z \\
+ 0 & 0 & 0 & 1
+ \end{bmatrix}
+ \begin{bmatrix}
+ X_{w} \\
+ Y_{w} \\
+ Z_{w} \\
+ 1
+ \end{bmatrix}
+ \end{align*}
+\f]
@note
- An example of how to use solvePnP for planar augmented reality can be found at
@@ -564,6 +678,15 @@ projections, as well as the camera matrix and the distortion coefficients.
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
+ - The methods **SOLVEPNP_DLS** and **SOLVEPNP_UPNP** cannot be used as the current implementations are
+ unstable and sometimes give completely wrong results. If you pass one of these two
+ flags, **SOLVEPNP_EPNP** method will be used instead.
+ - The minimum number of points is 4 in the general case. In the case of **SOLVEPNP_P3P** and **SOLVEPNP_AP3P**
+ methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
+ of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
+ - With **SOLVEPNP_ITERATIVE** method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
+ are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
+ global solution to converge.
*/
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
@@ -572,9 +695,9 @@ CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
/** @brief Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
-@param objectPoints Array of object points in the object coordinate space, 3xN/Nx3 1-channel or
+@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector\<Point3f\> can be also passed here.
-@param imagePoints Array of corresponding image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel,
+@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector\<Point2f\> can be also passed here.
@param cameraMatrix Input camera matrix \f$A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\f$ .
@param distCoeffs Input vector of distortion coefficients
@@ -604,6 +727,13 @@ makes the function resistant to outliers.
@note
- An example of how to use solvePNPRansac for object detection can be found at
opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
+ - The default method used to estimate the camera pose for the Minimal Sample Sets step
+ is #SOLVEPNP_EPNP. Exceptions are:
+ - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
+ - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
+ - The method used to estimate the camera pose using all the inliers is defined by the
+ flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
+ the method #SOLVEPNP_EPNP will be used instead.
*/
CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
@@ -611,6 +741,33 @@ CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoint
bool useExtrinsicGuess = false, int iterationsCount = 100,
float reprojectionError = 8.0, double confidence = 0.99,
OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE );
+/** @brief Finds an object pose from 3 3D-2D point correspondences.
+
+@param objectPoints Array of object points in the object coordinate space, 3x3 1-channel or
+1x3/3x1 3-channel. vector\<Point3f\> can be also passed here.
+@param imagePoints Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel.
+ vector\<Point2f\> can be also passed here.
+@param cameraMatrix Input camera matrix \f$A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\f$ .
+@param distCoeffs Input vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
+4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
+assumed.
+@param rvecs Output rotation vectors (see Rodrigues ) that, together with tvecs , brings points from
+the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions.
+@param tvecs Output translation vectors.
+@param flags Method for solving a P3P problem:
+- **SOLVEPNP_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
+"Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete).
+- **SOLVEPNP_AP3P** Method is based on the paper of Tong Ke and Stergios I. Roumeliotis.
+"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17).
+
+The function estimates the object pose given 3 object points, their corresponding image
+projections, as well as the camera matrix and the distortion coefficients.
+ */
+CV_EXPORTS_W int solveP3P( InputArray objectPoints, InputArray imagePoints,
+ InputArray cameraMatrix, InputArray distCoeffs,
+ OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
+ int flags );
/** @brief Finds an initial camera matrix from 3D-2D point correspondences.
@@ -638,11 +795,11 @@ CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
( patternSize = cvSize(points_per_row,points_per_colum) = cvSize(columns,rows) ).
@param corners Output array of detected corners.
@param flags Various operation flags that can be zero or a combination of the following values:
-- **CV_CALIB_CB_ADAPTIVE_THRESH** Use adaptive thresholding to convert the image to black
+- **CALIB_CB_ADAPTIVE_THRESH** Use adaptive thresholding to convert the image to black
and white, rather than a fixed threshold level (computed from the average image brightness).
-- **CV_CALIB_CB_NORMALIZE_IMAGE** Normalize the image gamma with equalizeHist before
+- **CALIB_CB_NORMALIZE_IMAGE** Normalize the image gamma with equalizeHist before
applying fixed or adaptive thresholding.
-- **CV_CALIB_CB_FILTER_QUADS** Use additional criteria (like contour area, perimeter,
+- **CALIB_CB_FILTER_QUADS** Use additional criteria (like contour area, perimeter,
square-like shape) to filter out false quads extracted at the contour retrieval stage.
- **CALIB_CB_FAST_CHECK** Run a fast check on the image that looks for chessboard corners,
and shortcut the call if none is found. This can drastically speed up the call in the
@@ -701,6 +858,38 @@ found, or as colored corners connected with lines if the board was found.
CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
InputArray corners, bool patternWasFound );
+struct CV_EXPORTS_W_SIMPLE CirclesGridFinderParameters
+{
+ CV_WRAP CirclesGridFinderParameters();
+ CV_PROP_RW cv::Size2f densityNeighborhoodSize;
+ CV_PROP_RW float minDensity;
+ CV_PROP_RW int kmeansAttempts;
+ CV_PROP_RW int minDistanceToAddKeypoint;
+ CV_PROP_RW int keypointScale;
+ CV_PROP_RW float minGraphConfidence;
+ CV_PROP_RW float vertexGain;
+ CV_PROP_RW float vertexPenalty;
+ CV_PROP_RW float existingVertexGain;
+ CV_PROP_RW float edgeGain;
+ CV_PROP_RW float edgePenalty;
+ CV_PROP_RW float convexHullFactor;
+ CV_PROP_RW float minRNGEdgeSwitchDist;
+
+ enum GridType
+ {
+ SYMMETRIC_GRID, ASYMMETRIC_GRID
+ };
+ GridType gridType;
+};
+
+struct CV_EXPORTS_W_SIMPLE CirclesGridFinderParameters2 : public CirclesGridFinderParameters
+{
+ CV_WRAP CirclesGridFinderParameters2();
+
+ CV_PROP_RW float squareSize; //!< Distance between two adjacent points. Used by CALIB_CB_CLUSTERING.
+ CV_PROP_RW float maxRectifiedDistance; //!< Max deviation from prediction. Used by CALIB_CB_CLUSTERING.
+};
+
/** @brief Finds centers in the grid of circles.
@param image grid view of input circles; it must be an 8-bit grayscale or color image.
@@ -713,6 +902,7 @@ CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSiz
- **CALIB_CB_CLUSTERING** uses a special algorithm for grid detection. It is more robust to
perspective distortions but much more sensitive to background clutter.
@param blobDetector feature detector that finds blobs like dark circles on light background.
+@param parameters struct for finding circles in a grid pattern.
The function attempts to determine whether the input image contains a grid of circles. If it is, the
function locates centers of the circles. The function returns a non-zero value if all of the centers
@@ -732,6 +922,18 @@ Sample usage of detecting and drawing the centers of circles: :
@note The function requires white space (like a square-thick border, the wider the better) around
the board to make the detection more robust in various environments.
*/
+CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
+ OutputArray centers, int flags,
+ const Ptr<FeatureDetector> &blobDetector,
+ CirclesGridFinderParameters parameters);
+
+/** @overload */
+CV_EXPORTS_W bool findCirclesGrid2( InputArray image, Size patternSize,
+ OutputArray centers, int flags,
+ const Ptr<FeatureDetector> &blobDetector,
+ CirclesGridFinderParameters2 parameters);
+
+/** @overload */
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID,
const Ptr<FeatureDetector> &blobDetector = SimpleBlobDetector::create());
@@ -756,7 +958,7 @@ together.
@param imageSize Size of the image used only to initialize the intrinsic camera matrix.
@param cameraMatrix Output 3x3 floating-point camera matrix
\f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
-and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
+and/or CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
initialized before calling the function.
@param distCoeffs Output vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
@@ -767,25 +969,33 @@ k-th translation vector (see the next output parameter description) brings the c
from the model coordinate space (in which object points are specified) to the world coordinate
space, that is, a real position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
@param tvecs Output vector of translation vectors estimated for each pattern view.
+@param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
+ Order of deviations values:
+\f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
+ s_4, \tau_x, \tau_y)\f$ If one of the parameters is not estimated, its deviation is equal to zero.
+@param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
+ Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views,
+ \f$R_i, T_i\f$ are concatenated 1x3 vectors.
+ @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
@param flags Different flags that may be zero or a combination of the following values:
-- **CV_CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of
+- **CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
center ( imageSize is used), and focal distances are computed in a least-squares fashion.
Note, that if intrinsic parameters are known, there is no need to use this function just to
estimate extrinsic parameters. Use solvePnP instead.
-- **CV_CALIB_FIX_PRINCIPAL_POINT** The principal point is not changed during the global
+- **CALIB_FIX_PRINCIPAL_POINT** The principal point is not changed during the global
optimization. It stays at the center or at a different location specified when
-CV_CALIB_USE_INTRINSIC_GUESS is set too.
-- **CV_CALIB_FIX_ASPECT_RATIO** The functions considers only fy as a free parameter. The
+CALIB_USE_INTRINSIC_GUESS is set too.
+- **CALIB_FIX_ASPECT_RATIO** The function considers only fy as a free parameter. The
ratio fx/fy stays the same as in the input cameraMatrix . When
-CV_CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
+CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
ignored, only their ratio is computed and used further.
-- **CV_CALIB_ZERO_TANGENT_DIST** Tangential distortion coefficients \f$(p_1, p_2)\f$ are set
+- **CALIB_ZERO_TANGENT_DIST** Tangential distortion coefficients \f$(p_1, p_2)\f$ are set
to zeros and stay zero.
-- **CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6** The corresponding radial distortion
-coefficient is not changed during the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is
+- **CALIB_FIX_K1,...,CALIB_FIX_K6** The corresponding radial distortion
+coefficient is not changed during the optimization. If CALIB_USE_INTRINSIC_GUESS is
set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
-- **CV_CALIB_RATIONAL_MODEL** Coefficients k4, k5, and k6 are enabled. To provide the
+- **CALIB_RATIONAL_MODEL** Coefficients k4, k5, and k6 are enabled. To provide the
backward compatibility, this extra flag should be explicitly specified to make the
calibration function use the rational model and return 8 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
@@ -794,24 +1004,26 @@ backward compatibility, this extra flag should be explicitly specified to make t
calibration function use the thin prism model and return 12 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
- **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during
-the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
+the optimization. If CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- **CALIB_TILTED_MODEL** Coefficients tauX and tauY are enabled. To provide the
backward compatibility, this extra flag should be explicitly specified to make the
calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
- **CALIB_FIX_TAUX_TAUY** The coefficients of the tilted sensor model are not changed during
-the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
+the optimization. If CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
supplied distCoeffs matrix is used. Otherwise, it is set to 0.
@param criteria Termination criteria for the iterative optimization algorithm.
+@return the overall RMS re-projection error.
+
The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
views. The algorithm is based on @cite Zhang2000 and @cite BouguetMCT . The coordinates of 3D object
points and their corresponding 2D projections in each view must be specified. That may be achieved
by using an object with a known geometry and easily detectable feature points. Such an object is
called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
a calibration rig (see findChessboardCorners ). Currently, initialization of intrinsic parameters
-(when CV_CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
+(when CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
be used as long as initial cameraMatrix is provided.
@@ -819,7 +1031,7 @@ The algorithm performs the following steps:
- Compute the initial intrinsic parameters (the option only available for planar calibration
patterns) or read them from the input parameters. The distortion coefficients are all set to
- zeros initially unless some of CV_CALIB_FIX_K? are specified.
+ zeros initially unless some of CALIB_FIX_K? are specified.
- Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
done using solvePnP .
@@ -829,8 +1041,6 @@ The algorithm performs the following steps:
the projected (using the current estimates for camera parameters and the poses) object points
objectPoints. See projectPoints for details.
-The function returns the final re-projection error.
-
@note
If you use a non-square (=non-NxN) grid and findChessboardCorners for calibration, and
calibrateCamera returns bad values (zero distortion coefficients, an image center very far from
@@ -841,6 +1051,24 @@ The function returns the final re-projection error.
@sa
findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
*/
+CV_EXPORTS_AS(calibrateCameraExtended) double calibrateCamera( InputArrayOfArrays objectPoints,
+ InputArrayOfArrays imagePoints, Size imageSize,
+ InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
+ OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
+ OutputArray stdDeviationsIntrinsics,
+ OutputArray stdDeviationsExtrinsics,
+ OutputArray perViewErrors,
+ int flags = 0, TermCriteria criteria = TermCriteria(
+ TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
+
+/** @overload double calibrateCamera( InputArrayOfArrays objectPoints,
+ InputArrayOfArrays imagePoints, Size imageSize,
+ InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
+ OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
+ OutputArray stdDeviations, OutputArray perViewErrors,
+ int flags = 0, TermCriteria criteria = TermCriteria(
+ TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) )
+ */
CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints, Size imageSize,
InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
@@ -883,8 +1111,8 @@ observed by the first camera.
observed by the second camera.
@param cameraMatrix1 Input/output first camera matrix:
\f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
-any of CV_CALIB_USE_INTRINSIC_GUESS , CV_CALIB_FIX_ASPECT_RATIO ,
-CV_CALIB_FIX_INTRINSIC , or CV_CALIB_FIX_FOCAL_LENGTH are specified, some or all of the
+any of CALIB_USE_INTRINSIC_GUESS , CALIB_FIX_ASPECT_RATIO ,
+CALIB_FIX_INTRINSIC , or CALIB_FIX_FOCAL_LENGTH are specified, some or all of the
matrix components must be initialized. See the flags description for details.
@param distCoeffs1 Input/output vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
@@ -898,21 +1126,21 @@ is similar to distCoeffs1 .
@param E Output essential matrix.
@param F Output fundamental matrix.
@param flags Different flags that may be zero or a combination of the following values:
-- **CV_CALIB_FIX_INTRINSIC** Fix cameraMatrix? and distCoeffs? so that only R, T, E , and F
+- **CALIB_FIX_INTRINSIC** Fix cameraMatrix? and distCoeffs? so that only R, T, E , and F
matrices are estimated.
-- **CV_CALIB_USE_INTRINSIC_GUESS** Optimize some or all of the intrinsic parameters
+- **CALIB_USE_INTRINSIC_GUESS** Optimize some or all of the intrinsic parameters
according to the specified flags. Initial values are provided by the user.
-- **CV_CALIB_FIX_PRINCIPAL_POINT** Fix the principal points during the optimization.
-- **CV_CALIB_FIX_FOCAL_LENGTH** Fix \f$f^{(j)}_x\f$ and \f$f^{(j)}_y\f$ .
-- **CV_CALIB_FIX_ASPECT_RATIO** Optimize \f$f^{(j)}_y\f$ . Fix the ratio \f$f^{(j)}_x/f^{(j)}_y\f$
+- **CALIB_FIX_PRINCIPAL_POINT** Fix the principal points during the optimization.
+- **CALIB_FIX_FOCAL_LENGTH** Fix \f$f^{(j)}_x\f$ and \f$f^{(j)}_y\f$ .
+- **CALIB_FIX_ASPECT_RATIO** Optimize \f$f^{(j)}_y\f$ . Fix the ratio \f$f^{(j)}_x/f^{(j)}_y\f$
.
-- **CV_CALIB_SAME_FOCAL_LENGTH** Enforce \f$f^{(0)}_x=f^{(1)}_x\f$ and \f$f^{(0)}_y=f^{(1)}_y\f$ .
-- **CV_CALIB_ZERO_TANGENT_DIST** Set tangential distortion coefficients for each camera to
+- **CALIB_SAME_FOCAL_LENGTH** Enforce \f$f^{(0)}_x=f^{(1)}_x\f$ and \f$f^{(0)}_y=f^{(1)}_y\f$ .
+- **CALIB_ZERO_TANGENT_DIST** Set tangential distortion coefficients for each camera to
zeros and fix there.
-- **CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6** Do not change the corresponding radial
-distortion coefficient during the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set,
+- **CALIB_FIX_K1,...,CALIB_FIX_K6** Do not change the corresponding radial
+distortion coefficient during the optimization. If CALIB_USE_INTRINSIC_GUESS is set,
the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
-- **CV_CALIB_RATIONAL_MODEL** Enable coefficients k4, k5, and k6. To provide the backward
+- **CALIB_RATIONAL_MODEL** Enable coefficients k4, k5, and k6. To provide the backward
compatibility, this extra flag should be explicitly specified to make the calibration
function use the rational model and return 8 coefficients. If the flag is not set, the
function computes and returns only 5 distortion coefficients.
@@ -921,14 +1149,14 @@ backward compatibility, this extra flag should be explicitly specified to make t
calibration function use the thin prism model and return 12 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
- **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during
-the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
+the optimization. If CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- **CALIB_TILTED_MODEL** Coefficients tauX and tauY are enabled. To provide the
backward compatibility, this extra flag should be explicitly specified to make the
calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
- **CALIB_FIX_TAUX_TAUY** The coefficients of the tilted sensor model are not changed during
-the optimization. If CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
+the optimization. If CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
supplied distCoeffs matrix is used. Otherwise, it is set to 0.
@param criteria Termination criteria for the iterative optimization algorithm.
@@ -940,8 +1168,8 @@ This means that, given ( \f$R_1\f$,\f$T_1\f$ ), it should be possible to compute
need to know the position and orientation of the second camera relative to the first camera. This is
what the described function does. It computes ( \f$R\f$,\f$T\f$ ) so that:
-\f[R_2=R*R_1
-T_2=R*T_1 + T,\f]
+\f[R_2=R*R_1\f]
+\f[T_2=R*T_1 + T,\f]
Optionally, it computes the essential matrix E:
@@ -956,10 +1184,10 @@ Besides the stereo-related information, the function can also perform a full cal
two cameras. However, due to the high dimensionality of the parameter space and noise in the input
data, the function can diverge from the correct solution. If the intrinsic parameters can be
estimated with high accuracy for each of the cameras individually (for example, using
-calibrateCamera ), you are recommended to do so and then pass CV_CALIB_FIX_INTRINSIC flag to the
+calibrateCamera ), you are recommended to do so and then pass CALIB_FIX_INTRINSIC flag to the
function along with the computed intrinsic parameters. Otherwise, if all the parameters are
estimated at once, it makes sense to restrict some parameters, for example, pass
-CV_CALIB_SAME_FOCAL_LENGTH and CV_CALIB_ZERO_TANGENT_DIST flags, which is usually a
+CALIB_SAME_FOCAL_LENGTH and CALIB_ZERO_TANGENT_DIST flags, which is usually a
reasonable assumption.
Similarly to calibrateCamera , the function minimizes the total re-projection error for all the
@@ -991,7 +1219,7 @@ camera.
@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.
@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
-@param flags Operation flags that may be zero or CV_CALIB_ZERO_DISPARITY . If the flag is set,
+@param flags Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
@@ -1030,7 +1258,7 @@ coordinates. The function distinguishes the following two cases:
\f[\texttt{P2} = \begin{bmatrix} f & 0 & cx_2 & T_x*f \\ 0 & f & cy & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix} ,\f]
where \f$T_x\f$ is a horizontal shift between the cameras and \f$cx_1=cx_2\f$ if
- CV_CALIB_ZERO_DISPARITY is set.
+ CALIB_ZERO_DISPARITY is set.
- **Vertical stereo**: the first and the second camera views are shifted relative to each other
mainly in vertical direction (and probably a bit in the horizontal direction too). The epipolar
@@ -1076,7 +1304,7 @@ findFundamentalMat .
@param threshold Optional threshold used to filter out the outliers. If the parameter is greater
than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
for which \f$|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|>\texttt{threshold}\f$ ) are
-rejected prior to computing the homographies. Otherwise,all the points are considered inliers.
+rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
The function computes the rectification transformations without knowing intrinsic parameters of the
cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
@@ -1120,7 +1348,7 @@ assumed.
@param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
valid) and 1 (when all the source image pixels are retained in the undistorted image). See
stereoRectify for details.
-@param newImgSize Image size after rectification. By default,it is set to imageSize .
+@param newImgSize Image size after rectification. By default, it is set to imageSize .
@param validPixROI Optional output rectangle that outlines all-good-pixels region in the
undistorted image. See roi1, roi2 description in stereoRectify .
@param centerPrincipalPoint Optional flag that indicates whether in the new camera matrix the
@@ -1131,7 +1359,7 @@ best fit a subset of the source image (determined by alpha) to the corrected ima
The function computes and returns the optimal new camera matrix based on the free scaling parameter.
By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
image pixels if there is valuable information in the corners alpha=1 , or get something in between.
-When alpha\>0 , the undistortion result is likely to have some black pixels corresponding to
+When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to
"virtual" pixels outside of the captured distorted image. The original camera matrix, distortion
coefficients, the computed new camera matrix, and newImageSize should be passed to
initUndistortRectifyMap to produce the maps for remap .
@@ -1242,15 +1470,15 @@ be floating-point (single or double precision).
@param cameraMatrix Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
Note that this function assumes that points1 and points2 are feature points from cameras with the
same camera matrix.
-@param method Method for computing a fundamental matrix.
+@param method Method for computing an essential matrix.
- **RANSAC** for the RANSAC algorithm.
-- **MEDS** for the LMedS algorithm.
+- **LMEDS** for the LMedS algorithm.
+@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
+confidence (probability) that the estimated matrix is correct.
@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
line in pixels, beyond which the point is considered an outlier and is not used for computing the
final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
point localization, image resolution, and the image noise.
-@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
-confidence (probability) that the estimated matrix is correct.
@param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
for the other points. The array is computed only in the RANSAC and LMedS methods.
@@ -1273,8 +1501,8 @@ CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2,
be floating-point (single or double precision).
@param points2 Array of the second image points of the same size and format as points1 .
@param focal focal length of the camera. Note that this function assumes that points1 and points2
-are feature points from cameras with same focal length and principle point.
-@param pp principle point of the camera.
+are feature points from cameras with same focal length and principal point.
+@param pp principal point of the camera.
@param method Method for computing a fundamental matrix.
- **RANSAC** for the RANSAC algorithm.
- **LMEDS** for the LMedS algorithm.
@@ -1327,7 +1555,7 @@ floating-point (single or double precision).
Note that this function assumes that points1 and points2 are feature points from cameras with the
same camera matrix.
@param R Recovered relative rotation.
-@param t Recoverd relative translation.
+@param t Recovered relative translation.
@param mask Input/output mask for inliers in points1 and points2.
: If it is not empty, then it marks inliers in points1 and points2 for then given essential
matrix E. Only these inliers will be used to recover pose. In the output mask only inliers
@@ -1370,10 +1598,10 @@ CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray point
floating-point (single or double precision).
@param points2 Array of the second image points of the same size and format as points1 .
@param R Recovered relative rotation.
-@param t Recoverd relative translation.
+@param t Recovered relative translation.
@param focal Focal length of the camera. Note that this function assumes that points1 and points2
-are feature points from cameras with same focal length and principle point.
-@param pp Principle point of the camera.
+are feature points from cameras with same focal length and principal point.
+@param pp principal point of the camera.
@param mask Input/output mask for inliers in points1 and points2.
: If it is not empty, then it marks inliers in points1 and points2 for then given essential
matrix E. Only these inliers will be used to recover pose. In the output mask only inliers
@@ -1394,6 +1622,28 @@ CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray point
double focal = 1.0, Point2d pp = Point2d(0, 0),
InputOutputArray mask = noArray() );
+/** @overload
+@param E The input essential matrix.
+@param points1 Array of N 2D points from the first image. The point coordinates should be
+floating-point (single or double precision).
+@param points2 Array of the second image points of the same size and format as points1.
+@param cameraMatrix Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
+Note that this function assumes that points1 and points2 are feature points from cameras with the
+same camera matrix.
+@param R Recovered relative rotation.
+@param t Recovered relative translation.
+@param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite points).
+@param mask Input/output mask for inliers in points1 and points2.
+: If it is not empty, then it marks inliers in points1 and points2 for the given essential
+matrix E. Only these inliers will be used to recover pose. In the output mask only inliers
+which pass the cheirality check.
+@param triangulatedPoints 3d points which were reconstructed by triangulation.
+ */
+
+CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,
+ InputArray cameraMatrix, OutputArray R, OutputArray t, double distanceThresh, InputOutputArray mask = noArray(),
+ OutputArray triangulatedPoints = noArray());
+
/** @brief For points in an image of a stereo pair, computes the corresponding epilines in the other image.
@param points Input points. \f$N \times 1\f$ or \f$1 \times N\f$ matrix of type CV_32FC2 or
@@ -1504,7 +1754,7 @@ to 3D points with a very large Z value (currently set to 10000).
depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
The function transforms a single-channel disparity map to a 3-channel image representing a 3D
-surface. That is, for each pixel (x,y) andthe corresponding disparity d=disparity(x,y) , it
+surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
computes:
\f[\begin{array}{l} [X \; Y \; Z \; W]^T = \texttt{Q} *[x \; y \; \texttt{disparity} (x,y) \; 1]^T \\ \texttt{\_3dImage} (x,y) = (X/W, \; Y/W, \; Z/W) \end{array}\f]
@@ -1548,6 +1798,99 @@ CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
OutputArray out, OutputArray inliers,
double ransacThreshold = 3, double confidence = 0.99);
+/** @brief Computes an optimal affine transformation between two 2D point sets.
+
+@param from First input 2D point set.
+@param to Second input 2D point set.
+@param inliers Output vector indicating which points are inliers.
+@param method Robust method used to compute transformation. The following methods are possible:
+- cv::RANSAC - RANSAC-based robust method
+- cv::LMEDS - Least-Median robust method
+RANSAC is the default method.
+@param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
+a point as an inlier. Applies only to RANSAC.
+@param maxIters The maximum number of robust method iterations, 2000 is the maximum it can be.
+@param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
+between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
+significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
+@param refineIters Maximum number of iterations of refining algorithm (Levenberg-Marquardt).
+Passing 0 will disable refining, so the output matrix will be the output of the robust method.
+
+@return Output 2D affine transformation matrix \f$2 \times 3\f$ or empty matrix if transformation
+could not be estimated.
+
+The function estimates an optimal 2D affine transformation between two 2D point sets using the
+selected robust algorithm.
+
+The computed transformation is then refined further (using only inliers) with the
+Levenberg-Marquardt method to reduce the re-projection error even more.
+
+@note
+The RANSAC method can handle practically any ratio of outliers but needs a threshold to
+distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
+correctly only when there are more than 50% of inliers.
+
+@sa estimateAffinePartial2D, getAffineTransform
+*/
+CV_EXPORTS_W cv::Mat estimateAffine2D(InputArray from, InputArray to, OutputArray inliers = noArray(),
+ int method = RANSAC, double ransacReprojThreshold = 3,
+ size_t maxIters = 2000, double confidence = 0.99,
+ size_t refineIters = 10);
+
+/** @brief Computes an optimal limited affine transformation with 4 degrees of freedom between
+two 2D point sets.
+
+@param from First input 2D point set.
+@param to Second input 2D point set.
+@param inliers Output vector indicating which points are inliers.
+@param method Robust method used to compute transformation. The following methods are possible:
+- cv::RANSAC - RANSAC-based robust method
+- cv::LMEDS - Least-Median robust method
+RANSAC is the default method.
+@param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
+a point as an inlier. Applies only to RANSAC.
+@param maxIters The maximum number of robust method iterations, 2000 is the maximum it can be.
+@param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
+between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
+significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
+@param refineIters Maximum number of iterations of refining algorithm (Levenberg-Marquardt).
+Passing 0 will disable refining, so the output matrix will be the output of the robust method.
+
+@return Output 2D affine transformation (4 degrees of freedom) matrix \f$2 \times 3\f$ or
+empty matrix if transformation could not be estimated.
+
+The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
+combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
+estimation.
+
+The computed transformation is then refined further (using only inliers) with the
+Levenberg-Marquardt method to reduce the re-projection error even more.
+
+Estimated transformation matrix is:
+\f[ \begin{bmatrix} \cos(\theta)s & -\sin(\theta)s & tx \\
+ \sin(\theta)s & \cos(\theta)s & ty
+\end{bmatrix} \f]
+Where \f$ \theta \f$ is the rotation angle, \f$ s \f$ the scaling factor and \f$ tx, ty \f$ are
+translations in \f$ x, y \f$ axes respectively.
+
+@note
+The RANSAC method can handle practically any ratio of outliers but needs a threshold to
+distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
+correctly only when there are more than 50% of inliers.
+
+@sa estimateAffine2D, getAffineTransform
+*/
+CV_EXPORTS_W cv::Mat estimateAffinePartial2D(InputArray from, InputArray to, OutputArray inliers = noArray(),
+ int method = RANSAC, double ransacReprojThreshold = 3,
+ size_t maxIters = 2000, double confidence = 0.99,
+ size_t refineIters = 10);
+
+/** @example decompose_homography.cpp
+ An example program with homography decomposition.
+
+ Check @ref tutorial_homography "the corresponding tutorial" for more details.
+ */
+
/** @brief Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
@param H The input homography matrix between two images.
@@ -1683,7 +2026,8 @@ public:
{
MODE_SGBM = 0,
MODE_HH = 1,
- MODE_SGBM_3WAY = 2
+ MODE_SGBM_3WAY = 2,
+ MODE_HH4 = 3
};
CV_WRAP virtual int getPreFilterCap() const = 0;
@@ -1738,7 +2082,7 @@ public:
set StereoSGBM::numDisparities at minimum. The second constructor enables you to set each parameter
to a custom value.
*/
- CV_WRAP static Ptr create(int minDisparity, int numDisparities, int blockSize,
+ CV_WRAP static Ptr create(int minDisparity = 0, int numDisparities = 16, int blockSize = 3,
int P1 = 0, int P2 = 0, int disp12MaxDiff = 0,
int preFilterCap = 0, int uniquenessRatio = 0,
int speckleWindowSize = 0, int speckleRange = 0,
@@ -1756,15 +2100,16 @@ namespace fisheye
//! @{
enum{
- CALIB_USE_INTRINSIC_GUESS = 1,
- CALIB_RECOMPUTE_EXTRINSIC = 2,
- CALIB_CHECK_COND = 4,
- CALIB_FIX_SKEW = 8,
- CALIB_FIX_K1 = 16,
- CALIB_FIX_K2 = 32,
- CALIB_FIX_K3 = 64,
- CALIB_FIX_K4 = 128,
- CALIB_FIX_INTRINSIC = 256
+ CALIB_USE_INTRINSIC_GUESS = 1 << 0,
+ CALIB_RECOMPUTE_EXTRINSIC = 1 << 1,
+ CALIB_CHECK_COND = 1 << 2,
+ CALIB_FIX_SKEW = 1 << 3,
+ CALIB_FIX_K1 = 1 << 4,
+ CALIB_FIX_K2 = 1 << 5,
+ CALIB_FIX_K3 = 1 << 6,
+ CALIB_FIX_K4 = 1 << 7,
+ CALIB_FIX_INTRINSIC = 1 << 8,
+ CALIB_FIX_PRINCIPAL_POINT = 1 << 9
};
/** @brief Projects points using fisheye model
@@ -1802,6 +2147,10 @@ namespace fisheye
@param D Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.
@param alpha The skew coefficient.
@param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\ .
+
+ Note that the function assumes the camera matrix of the undistorted points to be identity.
+ This means if you want to transform back points undistorted with undistortPoints() you have to
+ multiply them with \f$P^{-1}\f$.
*/
CV_EXPORTS_W void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0);
@@ -1910,8 +2259,10 @@ namespace fisheye
of intrinsic optimization.
- **fisheye::CALIB_CHECK_COND** The functions will check validity of condition number.
- **fisheye::CALIB_FIX_SKEW** Skew coefficient (alpha) is set to zero and stay zero.
- - **fisheye::CALIB_FIX_K1..4** Selected distortion coefficients are set to zeros and stay
- zero.
+ - **fisheye::CALIB_FIX_K1..fisheye::CALIB_FIX_K4** Selected distortion coefficients
+ are set to zeros and stay zero.
+ - **fisheye::CALIB_FIX_PRINCIPAL_POINT** The principal point is not changed during the global
+optimization. It stays at the center or at a different location specified when CALIB_USE_INTRINSIC_GUESS is set too.
@param criteria Termination criteria for the iterative optimization algorithm.
*/
CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
@@ -1935,7 +2286,7 @@ namespace fisheye
@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.
@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
- @param flags Operation flags that may be zero or CV_CALIB_ZERO_DISPARITY . If the flag is set,
+ @param flags Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
@@ -1961,7 +2312,7 @@ namespace fisheye
observed by the second camera.
@param K1 Input/output first camera matrix:
\f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
- any of fisheye::CALIB_USE_INTRINSIC_GUESS , fisheye::CV_CALIB_FIX_INTRINSIC are specified,
+ any of fisheye::CALIB_USE_INTRINSIC_GUESS , fisheye::CALIB_FIX_INTRINSIC are specified,
some or all of the matrix components must be initialized.
@param D1 Input/output vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$ of 4 elements.
@param K2 Input/output second camera matrix. The parameter is similar to K1 .
@@ -1971,7 +2322,7 @@ namespace fisheye
@param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
@param T Output translation vector between the coordinate systems of the cameras.
@param flags Different flags that may be zero or a combination of the following values:
- - **fisheye::CV_CALIB_FIX_INTRINSIC** Fix K1, K2? and D1, D2? so that only R, T matrices
+ - **fisheye::CALIB_FIX_INTRINSIC** Fix K1, K2? and D1, D2? so that only R, T matrices
are estimated.
- **fisheye::CALIB_USE_INTRINSIC_GUESS** K1, K2 contains valid initial values of
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/calib3d/calib3d.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/calib3d/calib3d.hpp
similarity index 100%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/calib3d/calib3d.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/calib3d/calib3d.hpp
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/calib3d/calib3d_c.h b/lib/3rdParty/OpenCV3.4/include/opencv2/calib3d/calib3d_c.h
similarity index 99%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/calib3d/calib3d_c.h
rename to lib/3rdParty/OpenCV3.4/include/opencv2/calib3d/calib3d_c.h
index 0e77aa88..8ec6390d 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/calib3d/calib3d_c.h
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/calib3d/calib3d_c.h
@@ -41,8 +41,8 @@
//
//M*/
-#ifndef __OPENCV_CALIB3D_C_H__
-#define __OPENCV_CALIB3D_C_H__
+#ifndef OPENCV_CALIB3D_C_H
+#define OPENCV_CALIB3D_C_H
#include "opencv2/core/core_c.h"
@@ -245,7 +245,9 @@ CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
#define CV_CALIB_FIX_S1_S2_S3_S4 65536
#define CV_CALIB_TILTED_MODEL 262144
#define CV_CALIB_FIX_TAUX_TAUY 524288
+#define CV_CALIB_FIX_TANGENT_DIST 2097152
+#define CV_CALIB_NINTRINSIC 18
/* Finds intrinsic and extrinsic camera parameters
from a few views of known calibration pattern */
@@ -422,4 +424,4 @@ public:
#endif
-#endif /* __OPENCV_CALIB3D_C_H__ */
+#endif /* OPENCV_CALIB3D_C_H */
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core.hpp
similarity index 91%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core.hpp
index 55922607..fe3af393 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core.hpp
@@ -42,8 +42,8 @@
//
//M*/
-#ifndef __OPENCV_CORE_HPP__
-#define __OPENCV_CORE_HPP__
+#ifndef OPENCV_CORE_HPP
+#define OPENCV_CORE_HPP
#ifndef __cplusplus
# error core.hpp header must be compiled as C++
@@ -74,6 +74,7 @@
@{
@defgroup core_utils_sse SSE utilities
@defgroup core_utils_neon NEON utilities
+ @defgroup core_utils_softfloat Softfloat support
@}
@defgroup core_opengl OpenGL interoperability
@defgroup core_ipp Intel IPP Asynchronous C/C++ Converters
@@ -114,7 +115,7 @@ public:
*/
Exception();
/*!
- Full constructor. Normally the constuctor is not called explicitly.
+ Full constructor. Normally the constructor is not called explicitly.
Instead, the macros CV_Error(), CV_Error_() and CV_Assert() are used.
*/
Exception(int _code, const String& _err, const String& _func, const String& _file, int _line);
@@ -131,8 +132,8 @@ public:
int code; ///< error code @see CVStatus
String err; ///< error description
String func; ///< function name. Available only when the compiler supports getting it
- String file; ///< source file name where the error has occured
- int line; ///< line number in the source file where the error has occured
+ String file; ///< source file name where the error has occurred
+ int line; ///< line number in the source file where the error has occurred
};
/*! @brief Signals an error and raises the exception.
@@ -273,6 +274,9 @@ of p and len.
*/
CV_EXPORTS_W int borderInterpolate(int p, int len, int borderType);
+/** @example copyMakeBorder_demo.cpp
+An example using copyMakeBorder function
+ */
/** @brief Forms a border around an image.
The function copies the source image into the middle of the destination image. The areas to the
@@ -426,7 +430,7 @@ CV_EXPORTS_W void multiply(InputArray src1, InputArray src2,
/** @brief Performs per-element division of two arrays or a scalar by an array.
-The functions divide divide one array by another:
+The function cv::divide divides one array by another:
\f[\texttt{dst(I) = saturate(src1(I)*scale/src2(I))}\f]
or a scalar by an array when there is no src1 :
\f[\texttt{dst(I) = saturate(scale/src2(I))}\f]
@@ -471,6 +475,9 @@ The function can also be emulated with a matrix expression, for example:
*/
CV_EXPORTS_W void scaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst);
+/** @example AddingImagesTrackbar.cpp
+
+ */
/** @brief Calculates the weighted sum of two arrays.
The function addWeighted calculates the weighted sum of two arrays as follows:
@@ -524,6 +531,17 @@ For example:
CV_EXPORTS_W void convertScaleAbs(InputArray src, OutputArray dst,
double alpha = 1, double beta = 0);
+/** @brief Converts an array to half precision floating number.
+
+This function converts FP32 (single precision floating point) from/to FP16 (half precision floating point). The input array has to have type of CV_32F or
+CV_16S to represent the bit depth. If the input array is neither of them, the function will raise an error.
+The format of half precision floating point is defined in IEEE 754-2008.
+
+@param src input array.
+@param dst output array.
+*/
+CV_EXPORTS_W void convertFp16(InputArray src, OutputArray dst);
+
/** @brief Performs a look-up table transform of an array.
The function LUT fills the output array with values from the look-up table. Indices of the entries
@@ -542,7 +560,7 @@ CV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst);
/** @brief Calculates the sum of array elements.
-The functions sum calculate and return the sum of array elements,
+The function cv::sum calculates and returns the sum of array elements,
independently for each channel.
@param src input array that must have from 1 to 4 channels.
@sa countNonZero, mean, meanStdDev, norm, minMaxLoc, reduce
@@ -588,10 +606,10 @@ CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx );
/** @brief Calculates an average (mean) of array elements.
-The function mean calculates the mean value M of array elements,
+The function cv::mean calculates the mean value M of array elements,
independently for each channel, and return it:
\f[\begin{array}{l} N = \sum _{I: \; \texttt{mask} (I) \ne 0} 1 \\ M_c = \left ( \sum _{I: \; \texttt{mask} (I) \ne 0}{ \texttt{mtx} (I)_c} \right )/N \end{array}\f]
-When all the mask elements are 0's, the functions return Scalar::all(0)
+When all the mask elements are 0's, the function returns Scalar::all(0)
@param src input array that should have from 1 to 4 channels so that the result can be stored in
Scalar_ .
@param mask optional operation mask.
@@ -601,11 +619,11 @@ CV_EXPORTS_W Scalar mean(InputArray src, InputArray mask = noArray());
/** Calculates a mean and standard deviation of array elements.
-The function meanStdDev calculates the mean and the standard deviation M
+The function cv::meanStdDev calculates the mean and the standard deviation M
of array elements independently for each channel and returns it via the
output parameters:
\f[\begin{array}{l} N = \sum _{I, \texttt{mask} (I) \ne 0} 1 \\ \texttt{mean} _c = \frac{\sum_{ I: \; \texttt{mask}(I) \ne 0} \texttt{src} (I)_c}{N} \\ \texttt{stddev} _c = \sqrt{\frac{\sum_{ I: \; \texttt{mask}(I) \ne 0} \left ( \texttt{src} (I)_c - \texttt{mean} _c \right )^2}{N}} \end{array}\f]
-When all the mask elements are 0's, the functions return
+When all the mask elements are 0's, the function returns
mean=stddev=Scalar::all(0).
@note The calculated standard deviation is only the diagonal of the
complete normalized covariance matrix. If the full matrix is needed, you
@@ -615,50 +633,57 @@ then pass the matrix to calcCovarMatrix .
@param src input array that should have from 1 to 4 channels so that the results can be stored in
Scalar_ 's.
@param mean output parameter: calculated mean value.
-@param stddev output parameter: calculateded standard deviation.
+@param stddev output parameter: calculated standard deviation.
@param mask optional operation mask.
@sa countNonZero, mean, norm, minMaxLoc, calcCovarMatrix
*/
CV_EXPORTS_W void meanStdDev(InputArray src, OutputArray mean, OutputArray stddev,
InputArray mask=noArray());
-/** @brief Calculates an absolute array norm, an absolute difference norm, or a
-relative difference norm.
+/** @brief Calculates the absolute norm of an array.
-The functions norm calculate an absolute norm of src1 (when there is no
-src2 ):
+This version of cv::norm calculates the absolute norm of src1. The type of norm to calculate is specified using cv::NormTypes.
-\f[norm = \forkthree{\|\texttt{src1}\|_{L_{\infty}} = \max _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
-{ \| \texttt{src1} \| _{L_1} = \sum _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\) }
-{ \| \texttt{src1} \| _{L_2} = \sqrt{\sum_I \texttt{src1}(I)^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }\f]
-
-or an absolute or relative difference norm if src2 is there:
-
-\f[norm = \forkthree{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} = \max _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
-{ \| \texttt{src1} - \texttt{src2} \| _{L_1} = \sum _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\) }
-{ \| \texttt{src1} - \texttt{src2} \| _{L_2} = \sqrt{\sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }\f]
-
-or
-
-\f[norm = \forkthree{\frac{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} }{\|\texttt{src2}\|_{L_{\infty}} }}{if \(\texttt{normType} = \texttt{NORM_RELATIVE_INF}\) }
-{ \frac{\|\texttt{src1}-\texttt{src2}\|_{L_1} }{\|\texttt{src2}\|_{L_1}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE_L1}\) }
-{ \frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE_L2}\) }\f]
-
-The functions norm return the calculated norm.
+As example for one array consider the function \f$r(x)= \begin{pmatrix} x \\ 1-x \end{pmatrix}, x \in [-1;1]\f$.
+The \f$ L_{1}, L_{2} \f$ and \f$ L_{\infty} \f$ norm for the sample value \f$r(-1) = \begin{pmatrix} -1 \\ 2 \end{pmatrix}\f$
+is calculated as follows
+\f{align*}
+ \| r(-1) \|_{L_1} &= |-1| + |2| = 3 \\
+ \| r(-1) \|_{L_2} &= \sqrt{(-1)^{2} + (2)^{2}} = \sqrt{5} \\
+ \| r(-1) \|_{L_\infty} &= \max(|-1|,|2|) = 2
+\f}
+and for \f$r(0.5) = \begin{pmatrix} 0.5 \\ 0.5 \end{pmatrix}\f$ the calculation is
+\f{align*}
+ \| r(0.5) \|_{L_1} &= |0.5| + |0.5| = 1 \\
+ \| r(0.5) \|_{L_2} &= \sqrt{(0.5)^{2} + (0.5)^{2}} = \sqrt{0.5} \\
+ \| r(0.5) \|_{L_\infty} &= \max(|0.5|,|0.5|) = 0.5.
+\f}
+The following graphic shows all values for the three norm functions \f$\| r(x) \|_{L_1}, \| r(x) \|_{L_2}\f$ and \f$\| r(x) \|_{L_\infty}\f$.
+It is notable that the \f$ L_{1} \f$ norm forms the upper and the \f$ L_{\infty} \f$ norm forms the lower border for the example function \f$ r(x) \f$.
+
When the mask parameter is specified and it is not empty, the norm is
+
+If normType is not specified, NORM_L2 is used.
calculated only over the region specified by the mask.
-A multi-channel input arrays are treated as a single-channel, that is,
+Multi-channel input arrays are treated as single-channel arrays, that is,
the results for all channels are combined.
+Hamming norms can only be calculated with CV_8U depth arrays.
+
@param src1 first input array.
@param normType type of the norm (see cv::NormTypes).
@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.
*/
CV_EXPORTS_W double norm(InputArray src1, int normType = NORM_L2, InputArray mask = noArray());
-/** @overload
+/** @brief Calculates an absolute difference norm or a relative difference norm.
+
+This version of cv::norm calculates the absolute difference norm
+or the relative difference norm of arrays src1 and src2.
+The type of norm to calculate is specified using cv::NormTypes.
+
@param src1 first input array.
@param src2 second input array of the same size and the same type as src1.
@param normType type of the norm (cv::NormTypes).
@@ -672,10 +697,21 @@ CV_EXPORTS_W double norm(InputArray src1, InputArray src2,
*/
CV_EXPORTS double norm( const SparseMat& src, int normType );
-/** @brief computes PSNR image/video quality metric
+/** @brief Computes the Peak Signal-to-Noise Ratio (PSNR) image quality metric.
+
+This function calculates the Peak Signal-to-Noise Ratio (PSNR) image quality metric in decibels (dB), between two input arrays src1 and src2. Arrays must have depth CV_8U.
+
+The PSNR is calculated as follows:
+
+\f[
+\texttt{PSNR} = 10 \cdot \log_{10}{\left( \frac{R^2}{MSE} \right) }
+\f]
+
+where R is the maximum integer value of depth CV_8U (255) and MSE is the mean squared error between the two arrays.
+
+@param src1 first input array.
+@param src2 second input array of the same size as src1.
-see http://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio for details
-@todo document
*/
CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2);
@@ -692,7 +728,7 @@ CV_EXPORTS_W void batchDistance(InputArray src1, InputArray src2,
/** @brief Normalizes the norm or value range of an array.
-The functions normalize scale and shift the input array elements so that
+The function cv::normalize scales and shifts the input array elements so that
\f[\| \texttt{dst} \| _{L_p}= \texttt{alpha}\f]
(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that
\f[\min _I \texttt{dst} (I)= \texttt{alpha} , \, \, \max _I \texttt{dst} (I)= \texttt{beta}\f]
@@ -762,11 +798,11 @@ CV_EXPORTS void normalize( const SparseMat& src, SparseMat& dst, double alpha, i
/** @brief Finds the global minimum and maximum in an array.
-The functions minMaxLoc find the minimum and maximum element values and their positions. The
+The function cv::minMaxLoc finds the minimum and maximum element values and their positions. The
extremums are searched across the whole array or, if mask is not an empty array, in the specified
array region.
-The functions do not work with multi-channel arrays. If you need to find minimum or maximum
+The function does not work with multi-channel arrays. If you need to find minimum or maximum
elements across all the channels, use Mat::reshape first to reinterpret the array as
single-channel. Or you may extract the particular channel using either extractImageCOI , or
mixChannels , or split .
@@ -785,7 +821,7 @@ CV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal,
/** @brief Finds the global minimum and maximum in an array
-The function minMaxIdx finds the minimum and maximum element values and their positions. The
+The function cv::minMaxIdx finds the minimum and maximum element values and their positions. The
extremums are searched across the whole array or, if mask is not an empty array, in the specified
array region. The function does not work with multi-channel arrays. If you need to find minimum or
maximum elements across all the channels, use Mat::reshape first to reinterpret the array as
@@ -823,12 +859,19 @@ CV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal,
/** @brief Reduces a matrix to a vector.
-The function reduce reduces the matrix to a vector by treating the matrix rows/columns as a set of
+The function cv::reduce reduces the matrix to a vector by treating the matrix rows/columns as a set of
1D vectors and performing the specified operation on the vectors until a single row/column is
obtained. For example, the function can be used to compute horizontal and vertical projections of a
-raster image. In case of REDUCE_SUM and REDUCE_AVG , the output may have a larger element
-bit-depth to preserve accuracy. And multi-channel arrays are also supported in these two reduction
-modes.
+raster image. In case of REDUCE_MAX and REDUCE_MIN , the output image should have the same type as the source one.
+In case of REDUCE_SUM and REDUCE_AVG , the output may have a larger element bit-depth to preserve accuracy.
+And multi-channel arrays are also supported in these two reduction modes.
+
+The following code demonstrates its usage for a single channel matrix.
+@snippet snippets/core_reduce.cpp example
+
+And the following code demonstrates its usage for a two-channel matrix.
+@snippet snippets/core_reduce.cpp example2
+
@param src input 2D matrix.
@param dst output vector. Its size and type is defined by dim and dtype parameters.
@param dim dimension index along which the matrix is reduced. 0 means that the matrix is reduced to
@@ -842,12 +885,16 @@ CV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, in
/** @brief Creates one multi-channel array out of several single-channel ones.
-The function merge merges several arrays to make a single multi-channel array. That is, each
+The function cv::merge merges several arrays to make a single multi-channel array. That is, each
element of the output array will be a concatenation of the elements of the input arrays, where
elements of i-th input array are treated as mv[i].channels()-element vectors.
The function cv::split does the reverse operation. If you need to shuffle channels in some other
advanced way, use cv::mixChannels.
+
+The following example shows how to merge 3 single channel matrices into a single 3-channel matrix.
+@snippet snippets/core_merge.cpp example
+
@param mv input array of matrices to be merged; all the matrices in mv must have the same
size and the same depth.
@param count number of input matrices when mv is a plain C array; it must be greater than zero.
@@ -867,10 +914,14 @@ CV_EXPORTS_W void merge(InputArrayOfArrays mv, OutputArray dst);
/** @brief Divides a multi-channel array into several single-channel arrays.
-The functions split split a multi-channel array into separate single-channel arrays:
+The function cv::split splits a multi-channel array into separate single-channel arrays:
\f[\texttt{mv} [c](I) = \texttt{src} (I)_c\f]
If you need to extract a single channel or do some other sophisticated channel permutation, use
mixChannels .
+
+The following example demonstrates how to split a 3-channel matrix into 3 single channel matrices.
+@snippet snippets/core_split.cpp example
+
@param src input multi-channel array.
@param mvbegin output array; the number of arrays must match src.channels(); the arrays themselves are
reallocated, if needed.
@@ -889,7 +940,7 @@ output arrays.
The function cv::mixChannels provides an advanced mechanism for shuffling image channels.
-cv::split and cv::merge and some forms of cv::cvtColor are partial cases of cv::mixChannels .
+cv::split, cv::merge, cv::extractChannel, cv::insertChannel and some forms of cv::cvtColor are partial cases of cv::mixChannels.
In the example below, the code splits a 4-channel BGRA image into a 3-channel BGR (with B and R
channels swapped) and a separate alpha-channel image:
@@ -923,7 +974,7 @@ src[0].channels() + src[1].channels()-1, and so on, the same scheme is used for
channels; as a special case, when fromTo[k\*2] is negative, the corresponding output channel is
filled with zero .
@param npairs number of index pairs in `fromTo`.
-@sa cv::split, cv::merge, cv::cvtColor
+@sa split, merge, extractChannel, insertChannel, cvtColor
*/
CV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts,
const int* fromTo, size_t npairs);
@@ -961,19 +1012,25 @@ filled with zero .
CV_EXPORTS_W void mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
const std::vector& fromTo);
-/** @brief extracts a single channel from src (coi is 0-based index)
-@todo document
+/** @brief Extracts a single channel from src (coi is 0-based index)
+@param src input array
+@param dst output array
+@param coi index of channel to extract
+@sa mixChannels, split
*/
CV_EXPORTS_W void extractChannel(InputArray src, OutputArray dst, int coi);
-/** @brief inserts a single channel to dst (coi is 0-based index)
-@todo document
+/** @brief Inserts a single channel to dst (coi is 0-based index)
+@param src input array
+@param dst output array
+@param coi index of channel for insertion
+@sa mixChannels, merge
*/
CV_EXPORTS_W void insertChannel(InputArray src, InputOutputArray dst, int coi);
/** @brief Flips a 2D array around vertical, horizontal, or both axes.
-The function flip flips the array in one of three different ways (row
+The function cv::flip flips the array in one of three different ways (row
and column indices are 0-based):
\f[\texttt{dst} _{ij} =
\left\{
@@ -1005,26 +1062,44 @@ around both axes.
*/
CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode);
+enum RotateFlags {
+ ROTATE_90_CLOCKWISE = 0, //Rotate 90 degrees clockwise
+ ROTATE_180 = 1, //Rotate 180 degrees clockwise
+ ROTATE_90_COUNTERCLOCKWISE = 2, //Rotate 270 degrees clockwise
+};
+/** @brief Rotates a 2D array in multiples of 90 degrees.
+The function cv::rotate rotates the array in one of three different ways:
+* Rotate by 90 degrees clockwise (rotateCode = ROTATE_90_CLOCKWISE).
+* Rotate by 180 degrees clockwise (rotateCode = ROTATE_180).
+* Rotate by 270 degrees clockwise (rotateCode = ROTATE_90_COUNTERCLOCKWISE).
+@param src input array.
+@param dst output array of the same type as src. The size is the same as for ROTATE_180,
+and the rows and cols are switched for ROTATE_90_CLOCKWISE and ROTATE_90_COUNTERCLOCKWISE.
+@param rotateCode an enum to specify how to rotate the array; see the enum RotateFlags
+@sa transpose , repeat , completeSymm, flip, RotateFlags
+*/
+CV_EXPORTS_W void rotate(InputArray src, OutputArray dst, int rotateCode);
+
/** @brief Fills the output array with repeated copies of the input array.
-The functions repeat duplicate the input array one or more times along each of the two axes:
+The function cv::repeat duplicates the input array one or more times along each of the two axes:
\f[\texttt{dst} _{ij}= \texttt{src} _{i\mod src.rows, \; j\mod src.cols }\f]
The second variant of the function is more convenient to use with @ref MatrixExpressions.
@param src input array to replicate.
-@param dst output array of the same type as src.
-@param ny Flag to specify how many times the src is repeated along the
+@param ny Flag to specify how many times the `src` is repeated along the
vertical axis.
-@param nx Flag to specify how many times the src is repeated along the
+@param nx Flag to specify how many times the `src` is repeated along the
horizontal axis.
-@sa reduce
+@param dst output array of the same type as `src`.
+@sa cv::reduce
*/
CV_EXPORTS_W void repeat(InputArray src, int ny, int nx, OutputArray dst);
/** @overload
@param src input array to replicate.
-@param ny Flag to specify how many times the src is repeated along the
+@param ny Flag to specify how many times the `src` is repeated along the
vertical axis.
-@param nx Flag to specify how many times the src is repeated along the
+@param nx Flag to specify how many times the `src` is repeated along the
horizontal axis.
*/
CV_EXPORTS Mat repeat(const Mat& src, int ny, int nx);
@@ -1160,7 +1235,7 @@ CV_EXPORTS_W void vconcat(InputArrayOfArrays src, OutputArray dst);
Calculates the per-element bit-wise conjunction of two arrays or an
array and a scalar.
-The function calculates the per-element bit-wise logical conjunction for:
+The function cv::bitwise_and calculates the per-element bit-wise logical conjunction for:
* Two arrays when src1 and src2 have the same size:
\f[\texttt{dst} (I) = \texttt{src1} (I) \wedge \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
* An array and a scalar when src2 is constructed from Scalar or has
@@ -1187,7 +1262,7 @@ CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2,
/** @brief Calculates the per-element bit-wise disjunction of two arrays or an
array and a scalar.
-The function calculates the per-element bit-wise logical disjunction for:
+The function cv::bitwise_or calculates the per-element bit-wise logical disjunction for:
* Two arrays when src1 and src2 have the same size:
\f[\texttt{dst} (I) = \texttt{src1} (I) \vee \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
* An array and a scalar when src2 is constructed from Scalar or has
@@ -1214,7 +1289,7 @@ CV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2,
/** @brief Calculates the per-element bit-wise "exclusive or" operation on two
arrays or an array and a scalar.
-The function calculates the per-element bit-wise logical "exclusive-or"
+The function cv::bitwise_xor calculates the per-element bit-wise logical "exclusive-or"
operation for:
* Two arrays when src1 and src2 have the same size:
\f[\texttt{dst} (I) = \texttt{src1} (I) \oplus \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
@@ -1241,7 +1316,7 @@ CV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2,
/** @brief Inverts every bit of an array.
-The function calculates per-element bit-wise inversion of the input
+The function cv::bitwise_not calculates per-element bit-wise inversion of the input
array:
\f[\texttt{dst} (I) = \neg \texttt{src} (I)\f]
In case of a floating-point input array, its machine-specific bit
@@ -1258,7 +1333,7 @@ CV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst,
/** @brief Calculates the per-element absolute difference between two arrays or between an array and a scalar.
-The function absdiff calculates:
+The function cv::absdiff calculates:
* Absolute difference between two arrays when they have the same
size and type:
\f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1}(I) - \texttt{src2}(I)|)\f]
@@ -1333,7 +1408,7 @@ CV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int
/** @brief Calculates per-element minimum of two arrays or an array and a scalar.
-The functions min calculate the per-element minimum of two arrays:
+The function cv::min calculates the per-element minimum of two arrays:
\f[\texttt{dst} (I)= \min ( \texttt{src1} (I), \texttt{src2} (I))\f]
or array and a scalar:
\f[\texttt{dst} (I)= \min ( \texttt{src1} (I), \texttt{value} )\f]
@@ -1354,7 +1429,7 @@ CV_EXPORTS void min(const UMat& src1, const UMat& src2, UMat& dst);
/** @brief Calculates per-element maximum of two arrays or an array and a scalar.
-The functions max calculate the per-element maximum of two arrays:
+The function cv::max calculates the per-element maximum of two arrays:
\f[\texttt{dst} (I)= \max ( \texttt{src1} (I), \texttt{src2} (I))\f]
or array and a scalar:
\f[\texttt{dst} (I)= \max ( \texttt{src1} (I), \texttt{value} )\f]
@@ -1375,7 +1450,7 @@ CV_EXPORTS void max(const UMat& src1, const UMat& src2, UMat& dst);
/** @brief Calculates a square root of array elements.
-The functions sqrt calculate a square root of each input array element.
+The function cv::sqrt calculates a square root of each input array element.
In case of multi-channel arrays, each channel is processed
independently. The accuracy is approximately the same as of the built-in
std::sqrt .
@@ -1386,7 +1461,7 @@ CV_EXPORTS_W void sqrt(InputArray src, OutputArray dst);
/** @brief Raises every array element to a power.
-The function pow raises every element of the input array to power :
+The function cv::pow raises every element of the input array to power :
\f[\texttt{dst} (I) = \fork{\texttt{src}(I)^{power}}{if \(\texttt{power}\) is integer}{|\texttt{src}(I)|^{power}}{otherwise}\f]
So, for a non-integer power exponent, the absolute values of input array
@@ -1411,7 +1486,7 @@ CV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst);
/** @brief Calculates the exponent of every array element.
-The function exp calculates the exponent of every element of the input
+The function cv::exp calculates the exponent of every element of the input
array:
\f[\texttt{dst} [I] = e^{ src(I) }\f]
@@ -1427,14 +1502,11 @@ CV_EXPORTS_W void exp(InputArray src, OutputArray dst);
/** @brief Calculates the natural logarithm of every array element.
-The function log calculates the natural logarithm of the absolute value
-of every element of the input array:
-\f[\texttt{dst} (I) = \fork{\log |\texttt{src}(I)|}{if \(\texttt{src}(I) \ne 0\) }{\texttt{C}}{otherwise}\f]
+The function cv::log calculates the natural logarithm of every element of the input array:
+\f[\texttt{dst} (I) = \log (\texttt{src}(I)) \f]
+
+Output on zero, negative and special (NaN, Inf) values is undefined.
-where C is a large negative number (about -700 in the current
-implementation). The maximum relative error is about 7e-6 for
-single-precision input and less than 1e-10 for double-precision input.
-Special values (NaN, Inf) are not handled.
@param src input array.
@param dst output array of the same size and type as src .
@sa exp, cartToPolar, polarToCart, phase, pow, sqrt, magnitude
@@ -1443,7 +1515,7 @@ CV_EXPORTS_W void log(InputArray src, OutputArray dst);
/** @brief Calculates x and y coordinates of 2D vectors from their magnitude and angle.
-The function polarToCart calculates the Cartesian coordinates of each 2D
+The function cv::polarToCart calculates the Cartesian coordinates of each 2D
vector represented by the corresponding elements of magnitude and angle:
\f[\begin{array}{l} \texttt{x} (I) = \texttt{magnitude} (I) \cos ( \texttt{angle} (I)) \\ \texttt{y} (I) = \texttt{magnitude} (I) \sin ( \texttt{angle} (I)) \\ \end{array}\f]
@@ -1466,7 +1538,7 @@ CV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle,
/** @brief Calculates the magnitude and angle of 2D vectors.
-The function cartToPolar calculates either the magnitude, angle, or both
+The function cv::cartToPolar calculates either the magnitude, angle, or both
for every 2D vector (x(I),y(I)):
\f[\begin{array}{l} \texttt{magnitude} (I)= \sqrt{\texttt{x}(I)^2+\texttt{y}(I)^2} , \\ \texttt{angle} (I)= \texttt{atan2} ( \texttt{y} (I), \texttt{x} (I))[ \cdot180 / \pi ] \end{array}\f]
@@ -1488,7 +1560,7 @@ CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y,
/** @brief Calculates the rotation angle of 2D vectors.
-The function phase calculates the rotation angle of each 2D vector that
+The function cv::phase calculates the rotation angle of each 2D vector that
is formed from the corresponding elements of x and y :
\f[\texttt{angle} (I) = \texttt{atan2} ( \texttt{y} (I), \texttt{x} (I))\f]
@@ -1507,7 +1579,7 @@ CV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle,
/** @brief Calculates the magnitude of 2D vectors.
-The function magnitude calculates the magnitude of 2D vectors formed
+The function cv::magnitude calculates the magnitude of 2D vectors formed
from the corresponding elements of x and y arrays:
\f[\texttt{dst} (I) = \sqrt{\texttt{x}(I)^2 + \texttt{y}(I)^2}\f]
@param x floating-point array of x-coordinates of the vectors.
@@ -1520,11 +1592,11 @@ CV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude);
/** @brief Checks every element of an input array for invalid values.
-The functions checkRange check that every array element is neither NaN nor infinite. When minVal \>
--DBL_MAX and maxVal \< DBL_MAX, the functions also check that each value is between minVal and
+The function cv::checkRange checks that every array element is neither NaN nor infinite. When minVal \>
+-DBL_MAX and maxVal \< DBL_MAX, the function also checks that each value is between minVal and
maxVal. In case of multi-channel arrays, each channel is processed independently. If some values
are out of range, position of the first outlier is stored in pos (when pos != NULL). Then, the
-functions either return false (when quiet=true) or throw an exception.
+function either returns false (when quiet=true) or throws an exception.
@param a input array.
@param quiet a flag, indicating whether the functions quietly return false when the array elements
are out of range or they throw an exception.
@@ -1542,7 +1614,7 @@ CV_EXPORTS_W void patchNaNs(InputOutputArray a, double val = 0);
/** @brief Performs generalized matrix multiplication.
-The function performs generalized matrix multiplication similar to the
+The function cv::gemm performs generalized matrix multiplication similar to the
gemm functions in BLAS level 3. For example,
`gemm(src1, src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T)`
corresponds to
@@ -1573,7 +1645,7 @@ CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha,
/** @brief Calculates the product of a matrix and its transposition.
-The function mulTransposed calculates the product of src and its
+The function cv::mulTransposed calculates the product of src and its
transposition:
\f[\texttt{dst} = \texttt{scale} ( \texttt{src} - \texttt{delta} )^T ( \texttt{src} - \texttt{delta} )\f]
if aTa=true , and
@@ -1605,9 +1677,9 @@ CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa,
/** @brief Transposes a matrix.
-The function transpose transposes the matrix src :
+The function cv::transpose transposes the matrix src :
\f[\texttt{dst} (i,j) = \texttt{src} (j,i)\f]
-@note No complex conjugation is done in case of a complex matrix. It it
+@note No complex conjugation is done in case of a complex matrix. It
should be done separately if needed.
@param src input array.
@param dst output array of the same type as src.
@@ -1616,7 +1688,7 @@ CV_EXPORTS_W void transpose(InputArray src, OutputArray dst);
/** @brief Performs the matrix transformation of every array element.
-The function transform performs the matrix transformation of every
+The function cv::transform performs the matrix transformation of every
element of the array src and stores the results in dst :
\f[\texttt{dst} (I) = \texttt{m} \cdot \texttt{src} (I)\f]
(when m.cols=src.channels() ), or
@@ -1636,13 +1708,13 @@ m.cols or m.cols-1.
@param dst output array of the same size and depth as src; it has as
many channels as m.rows.
@param m transformation 2x2 or 2x3 floating-point matrix.
-@sa perspectiveTransform, getAffineTransform, estimateRigidTransform, warpAffine, warpPerspective
+@sa perspectiveTransform, getAffineTransform, estimateAffine2D, warpAffine, warpPerspective
*/
CV_EXPORTS_W void transform(InputArray src, OutputArray dst, InputArray m );
/** @brief Performs the perspective matrix transformation of vectors.
-The function perspectiveTransform transforms every element of src by
+The function cv::perspectiveTransform transforms every element of src by
treating it as a 2D or 3D vector, in the following way:
\f[(x, y, z) \rightarrow (x'/w, y'/w, z'/w)\f]
where
@@ -1669,7 +1741,7 @@ CV_EXPORTS_W void perspectiveTransform(InputArray src, OutputArray dst, InputArr
/** @brief Copies the lower or the upper half of a square matrix to another half.
-The function completeSymm copies the lower half of a square matrix to
+The function cv::completeSymm copies the lower half of a square matrix to
its another half. The matrix diagonal remains unchanged:
* \f$\texttt{mtx}_{ij}=\texttt{mtx}_{ji}\f$ for \f$i > j\f$ if
lowerToUpper=false
@@ -1684,7 +1756,7 @@ CV_EXPORTS_W void completeSymm(InputOutputArray mtx, bool lowerToUpper = false);
/** @brief Initializes a scaled identity matrix.
-The function setIdentity initializes a scaled identity matrix:
+The function cv::setIdentity initializes a scaled identity matrix:
\f[\texttt{mtx} (i,j)= \fork{\texttt{value}}{ if \(i=j\)}{0}{otherwise}\f]
The function can also be emulated using the matrix initializers and the
@@ -1701,7 +1773,7 @@ CV_EXPORTS_W void setIdentity(InputOutputArray mtx, const Scalar& s = Scalar(1))
/** @brief Returns the determinant of a square floating-point matrix.
-The function determinant calculates and returns the determinant of the
+The function cv::determinant calculates and returns the determinant of the
specified matrix. For small matrices ( mtx.cols=mtx.rows\<=3 ), the
direct method is used. For larger matrices, the function uses LU
factorization with partial pivoting.
@@ -1716,7 +1788,7 @@ CV_EXPORTS_W double determinant(InputArray mtx);
/** @brief Returns the trace of a matrix.
-The function trace returns the sum of the diagonal elements of the
+The function cv::trace returns the sum of the diagonal elements of the
matrix mtx .
\f[\mathrm{tr} ( \texttt{mtx} ) = \sum _i \texttt{mtx} (i,i)\f]
@param mtx input matrix.
@@ -1725,7 +1797,7 @@ CV_EXPORTS_W Scalar trace(InputArray mtx);
/** @brief Finds the inverse or pseudo-inverse of a matrix.
-The function invert inverts the matrix src and stores the result in dst
+The function cv::invert inverts the matrix src and stores the result in dst
. When the matrix src is singular or non-square, the function calculates
the pseudo-inverse matrix (the dst matrix) so that norm(src\*dst - I) is
minimal, where I is an identity matrix.
@@ -1752,7 +1824,7 @@ CV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags = DECOMP_L
/** @brief Solves one or more linear systems or least-squares problems.
-The function solve solves a linear system or least-squares problem (the
+The function cv::solve solves a linear system or least-squares problem (the
latter is possible with SVD or QR methods, or by specifying the flag
DECOMP_NORMAL ):
\f[\texttt{dst} = \arg \min _X \| \texttt{src1} \cdot \texttt{X} - \texttt{src2} \|\f]
@@ -1777,7 +1849,7 @@ CV_EXPORTS_W bool solve(InputArray src1, InputArray src2,
/** @brief Sorts each row or each column of a matrix.
-The function sort sorts each matrix row or each matrix column in
+The function cv::sort sorts each matrix row or each matrix column in
ascending or descending order. So you should pass two operation flags to
get desired behaviour. If you want to sort matrix rows or columns
lexicographically, you can use STL std::sort generic function with the
@@ -1792,7 +1864,7 @@ CV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags);
/** @brief Sorts each row or each column of a matrix.
-The function sortIdx sorts each matrix row or each matrix column in the
+The function cv::sortIdx sorts each matrix row or each matrix column in the
ascending or descending order. So you should pass two operation flags to
get desired behaviour. Instead of reordering the elements themselves, it
stores the indices of sorted elements in the output array. For example:
@@ -1826,7 +1898,7 @@ CV_EXPORTS_W int solveCubic(InputArray coeffs, OutputArray roots);
/** @brief Finds the real or complex roots of a polynomial equation.
-The function solvePoly finds real and complex roots of a polynomial equation:
+The function cv::solvePoly finds real and complex roots of a polynomial equation:
\f[\texttt{coeffs} [n] x^{n} + \texttt{coeffs} [n-1] x^{n-1} + ... + \texttt{coeffs} [1] x + \texttt{coeffs} [0] = 0\f]
@param coeffs array of polynomial coefficients.
@param roots output (complex) array of roots.
@@ -1836,13 +1908,14 @@ CV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters
/** @brief Calculates eigenvalues and eigenvectors of a symmetric matrix.
-The functions eigen calculate just eigenvalues, or eigenvalues and eigenvectors of the symmetric
+The function cv::eigen calculates just eigenvalues, or eigenvalues and eigenvectors of the symmetric
matrix src:
@code
src*eigenvectors.row(i).t() = eigenvalues.at(i)*eigenvectors.row(i).t()
@endcode
-@note in the new and the old interfaces different ordering of eigenvalues and eigenvectors
-parameters is used.
+
+@note Use cv::eigenNonSymmetric for calculation of real eigenvalues and eigenvectors of non-symmetric matrix.
+
@param src input matrix that must have CV_32FC1 or CV_64FC1 type, square size and be symmetrical
(src ^T^ == src).
@param eigenvalues output vector of eigenvalues of the same type as src; the eigenvalues are stored
@@ -1850,14 +1923,31 @@ in the descending order.
@param eigenvectors output matrix of eigenvectors; it has the same size and type as src; the
eigenvectors are stored as subsequent matrix rows, in the same order as the corresponding
eigenvalues.
-@sa completeSymm , PCA
+@sa eigenNonSymmetric, completeSymm , PCA
*/
CV_EXPORTS_W bool eigen(InputArray src, OutputArray eigenvalues,
OutputArray eigenvectors = noArray());
+/** @brief Calculates eigenvalues and eigenvectors of a non-symmetric matrix (real eigenvalues only).
+
+@note Assumes real eigenvalues.
+
+The function calculates eigenvalues and eigenvectors (optional) of the square matrix src:
+@code
+ src*eigenvectors.row(i).t() = eigenvalues.at(i)*eigenvectors.row(i).t()
+@endcode
+
+@param src input matrix (CV_32FC1 or CV_64FC1 type).
+@param eigenvalues output vector of eigenvalues (type is the same type as src).
+@param eigenvectors output matrix of eigenvectors (type is the same type as src). The eigenvectors are stored as subsequent matrix rows, in the same order as the corresponding eigenvalues.
+@sa eigen
+*/
+CV_EXPORTS_W void eigenNonSymmetric(InputArray src, OutputArray eigenvalues,
+ OutputArray eigenvectors);
+
/** @brief Calculates the covariance matrix of a set of vectors.
-The functions calcCovarMatrix calculate the covariance matrix and, optionally, the mean vector of
+The function cv::calcCovarMatrix calculates the covariance matrix and, optionally, the mean vector of
the set of input vectors.
@param samples samples stored as separate matrices
@param nsamples number of samples
@@ -1907,7 +1997,7 @@ CV_EXPORTS_W void SVBackSubst( InputArray w, InputArray u, InputArray vt,
/** @brief Calculates the Mahalanobis distance between two vectors.
-The function Mahalanobis calculates and returns the weighted distance between two vectors:
+The function cv::Mahalanobis calculates and returns the weighted distance between two vectors:
\f[d( \texttt{vec1} , \texttt{vec2} )= \sqrt{\sum_{i,j}{\texttt{icovar(i,j)}\cdot(\texttt{vec1}(I)-\texttt{vec2}(I))\cdot(\texttt{vec1(j)}-\texttt{vec2(j)})} }\f]
The covariance matrix may be calculated using the cv::calcCovarMatrix function and then inverted using
the invert function (preferably using the cv::DECOMP_SVD method, as the most accurate).
@@ -1919,7 +2009,7 @@ CV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar)
/** @brief Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array.
-The function performs one of the following:
+The function cv::dft performs one of the following:
- Forward the Fourier transform of a 1D vector of N elements:
\f[Y = F^{(N)} \cdot X,\f]
where \f$F^{(N)}_{jk}=\exp(-2\pi i j k/N)\f$ and \f$i=\sqrt{-1}\f$
@@ -2067,7 +2157,7 @@ CV_EXPORTS_W void idft(InputArray src, OutputArray dst, int flags = 0, int nonze
/** @brief Performs a forward or inverse discrete Cosine transform of 1D or 2D array.
-The function dct performs a forward or inverse discrete Cosine transform (DCT) of a 1D or 2D
+The function cv::dct performs a forward or inverse discrete Cosine transform (DCT) of a 1D or 2D
floating-point array:
- Forward Cosine transform of a 1D vector of N elements:
\f[Y = C^{(N)} \cdot X\f]
@@ -2118,7 +2208,7 @@ CV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags = 0);
/** @brief Performs the per-element multiplication of two Fourier spectrums.
-The function mulSpectrums performs the per-element multiplication of the two CCS-packed or complex
+The function cv::mulSpectrums performs the per-element multiplication of the two CCS-packed or complex
matrices that are results of a real or complex Fourier transform.
The function, together with dft and idft , may be used to calculate convolution (pass conjB=false )
@@ -2145,7 +2235,7 @@ original one. Arrays whose size is a power-of-two (2, 4, 8, 16, 32, ...) are the
Though, the arrays whose size is a product of 2's, 3's, and 5's (for example, 300 = 5\*5\*3\*2\*2)
are also processed quite efficiently.
-The function getOptimalDFTSize returns the minimum number N that is greater than or equal to vecsize
+The function cv::getOptimalDFTSize returns the minimum number N that is greater than or equal to vecsize
so that the DFT of a vector of size N can be processed efficiently. In the current implementation N
= 2 ^p^ \* 3 ^q^ \* 5 ^r^ for some integer p, q, r.
@@ -2161,7 +2251,7 @@ CV_EXPORTS_W int getOptimalDFTSize(int vecsize);
/** @brief Returns the default random number generator.
-The function theRNG returns the default random number generator. For each thread, there is a
+The function cv::theRNG returns the default random number generator. For each thread, there is a
separate random number generator, so you can use the function safely in multi-thread environments.
If you just need to get a single random number using this generator or initialize an array, you can
use randu or randn instead. But if you are going to generate many random numbers inside a loop, it
@@ -2170,6 +2260,14 @@ is much faster to use this function to retrieve the generator and then use RNG::
*/
CV_EXPORTS RNG& theRNG();
+/** @brief Sets state of default random number generator.
+
+The function cv::setRNGSeed sets the state of the default random number generator to a custom value.
+@param seed new state for default random number generator
+@sa RNG, randu, randn
+*/
+CV_EXPORTS_W void setRNGSeed(int seed);
+
/** @brief Generates a single uniformly-distributed random number or an array of random numbers.
Non-template variant of the function fills the matrix dst with uniformly-distributed
@@ -2184,7 +2282,7 @@ CV_EXPORTS_W void randu(InputOutputArray dst, InputArray low, InputArray high);
/** @brief Fills the array with normally distributed random numbers.
-The function randn fills the matrix dst with normally distributed random numbers with the specified
+The function cv::randn fills the matrix dst with normally distributed random numbers with the specified
mean vector and the standard deviation matrix. The generated random numbers are clipped to fit the
value range of the output array data type.
@param dst output array of random numbers; the array must be pre-allocated and have 1 to 4 channels.
@@ -2197,7 +2295,7 @@ CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev
/** @brief Shuffles the array elements randomly.
-The function randShuffle shuffles the specified 1D array by randomly choosing pairs of elements and
+The function cv::randShuffle shuffles the specified 1D array by randomly choosing pairs of elements and
swapping them. The number of such swap operations will be dst.rows\*dst.cols\*iterFactor .
@param dst input/output numerical 1D array.
@param iterFactor scale factor that determines the number of random swap operations (see the details
@@ -2316,11 +2414,11 @@ public:
The operator performs %PCA of the supplied dataset. It is safe to reuse
the same PCA structure for multiple datasets. That is, if the structure
has been previously used with another dataset, the existing internal
- data is reclaimed and the new eigenvalues, @ref eigenvectors , and @ref
+ data is reclaimed and the new @ref eigenvalues, @ref eigenvectors and @ref
mean are allocated and computed.
- The computed eigenvalues are sorted from the largest to the smallest and
- the corresponding eigenvectors are stored as eigenvectors rows.
+ The computed @ref eigenvalues are sorted from the largest to the smallest and
+ the corresponding @ref eigenvectors are stored as eigenvectors rows.
@param data input samples stored as the matrix rows or as the matrix
columns.
@@ -2400,11 +2498,17 @@ public:
*/
void backProject(InputArray vec, OutputArray result) const;
- /** @brief write and load PCA matrix
+ /** @brief write PCA objects
-*/
- void write(FileStorage& fs ) const;
- void read(const FileNode& fs);
+ Writes @ref eigenvalues, @ref eigenvectors and @ref mean to the specified FileStorage
+ */
+ void write(FileStorage& fs) const;
+
+ /** @brief load PCA objects
+
+ Loads @ref eigenvalues, @ref eigenvectors and @ref mean from the specified FileNode
+ */
+ void read(const FileNode& fn);
Mat eigenvectors; //!< eigenvectors of the covariation matrix
Mat eigenvalues; //!< eigenvalues of the covariation matrix
@@ -2716,7 +2820,7 @@ public:
double a1 = rng.uniform((double)0, (double)1);
// produces float from [0, 1)
- double b = rng.uniform(0.f, 1.f);
+ float b = rng.uniform(0.f, 1.f);
// produces double from [0, 1)
double c = rng.uniform(0., 1.);
@@ -2732,8 +2836,8 @@ public:
want a floating-point random number, but the range boundaries are
integer numbers, either put dots in the end, if they are constants, or
use explicit type cast operators, as in the a1 initialization above.
- @param a lower inclusive boundary of the returned random numbers.
- @param b upper non-inclusive boundary of the returned random numbers.
+ @param a lower inclusive boundary of the returned random number.
+ @param b upper non-inclusive boundary of the returned random number.
*/
int uniform(int a, int b);
/** @overload */
@@ -2788,6 +2892,8 @@ public:
double gaussian(double sigma);
uint64 state;
+
+ bool operator ==(const RNG& other) const;
};
/** @brief Mersenne Twister random number generator
@@ -2987,19 +3093,25 @@ public:
*/
virtual void write(FileStorage& fs) const { (void)fs; }
+ /** @brief simplified API for language bindings
+ * @overload
+ */
+ CV_WRAP void write(const Ptr& fs, const String& name = String()) const;
+
/** @brief Reads algorithm parameters from a file storage
*/
- virtual void read(const FileNode& fn) { (void)fn; }
+ CV_WRAP virtual void read(const FileNode& fn) { (void)fn; }
/** @brief Returns true if the Algorithm is empty (e.g. in the very beginning or after unsuccessful read
*/
- virtual bool empty() const { return false; }
+ CV_WRAP virtual bool empty() const { return false; }
/** @brief Reads algorithm from the file node
This is static template method of Algorithm. It's usage is following (in the case of SVM):
@code
- Ptr svm = Algorithm::read(fn);
+ cv::FileStorage fsRead("example.xml", FileStorage::READ);
+ Ptr svm = Algorithm::read(fsRead.root());
@endcode
In order to make this method work, the derived class must overwrite Algorithm::read(const
FileNode& fn) and also have static create() method without parameters
@@ -3027,7 +3139,9 @@ public:
template static Ptr<_Tp> load(const String& filename, const String& objname=String())
{
FileStorage fs(filename, FileStorage::READ);
+ CV_Assert(fs.isOpened());
FileNode fn = objname.empty() ? fs.getFirstTopLevelNode() : fs[objname];
+ if (fn.empty()) return Ptr<_Tp>();
Ptr<_Tp> obj = _Tp::create();
obj->read(fn);
return !obj->empty() ? obj : Ptr<_Tp>();
@@ -3059,6 +3173,9 @@ public:
/** Returns the algorithm string identifier.
This string is used as top level xml/yml node tag when the object is saved to a file or string. */
CV_WRAP virtual String getDefaultName() const;
+
+protected:
+ void writeFormat(FileStorage& fs) const;
};
struct Param {
@@ -3164,5 +3281,6 @@ template<> struct ParamType
#include "opencv2/core/cvstd.inl.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/optim.hpp"
+#include "opencv2/core/ovx.hpp"
-#endif /*__OPENCV_CORE_HPP__*/
+#endif /*OPENCV_CORE_HPP*/
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/affine.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/affine.hpp
similarity index 91%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/affine.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/affine.hpp
index 2bce5b98..443097a5 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/affine.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/affine.hpp
@@ -41,8 +41,8 @@
//
//M*/
-#ifndef __OPENCV_CORE_AFFINE3_HPP__
-#define __OPENCV_CORE_AFFINE3_HPP__
+#ifndef OPENCV_CORE_AFFINE3_HPP
+#define OPENCV_CORE_AFFINE3_HPP
#ifdef __cplusplus
@@ -153,15 +153,24 @@ namespace cv
typedef _Tp channel_type;
enum { generic_type = 0,
- depth = DataType::depth,
channels = 16,
- fmt = DataType::fmt + ((channels - 1) << 8),
- type = CV_MAKETYPE(depth, channels)
+ fmt = traits::SafeFmt::fmt + ((channels - 1) << 8)
+#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED
+ ,depth = DataType::depth
+ ,type = CV_MAKETYPE(depth, channels)
+#endif
};
typedef Vec vec_type;
};
+ namespace traits {
+ template
+ struct Depth< Affine3<_Tp> > { enum { value = Depth<_Tp>::value }; };
+ template
+ struct Type< Affine3<_Tp> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, 16) }; };
+ } // namespace
+
//! @} core
}
@@ -202,7 +211,7 @@ cv::Affine3::Affine3(const Vec3& _rvec, const Vec3& t)
template inline
cv::Affine3::Affine3(const cv::Mat& data, const Vec3& t)
{
- CV_Assert(data.type() == cv::DataType::type);
+ CV_Assert(data.type() == cv::traits::Type::value);
if (data.cols == 4 && data.rows == 4)
{
@@ -213,11 +222,13 @@ cv::Affine3::Affine3(const cv::Mat& data, const Vec3& t)
{
rotation(data(Rect(0, 0, 3, 3)));
translation(data(Rect(3, 0, 1, 3)));
- return;
+ }
+ else
+ {
+ rotation(data);
+ translation(t);
}
- rotation(data);
- translation(t);
matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
matrix.val[15] = 1;
}
@@ -241,30 +252,25 @@ void cv::Affine3::rotation(const Mat3& R)
template inline
void cv::Affine3::rotation(const Vec3& _rvec)
{
- double rx = _rvec[0], ry = _rvec[1], rz = _rvec[2];
- double theta = std::sqrt(rx*rx + ry*ry + rz*rz);
+ double theta = norm(_rvec);
if (theta < DBL_EPSILON)
rotation(Mat3::eye());
else
{
- const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
-
double c = std::cos(theta);
double s = std::sin(theta);
double c1 = 1. - c;
double itheta = (theta != 0) ? 1./theta : 0.;
- rx *= itheta; ry *= itheta; rz *= itheta;
+ Point3_ r = _rvec*itheta;
- double rrt[] = { rx*rx, rx*ry, rx*rz, rx*ry, ry*ry, ry*rz, rx*rz, ry*rz, rz*rz };
- double _r_x_[] = { 0, -rz, ry, rz, 0, -rx, -ry, rx, 0 };
- Mat3 R;
+ Mat3 rrt( r.x*r.x, r.x*r.y, r.x*r.z, r.x*r.y, r.y*r.y, r.y*r.z, r.x*r.z, r.y*r.z, r.z*r.z );
+ Mat3 r_x( 0, -r.z, r.y, r.z, 0, -r.x, -r.y, r.x, 0 );
// R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]
// where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0]
- for(int k = 0; k < 9; ++k)
- R.val[k] = static_cast(c*I[k] + c1*rrt[k] + s*_r_x_[k]);
+ Mat3 R = c*Mat3::eye() + c1*rrt + s*r_x;
rotation(R);
}
@@ -274,7 +280,7 @@ void cv::Affine3::rotation(const Vec3& _rvec)
template inline
void cv::Affine3::rotation(const cv::Mat& data)
{
- CV_Assert(data.type() == cv::DataType::type);
+ CV_Assert(data.type() == cv::traits::Type::value);
if (data.cols == 3 && data.rows == 3)
{
@@ -488,21 +494,21 @@ cv::Vec3d cv::operator*(const cv::Affine3d& affine, const cv::Vec3d& v)
template inline
cv::Affine3::Affine3(const Eigen::Transform& affine)
{
- cv::Mat(4, 4, cv::DataType::type, affine.matrix().data()).copyTo(matrix);
+ cv::Mat(4, 4, cv::traits::Type::value, affine.matrix().data()).copyTo(matrix);
}
template inline
cv::Affine3::Affine3(const Eigen::Transform& affine)
{
Eigen::Transform a = affine;
- cv::Mat(4, 4, cv::DataType::type, a.matrix().data()).copyTo(matrix);
+ cv::Mat(4, 4, cv::traits::Type::value, a.matrix().data()).copyTo(matrix);
}
template inline
cv::Affine3::operator Eigen::Transform() const
{
Eigen::Transform r;
- cv::Mat hdr(4, 4, cv::DataType::type, r.matrix().data());
+ cv::Mat hdr(4, 4, cv::traits::Type::value, r.matrix().data());
cv::Mat(matrix, false).copyTo(hdr);
return r;
}
@@ -519,4 +525,4 @@ cv::Affine3::operator Eigen::Transform() const
#endif /* __cplusplus */
-#endif /* __OPENCV_CORE_AFFINE3_HPP__ */
+#endif /* OPENCV_CORE_AFFINE3_HPP */
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/base.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/base.hpp
similarity index 73%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/base.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/base.hpp
index ed633f5d..05190927 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/base.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/base.hpp
@@ -42,13 +42,15 @@
//
//M*/
-#ifndef __OPENCV_CORE_BASE_HPP__
-#define __OPENCV_CORE_BASE_HPP__
+#ifndef OPENCV_CORE_BASE_HPP
+#define OPENCV_CORE_BASE_HPP
#ifndef __cplusplus
# error base.hpp header must be compiled as C++
#endif
+#include "opencv2/opencv_modules.hpp"
+
#include
#include
@@ -64,38 +66,38 @@ namespace cv
namespace Error {
//! error codes
enum Code {
- StsOk= 0, //!< everithing is ok
+ StsOk= 0, //!< everything is ok
StsBackTrace= -1, //!< pseudo error for back trace
StsError= -2, //!< unknown /unspecified error
StsInternal= -3, //!< internal error (bad state)
StsNoMem= -4, //!< insufficient memory
StsBadArg= -5, //!< function arg/param is bad
StsBadFunc= -6, //!< unsupported function
- StsNoConv= -7, //!< iter. didn't converge
+ StsNoConv= -7, //!< iteration didn't converge
StsAutoTrace= -8, //!< tracing
HeaderIsNull= -9, //!< image header is NULL
BadImageSize= -10, //!< image size is invalid
BadOffset= -11, //!< offset is invalid
BadDataPtr= -12, //!<
- BadStep= -13, //!<
+ BadStep= -13, //!< image step is wrong, this may happen for a non-continuous matrix.
BadModelOrChSeq= -14, //!<
- BadNumChannels= -15, //!<
+ BadNumChannels= -15, //!< bad number of channels, for example, some functions accept only single channel matrices.
BadNumChannel1U= -16, //!<
- BadDepth= -17, //!<
+ BadDepth= -17, //!< input image depth is not supported by the function
BadAlphaChannel= -18, //!<
- BadOrder= -19, //!<
- BadOrigin= -20, //!<
- BadAlign= -21, //!<
+ BadOrder= -19, //!< number of dimensions is out of range
+ BadOrigin= -20, //!< incorrect input origin
+ BadAlign= -21, //!< incorrect input align
BadCallBack= -22, //!<
BadTileSize= -23, //!<
- BadCOI= -24, //!<
- BadROISize= -25, //!<
+ BadCOI= -24, //!< input COI is not supported
+ BadROISize= -25, //!< incorrect input roi
MaskIsTiled= -26, //!<
StsNullPtr= -27, //!< null pointer
StsVecLengthErr= -28, //!< incorrect vector length
- StsFilterStructContentErr= -29, //!< incorr. filter structure content
- StsKernelStructContentErr= -30, //!< incorr. transform kernel content
- StsFilterOffsetErr= -31, //!< incorrect filter ofset value
+ StsFilterStructContentErr= -29, //!< incorrect filter structure content
+ StsKernelStructContentErr= -30, //!< incorrect transform kernel content
+ StsFilterOffsetErr= -31, //!< incorrect filter offset value
StsBadSize= -201, //!< the input/output structure size is incorrect
StsDivByZero= -202, //!< division by zero
StsInplaceNotSupported= -203, //!< in-place operation is not supported
@@ -111,13 +113,13 @@ enum Code {
StsNotImplemented= -213, //!< the requested function/feature is not implemented
StsBadMemBlock= -214, //!< an allocated block has been corrupted
StsAssert= -215, //!< assertion failed
- GpuNotSupported= -216,
- GpuApiCallError= -217,
- OpenGlNotSupported= -218,
- OpenGlApiCallError= -219,
- OpenCLApiCallError= -220,
+ GpuNotSupported= -216, //!< no CUDA support
+ GpuApiCallError= -217, //!< GPU API call error
+ OpenGlNotSupported= -218, //!< no OpenGL support
+ OpenGlApiCallError= -219, //!< OpenGL API call error
+ OpenCLApiCallError= -220, //!< OpenCL API call error
OpenCLDoubleNotSupported= -221,
- OpenCLInitError= -222,
+ OpenCLInitError= -222, //!< OpenCL initialization error
OpenCLNoAMDBlasFft= -223
};
} //Error
@@ -150,46 +152,57 @@ enum DecompTypes {
};
/** norm types
-- For one array:
-\f[norm = \forkthree{\|\texttt{src1}\|_{L_{\infty}} = \max _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
-{ \| \texttt{src1} \| _{L_1} = \sum _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\) }
-{ \| \texttt{src1} \| _{L_2} = \sqrt{\sum_I \texttt{src1}(I)^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }\f]
-- Absolute norm for two arrays
-\f[norm = \forkthree{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} = \max _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
-{ \| \texttt{src1} - \texttt{src2} \| _{L_1} = \sum _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\) }
-{ \| \texttt{src1} - \texttt{src2} \| _{L_2} = \sqrt{\sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }\f]
+src1 and src2 denote input arrays.
+*/
-- Relative norm for two arrays
-\f[norm = \forkthree{\frac{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} }{\|\texttt{src2}\|_{L_{\infty}} }}{if \(\texttt{normType} = \texttt{NORM_RELATIVE_INF}\) }
-{ \frac{\|\texttt{src1}-\texttt{src2}\|_{L_1} }{\|\texttt{src2}\|_{L_1}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE_L1}\) }
-{ \frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE_L2}\) }\f]
-
-As example for one array consider the function \f$r(x)= \begin{pmatrix} x \\ 1-x \end{pmatrix}, x \in [-1;1]\f$.
-The \f$ L_{1}, L_{2} \f$ and \f$ L_{\infty} \f$ norm for the sample value \f$r(-1) = \begin{pmatrix} -1 \\ 2 \end{pmatrix}\f$
-is calculated as follows
-\f{align*}
- \| r(-1) \|_{L_1} &= |-1| + |2| = 3 \\
- \| r(-1) \|_{L_2} &= \sqrt{(-1)^{2} + (2)^{2}} = \sqrt{5} \\
- \| r(-1) \|_{L_\infty} &= \max(|-1|,|2|) = 2
-\f}
-and for \f$r(0.5) = \begin{pmatrix} 0.5 \\ 0.5 \end{pmatrix}\f$ the calculation is
-\f{align*}
- \| r(0.5) \|_{L_1} &= |0.5| + |0.5| = 1 \\
- \| r(0.5) \|_{L_2} &= \sqrt{(0.5)^{2} + (0.5)^{2}} = \sqrt{0.5} \\
- \| r(0.5) \|_{L_\infty} &= \max(|0.5|,|0.5|) = 0.5.
-\f}
-The following graphic shows all values for the three norm functions \f$\| r(x) \|_{L_1}, \| r(x) \|_{L_2}\f$ and \f$\| r(x) \|_{L_\infty}\f$.
-It is notable that the \f$ L_{1} \f$ norm forms the upper and the \f$ L_{\infty} \f$ norm forms the lower border for the example function \f$ r(x) \f$.
-
- */
-enum NormTypes { NORM_INF = 1,
+enum NormTypes {
+ /**
+ \f[
+ norm = \forkthree
+ {\|\texttt{src1}\|_{L_{\infty}} = \max _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
+ {\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} = \max _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) }
+ {\frac{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} }{\|\texttt{src2}\|_{L_{\infty}} }}{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_INF}\) }
+ \f]
+ */
+ NORM_INF = 1,
+ /**
+ \f[
+ norm = \forkthree
+ {\| \texttt{src1} \| _{L_1} = \sum _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\)}
+ { \| \texttt{src1} - \texttt{src2} \| _{L_1} = \sum _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\) }
+ { \frac{\|\texttt{src1}-\texttt{src2}\|_{L_1} }{\|\texttt{src2}\|_{L_1}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L1}\) }
+ \f]*/
NORM_L1 = 2,
+ /**
+ \f[
+ norm = \forkthree
+ { \| \texttt{src1} \| _{L_2} = \sqrt{\sum_I \texttt{src1}(I)^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }
+ { \| \texttt{src1} - \texttt{src2} \| _{L_2} = \sqrt{\sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) }
+ { \frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L2}\) }
+ \f]
+ */
NORM_L2 = 4,
+ /**
+ \f[
+ norm = \forkthree
+ { \| \texttt{src1} \| _{L_2} ^{2} = \sum_I \texttt{src1}(I)^2} {if \(\texttt{normType} = \texttt{NORM_L2SQR}\)}
+ { \| \texttt{src1} - \texttt{src2} \| _{L_2} ^{2} = \sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2 }{if \(\texttt{normType} = \texttt{NORM_L2SQR}\) }
+ { \left(\frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}}\right)^2 }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L2}\) }
+ \f]
+ */
NORM_L2SQR = 5,
+ /**
+ In the case of one input array, calculates the Hamming distance of the array from zero;
+ in the case of two input arrays, calculates the Hamming distance between the arrays.
+ */
NORM_HAMMING = 6,
+ /**
+ Similar to NORM_HAMMING, but in the calculation, each two bits of the input sequence will
+ be added and treated as a single bit to be used in the same calculation as NORM_HAMMING.
+ */
NORM_HAMMING2 = 7,
- NORM_TYPE_MASK = 7,
+ NORM_TYPE_MASK = 7, //!< bit-mask which can be used to separate norm type from norm flags
NORM_RELATIVE = 8, //!< flag
NORM_MINMAX = 32 //!< flag
};
@@ -237,6 +250,10 @@ enum DftFlags {
into a real array and inverse transformation is executed, the function treats the input as a
packed complex-conjugate symmetrical array, and the output will also be a real array). */
DFT_REAL_OUTPUT = 32,
+ /** specifies that input is complex input. If this flag is set, the input must have 2 channels.
+ On the other hand, for backwards compatibility reasons, if the input has 2 channels, the input is
+ already considered complex. */
+ DFT_COMPLEX_INPUT = 64,
/** performs an inverse 1D or 2D transform instead of the default forward transform. */
DCT_INVERSE = DFT_INVERSE,
/** performs a forward or inverse transform of every individual row of the input
@@ -325,7 +342,23 @@ enum BorderTypes {
#define CV_SUPPRESS_DEPRECATED_START
#define CV_SUPPRESS_DEPRECATED_END
#endif
+
#define CV_UNUSED(name) (void)name
+
+#if defined __GNUC__ && !defined __EXCEPTIONS
+#define CV_TRY
+#define CV_CATCH(A, B) for (A B; false; )
+#define CV_CATCH_ALL if (false)
+#define CV_THROW(A) abort()
+#define CV_RETHROW() abort()
+#else
+#define CV_TRY try
+#define CV_CATCH(A, B) catch(const A & B)
+#define CV_CATCH_ALL catch(...)
+#define CV_THROW(A) throw A
+#define CV_RETHROW() throw
+#endif
+
//! @endcond
/*! @brief Signals an error and raises the exception.
@@ -336,8 +369,8 @@ It is possible to alternate error processing by using redirectError().
@param _code - error code (Error::Code)
@param _err - error description
@param _func - function name. Available only when the compiler supports getting it
-@param _file - source file name where the error has occured
-@param _line - line number in the source file where the error has occured
+@param _file - source file name where the error has occurred
+@param _line - line number in the source file where the error has occurred
@see CV_Error, CV_Error_, CV_ErrorNoReturn, CV_ErrorNoReturn_, CV_Assert, CV_DbgAssert
*/
CV_EXPORTS void error(int _code, const String& _err, const char* _func, const char* _file, int _line);
@@ -375,6 +408,17 @@ CV_INLINE CV_NORETURN void errorNoReturn(int _code, const String& _err, const ch
#define CV_Func ""
#endif
+#ifdef CV_STATIC_ANALYSIS
+// In practice, some macros are not processed correctly (noreturn is not detected).
+// We need to use simplified definition for them.
+#define CV_Error(...) do { abort(); } while (0)
+#define CV_Error_(...) do { abort(); } while (0)
+#define CV_Assert(cond) do { if (!(cond)) abort(); } while (0)
+#define CV_ErrorNoReturn(...) do { abort(); } while (0)
+#define CV_ErrorNoReturn_(...) do { abort(); } while (0)
+
+#else // CV_STATIC_ANALYSIS
+
/** @brief Call the error handler.
Currently, the error handler prints the error code and the error message to the standard
@@ -407,7 +451,22 @@ The macros CV_Assert (and CV_DbgAssert(expr)) evaluate the specified expression.
raise an error (see cv::error). The macro CV_Assert checks the condition in both Debug and Release
configurations while CV_DbgAssert is only retained in the Debug configuration.
*/
-#define CV_Assert( expr ) if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ )
+
+#define CV_VA_NUM_ARGS_HELPER(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N
+#define CV_VA_NUM_ARGS(...) CV_VA_NUM_ARGS_HELPER(__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define CV_Assert_1( expr ) if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ )
+#define CV_Assert_2( expr1, expr2 ) CV_Assert_1(expr1); CV_Assert_1(expr2)
+#define CV_Assert_3( expr1, expr2, expr3 ) CV_Assert_2(expr1, expr2); CV_Assert_1(expr3)
+#define CV_Assert_4( expr1, expr2, expr3, expr4 ) CV_Assert_3(expr1, expr2, expr3); CV_Assert_1(expr4)
+#define CV_Assert_5( expr1, expr2, expr3, expr4, expr5 ) CV_Assert_4(expr1, expr2, expr3, expr4); CV_Assert_1(expr5)
+#define CV_Assert_6( expr1, expr2, expr3, expr4, expr5, expr6 ) CV_Assert_5(expr1, expr2, expr3, expr4, expr5); CV_Assert_1(expr6)
+#define CV_Assert_7( expr1, expr2, expr3, expr4, expr5, expr6, expr7 ) CV_Assert_6(expr1, expr2, expr3, expr4, expr5, expr6 ); CV_Assert_1(expr7)
+#define CV_Assert_8( expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8 ) CV_Assert_7(expr1, expr2, expr3, expr4, expr5, expr6, expr7 ); CV_Assert_1(expr8)
+#define CV_Assert_9( expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9 ) CV_Assert_8(expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8 ); CV_Assert_1(expr9)
+#define CV_Assert_10( expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9, expr10 ) CV_Assert_9(expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9 ); CV_Assert_1(expr10)
+
+#define CV_Assert(...) CVAUX_CONCAT(CV_Assert_, CV_VA_NUM_ARGS(__VA_ARGS__)) (__VA_ARGS__)
/** same as CV_Error(code,msg), but does not return */
#define CV_ErrorNoReturn( code, msg ) cv::errorNoReturn( code, msg, CV_Func, __FILE__, __LINE__ )
@@ -415,6 +474,8 @@ configurations while CV_DbgAssert is only retained in the Debug configuration.
/** same as CV_Error_(code,args), but does not return */
#define CV_ErrorNoReturn_( code, args ) cv::errorNoReturn( code, cv::format args, CV_Func, __FILE__, __LINE__ )
+#endif // CV_STATIC_ANALYSIS
+
/** replaced with CV_Assert(expr) in Debug configuration */
#ifdef _DEBUG
# define CV_DbgAssert(expr) CV_Assert(expr)
@@ -665,13 +726,23 @@ namespace cudev
namespace ipp
{
-CV_EXPORTS int getIppFeatures();
-CV_EXPORTS void setIppStatus(int status, const char * const funcname = NULL, const char * const filename = NULL,
+#if OPENCV_ABI_COMPATIBILITY > 300
+CV_EXPORTS unsigned long long getIppFeatures();
+#else
+CV_EXPORTS int getIppFeatures();
+#endif
+CV_EXPORTS void setIppStatus(int status, const char * const funcname = NULL, const char * const filename = NULL,
int line = 0);
-CV_EXPORTS int getIppStatus();
-CV_EXPORTS String getIppErrorLocation();
-CV_EXPORTS bool useIPP();
-CV_EXPORTS void setUseIPP(bool flag);
+CV_EXPORTS int getIppStatus();
+CV_EXPORTS String getIppErrorLocation();
+CV_EXPORTS_W bool useIPP();
+CV_EXPORTS_W void setUseIPP(bool flag);
+CV_EXPORTS_W String getIppVersion();
+
+// IPP Not-Exact mode. This function may force use of IPP when both IPP and OpenCV provide proper results
+// but have internal accuracy differences which have too much direct or indirect impact on accuracy tests.
+CV_EXPORTS_W bool useIPP_NE();
+CV_EXPORTS_W void setUseIPP_NE(bool flag);
} // ipp
@@ -685,5 +756,6 @@ CV_EXPORTS void setUseIPP(bool flag);
} // cv
#include "opencv2/core/neon_utils.hpp"
+#include "opencv2/core/vsx_utils.hpp"
-#endif //__OPENCV_CORE_BASE_HPP__
+#endif //OPENCV_CORE_BASE_HPP
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/bufferpool.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/bufferpool.hpp
similarity index 72%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/bufferpool.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/bufferpool.hpp
index 76df2d29..4698e5da 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/bufferpool.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/bufferpool.hpp
@@ -4,8 +4,13 @@
//
// Copyright (C) 2014, Advanced Micro Devices, Inc., all rights reserved.
-#ifndef __OPENCV_CORE_BUFFER_POOL_HPP__
-#define __OPENCV_CORE_BUFFER_POOL_HPP__
+#ifndef OPENCV_CORE_BUFFER_POOL_HPP
+#define OPENCV_CORE_BUFFER_POOL_HPP
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4265)
+#endif
namespace cv
{
@@ -28,4 +33,8 @@ public:
}
-#endif // __OPENCV_CORE_BUFFER_POOL_HPP__
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+#endif // OPENCV_CORE_BUFFER_POOL_HPP
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/core.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/core.hpp
similarity index 100%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/core.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/core.hpp
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/core_c.h b/lib/3rdParty/OpenCV3.4/include/opencv2/core/core_c.h
similarity index 98%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/core_c.h
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/core_c.h
index a0ed6326..754af2fc 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/core_c.h
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/core_c.h
@@ -42,8 +42,8 @@
//M*/
-#ifndef __OPENCV_CORE_C_H__
-#define __OPENCV_CORE_C_H__
+#ifndef OPENCV_CORE_C_H
+#define OPENCV_CORE_C_H
#include "opencv2/core/types_c.h"
@@ -359,7 +359,7 @@ CVAPI(CvMat*) cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect );
/** @brief Returns array row or row span.
-The functions return the header, corresponding to a specified row/row span of the input array.
+The function returns the header, corresponding to a specified row/row span of the input array.
cvGetRow(arr, submat, row) is a shortcut for cvGetRows(arr, submat, row, row+1).
@param arr Input array
@param submat Pointer to the resulting sub-array header
@@ -385,7 +385,7 @@ CV_INLINE CvMat* cvGetRow( const CvArr* arr, CvMat* submat, int row )
/** @brief Returns one of more array columns.
-The functions return the header, corresponding to a specified column span of the input array. That
+The function returns the header, corresponding to a specified column span of the input array. That
is, no data is copied. Therefore, any modifications of the submatrix will affect the original array.
If you need to copy the columns, use cvCloneMat. cvGetCol(arr, submat, col) is a shortcut for
@@ -1976,8 +1976,16 @@ CVAPI(void) cvSetIPLAllocators( Cv_iplCreateImageHeader create_header,
The function opens file storage for reading or writing data. In the latter case, a new file is
created or an existing file is rewritten. The type of the read or written file is determined by the
-filename extension: .xml for XML and .yml or .yaml for YAML. The function returns a pointer to the
-CvFileStorage structure. If the file cannot be opened then the function returns NULL.
+filename extension: .xml for XML, .yml or .yaml for YAML and .json for JSON.
+
+At the same time, it also supports adding parameters like "example.xml?base64". The three ways
+are the same:
+@snippet samples/cpp/filestorage_base64.cpp suffix_in_file_name
+@snippet samples/cpp/filestorage_base64.cpp flag_write_base64
+@snippet samples/cpp/filestorage_base64.cpp flag_write_and_flag_base64
+
+The function returns a pointer to the CvFileStorage structure.
+If the file cannot be opened then the function returns NULL.
@param filename Name of the file associated with the storage
@param memstorage Memory storage used for temporary data and for
: storing dynamic structures, such as CvSeq or CvGraph . If it is NULL, a temporary memory
@@ -1985,6 +1993,7 @@ CvFileStorage structure. If the file cannot be opened then the function returns
@param flags Can be one of the following:
> - **CV_STORAGE_READ** the storage is open for reading
> - **CV_STORAGE_WRITE** the storage is open for writing
+ (use **CV_STORAGE_WRITE | CV_STORAGE_WRITE_BASE64** to write rawdata in Base64)
@param encoding
*/
CVAPI(CvFileStorage*) cvOpenFileStorage( const char* filename, CvMemStorage* memstorage,
@@ -2022,7 +2031,8 @@ One and only one of the two above flags must be specified
@param type_name Optional parameter - the object type name. In
case of XML it is written as a type_id attribute of the structure opening tag. In the case of
YAML it is written after a colon following the structure name (see the example in
- CvFileStorage description). Mainly it is used with user objects. When the storage is read, the
+ CvFileStorage description). In case of JSON it is written as a name/value pair.
+ Mainly it is used with user objects. When the storage is read, the
encoded type name is used to determine the object type (see CvTypeInfo and cvFindType ).
@param attributes This parameter is not used in the current implementation
*/
@@ -2162,7 +2172,7 @@ the file with multiple streams looks like this:
@endcode
The YAML file will look like this:
@code{.yaml}
- %YAML:1.0
+ %YAML 1.0
# stream #1 data
...
---
@@ -2187,6 +2197,28 @@ to a sequence rather than a map.
CVAPI(void) cvWriteRawData( CvFileStorage* fs, const void* src,
int len, const char* dt );
+/** @brief Writes multiple numbers in Base64.
+
+If either CV_STORAGE_WRITE_BASE64 or cv::FileStorage::WRITE_BASE64 is used,
+this function will be the same as cvWriteRawData. If neither, the main
+difference is that it outputs a sequence in Base64 encoding rather than
+in plain text.
+
+This function can only be used to write a sequence with a type "binary".
+
+Consider the following two examples where their output is the same:
+@snippet samples/cpp/filestorage_base64.cpp without_base64_flag
+and
+@snippet samples/cpp/filestorage_base64.cpp with_write_base64_flag
+
+@param fs File storage
+@param src Pointer to the written array
+@param len Number of the array elements to write
+@param dt Specification of each array element, see @ref format_spec "format specification"
+*/
+CVAPI(void) cvWriteRawDataBase64( CvFileStorage* fs, const void* src,
+ int len, const char* dt );
+
/** @brief Returns a unique pointer for a given name.
The function returns a unique pointer for each particular file node name. This pointer can be then
@@ -2468,7 +2500,7 @@ CVAPI(void) cvReadRawData( const CvFileStorage* fs, const CvFileNode* src,
/** @brief Writes a file node to another file storage.
The function writes a copy of a file node to file storage. Possible applications of the function are
-merging several file storages into one and conversion between XML and YAML formats.
+merging several file storages into one and conversion between XML, YAML and JSON formats.
@param fs Destination file storage
@param new_node_name New name of the file node in the destination file storage. To keep the
existing name, use cvcvGetFileNodeName
@@ -2622,7 +2654,7 @@ CVAPI(int) cvGetErrMode( void );
/** Sets error processing mode, returns previously used mode */
CVAPI(int) cvSetErrMode( int mode );
-/** Sets error status and performs some additonal actions (displaying message box,
+/** Sets error status and performs some additional actions (displaying message box,
writing message to stderr, terminating application etc.)
depending on the current error mode */
CVAPI(void) cvError( int status, const char* func_name,
@@ -2631,7 +2663,7 @@ CVAPI(void) cvError( int status, const char* func_name,
/** Retrieves textual description of the error given its code */
CVAPI(const char*) cvErrorStr( int status );
-/** Retrieves detailed information about the last error occured */
+/** Retrieves detailed information about the last error occurred */
CVAPI(int) cvGetErrInfo( const char** errcode_desc, const char** description,
const char** filename, int* line );
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cuda.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cuda.hpp
similarity index 93%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/cuda.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/cuda.hpp
index 64bc53ef..6dca2723 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cuda.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cuda.hpp
@@ -41,8 +41,8 @@
//
//M*/
-#ifndef __OPENCV_CORE_CUDA_HPP__
-#define __OPENCV_CORE_CUDA_HPP__
+#ifndef OPENCV_CORE_CUDA_HPP
+#define OPENCV_CORE_CUDA_HPP
#ifndef __cplusplus
# error cuda.hpp header must be compiled as C++
@@ -327,6 +327,34 @@ The function does not reallocate memory if the matrix has proper attributes alre
*/
CV_EXPORTS void ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr);
+/** @brief BufferPool for use with CUDA streams
+
+ * BufferPool utilizes cuda::Stream's allocator to create new buffers. It is
+ * particularly useful when BufferPoolUsage is set to true, or a custom
+ * allocator is specified for the cuda::Stream, and you want to implement your
+ * own stream based functions utilizing the same underlying GPU memory
+ * management.
+ */
+class CV_EXPORTS BufferPool
+{
+public:
+
+ //! Gets the BufferPool for the given stream.
+ explicit BufferPool(Stream& stream);
+
+ //! Allocates a new GpuMat of given size and type.
+ GpuMat getBuffer(int rows, int cols, int type);
+
+ //! Allocates a new GpuMat of given size and type.
+ GpuMat getBuffer(Size size, int type) { return getBuffer(size.height, size.width, type); }
+
+ //! Returns the allocator associated with the stream.
+ Ptr getAllocator() const { return allocator_; }
+
+private:
+ Ptr allocator_;
+};
+
//! BufferPool management (must be called before Stream creation)
CV_EXPORTS void setBufferPoolUsage(bool on);
CV_EXPORTS void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCount);
@@ -447,7 +475,26 @@ CV_EXPORTS void unregisterPageLocked(Mat& m);
functions use the constant GPU memory, and next call may update the memory before the previous one
has been finished. But calling different operations asynchronously is safe because each operation
has its own constant buffer. Memory copy/upload/download/set operations to the buffers you hold are
-also safe. :
+also safe.
+
+@note The Stream class is not thread-safe. Please use different Stream objects for different CPU threads.
+
+@code
+void thread1()
+{
+ cv::cuda::Stream stream1;
+ cv::cuda::func1(..., stream1);
+}
+
+void thread2()
+{
+ cv::cuda::Stream stream2;
+ cv::cuda::func2(..., stream2);
+}
+@endcode
+
+@note By default all CUDA routines are launched in Stream::Null() object, if the stream is not specified by user.
+In multi-threading environment the stream objects must be passed explicitly (see previous note).
*/
class CV_EXPORTS Stream
{
@@ -460,6 +507,9 @@ public:
//! creates a new asynchronous stream
Stream();
+ //! creates a new asynchronous stream with custom allocator
+ Stream(const Ptr& allocator);
+
/** @brief Returns true if the current stream queue is finished. Otherwise, it returns false.
*/
bool queryIfComplete() const;
@@ -545,7 +595,8 @@ private:
/** @brief Returns the number of installed CUDA-enabled devices.
Use this function before any other CUDA functions calls. If OpenCV is compiled without CUDA support,
-this function returns 0.
+this function returns 0. If the CUDA driver is not installed, or is incompatible, this function
+returns -1.
*/
CV_EXPORTS int getCudaEnabledDeviceCount();
@@ -836,6 +887,15 @@ private:
CV_EXPORTS void printCudaDeviceInfo(int device);
CV_EXPORTS void printShortCudaDeviceInfo(int device);
+/** @brief Converts an array to half precision floating number.
+
+@param _src input array.
+@param _dst output array.
+@param stream Stream for the asynchronous version.
+@sa convertFp16
+*/
+CV_EXPORTS void convertFp16(InputArray _src, OutputArray _dst, Stream& stream = Stream::Null());
+
//! @} cudacore_init
}} // namespace cv { namespace cuda {
@@ -843,4 +903,4 @@ CV_EXPORTS void printShortCudaDeviceInfo(int device);
#include "opencv2/core/cuda.inl.hpp"
-#endif /* __OPENCV_CORE_CUDA_HPP__ */
+#endif /* OPENCV_CORE_CUDA_HPP */
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cuda.inl.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cuda.inl.hpp
similarity index 99%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/cuda.inl.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/cuda.inl.hpp
index 01dc6d7c..35ae2e49 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cuda.inl.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cuda.inl.hpp
@@ -41,8 +41,8 @@
//
//M*/
-#ifndef __OPENCV_CORE_CUDAINL_HPP__
-#define __OPENCV_CORE_CUDAINL_HPP__
+#ifndef OPENCV_CORE_CUDAINL_HPP
+#define OPENCV_CORE_CUDAINL_HPP
#include "opencv2/core/cuda.hpp"
@@ -628,4 +628,4 @@ Mat::Mat(const cuda::GpuMat& m)
//! @endcond
-#endif // __OPENCV_CORE_CUDAINL_HPP__
+#endif // OPENCV_CORE_CUDAINL_HPP
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cuda_stream_accessor.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cuda_stream_accessor.hpp
similarity index 95%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/cuda_stream_accessor.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/cuda_stream_accessor.hpp
index 0f8ee9b2..deaf356f 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cuda_stream_accessor.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cuda_stream_accessor.hpp
@@ -40,8 +40,8 @@
//
//M*/
-#ifndef __OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP__
-#define __OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP__
+#ifndef OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP
+#define OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP
#ifndef __cplusplus
# error cuda_stream_accessor.hpp header must be compiled as C++
@@ -83,4 +83,4 @@ namespace cv
}
}
-#endif /* __OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP__ */
+#endif /* OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP */
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cuda_types.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cuda_types.hpp
similarity index 97%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/cuda_types.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/cuda_types.hpp
index 8df816e8..f13a8474 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cuda_types.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cuda_types.hpp
@@ -40,8 +40,8 @@
//
//M*/
-#ifndef __OPENCV_CORE_CUDA_TYPES_HPP__
-#define __OPENCV_CORE_CUDA_TYPES_HPP__
+#ifndef OPENCV_CORE_CUDA_TYPES_HPP
+#define OPENCV_CORE_CUDA_TYPES_HPP
#ifndef __cplusplus
# error cuda_types.hpp header must be compiled as C++
@@ -132,4 +132,4 @@ namespace cv
//! @endcond
-#endif /* __OPENCV_CORE_CUDA_TYPES_HPP__ */
+#endif /* OPENCV_CORE_CUDA_TYPES_HPP */
diff --git a/lib/3rdParty/OpenCV3.4/include/opencv2/core/cv_cpu_dispatch.h b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cv_cpu_dispatch.h
new file mode 100644
index 00000000..5261a414
--- /dev/null
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cv_cpu_dispatch.h
@@ -0,0 +1,228 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#if defined __OPENCV_BUILD \
+
+#include "cv_cpu_config.h"
+#include "cv_cpu_helper.h"
+
+#ifdef CV_CPU_DISPATCH_MODE
+#define CV_CPU_OPTIMIZATION_NAMESPACE __CV_CAT(opt_, CV_CPU_DISPATCH_MODE)
+#define CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN namespace __CV_CAT(opt_, CV_CPU_DISPATCH_MODE) {
+#define CV_CPU_OPTIMIZATION_NAMESPACE_END }
+#else
+#define CV_CPU_OPTIMIZATION_NAMESPACE cpu_baseline
+#define CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN namespace cpu_baseline {
+#define CV_CPU_OPTIMIZATION_NAMESPACE_END }
+#endif
+
+
+#define __CV_CPU_DISPATCH_CHAIN_END(fn, args, mode, ...) /* done */
+#define __CV_CPU_DISPATCH(fn, args, mode, ...) __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+#define __CV_CPU_DISPATCH_EXPAND(fn, args, ...) __CV_EXPAND(__CV_CPU_DISPATCH(fn, args, __VA_ARGS__))
+#define CV_CPU_DISPATCH(fn, args, ...) __CV_CPU_DISPATCH_EXPAND(fn, args, __VA_ARGS__, END) // expand macros
+
+
+#if defined CV_ENABLE_INTRINSICS \
+ && !defined CV_DISABLE_OPTIMIZATION \
+ && !defined __CUDACC__ /* do not include SSE/AVX/NEON headers for NVCC compiler */ \
+
+#ifdef CV_CPU_COMPILE_SSE2
+# include
+# define CV_MMX 1
+# define CV_SSE 1
+# define CV_SSE2 1
+#endif
+#ifdef CV_CPU_COMPILE_SSE3
+# include
+# define CV_SSE3 1
+#endif
+#ifdef CV_CPU_COMPILE_SSSE3
+# include
+# define CV_SSSE3 1
+#endif
+#ifdef CV_CPU_COMPILE_SSE4_1
+# include
+# define CV_SSE4_1 1
+#endif
+#ifdef CV_CPU_COMPILE_SSE4_2
+# include
+# define CV_SSE4_2 1
+#endif
+#ifdef CV_CPU_COMPILE_POPCNT
+# ifdef _MSC_VER
+# include
+# if defined(_M_X64)
+# define CV_POPCNT_U64 _mm_popcnt_u64
+# endif
+# define CV_POPCNT_U32 _mm_popcnt_u32
+# else
+# include
+# if defined(__x86_64__)
+# define CV_POPCNT_U64 __builtin_popcountll
+# endif
+# define CV_POPCNT_U32 __builtin_popcount
+# endif
+# define CV_POPCNT 1
+#endif
+#ifdef CV_CPU_COMPILE_AVX
+# include
+# define CV_AVX 1
+#endif
+#ifdef CV_CPU_COMPILE_FP16
+# if defined(__arm__) || defined(__aarch64__) || defined(_M_ARM)
+# include
+# else
+# include
+# endif
+# define CV_FP16 1
+#endif
+#ifdef CV_CPU_COMPILE_AVX2
+# include
+# define CV_AVX2 1
+#endif
+#ifdef CV_CPU_COMPILE_FMA3
+# define CV_FMA3 1
+#endif
+
+#if defined _WIN32 && defined(_M_ARM)
+# include
+# include
+# define CV_NEON 1
+#elif defined(__ARM_NEON__) || (defined (__ARM_NEON) && defined(__aarch64__))
+# include
+# define CV_NEON 1
+#endif
+
+#if defined(__ARM_NEON__) || defined(__aarch64__)
+# include
+#endif
+
+#if defined(__VSX__) && defined(__PPC64__) && defined(__LITTLE_ENDIAN__)
+# include
+# undef vector
+# undef pixel
+# undef bool
+# define CV_VSX 1
+#endif
+
+#endif // CV_ENABLE_INTRINSICS && !CV_DISABLE_OPTIMIZATION && !__CUDACC__
+
+#if defined CV_CPU_COMPILE_AVX && !defined CV_CPU_BASELINE_COMPILE_AVX
+struct VZeroUpperGuard {
+#ifdef __GNUC__
+ __attribute__((always_inline))
+#endif
+ inline ~VZeroUpperGuard() { _mm256_zeroupper(); }
+};
+#define __CV_AVX_GUARD VZeroUpperGuard __vzeroupper_guard; (void)__vzeroupper_guard;
+#endif
+
+#ifdef __CV_AVX_GUARD
+#define CV_AVX_GUARD __CV_AVX_GUARD
+#else
+#define CV_AVX_GUARD
+#endif
+
+#endif // __OPENCV_BUILD
+
+
+
+#if !defined __OPENCV_BUILD /* Compatibility code */ \
+ && !defined __CUDACC__ /* do not include SSE/AVX/NEON headers for NVCC compiler */
+#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2)
+# include
+# define CV_MMX 1
+# define CV_SSE 1
+# define CV_SSE2 1
+#elif defined _WIN32 && defined(_M_ARM)
+# include
+# include
+# define CV_NEON 1
+#elif defined(__ARM_NEON__) || (defined (__ARM_NEON) && defined(__aarch64__))
+# include
+# define CV_NEON 1
+#elif defined(__VSX__) && defined(__PPC64__) && defined(__LITTLE_ENDIAN__)
+# include
+# undef vector
+# undef pixel
+# undef bool
+# define CV_VSX 1
+#endif
+
+#endif // !__OPENCV_BUILD && !__CUDACC (Compatibility code)
+
+
+
+#ifndef CV_MMX
+# define CV_MMX 0
+#endif
+#ifndef CV_SSE
+# define CV_SSE 0
+#endif
+#ifndef CV_SSE2
+# define CV_SSE2 0
+#endif
+#ifndef CV_SSE3
+# define CV_SSE3 0
+#endif
+#ifndef CV_SSSE3
+# define CV_SSSE3 0
+#endif
+#ifndef CV_SSE4_1
+# define CV_SSE4_1 0
+#endif
+#ifndef CV_SSE4_2
+# define CV_SSE4_2 0
+#endif
+#ifndef CV_POPCNT
+# define CV_POPCNT 0
+#endif
+#ifndef CV_AVX
+# define CV_AVX 0
+#endif
+#ifndef CV_FP16
+# define CV_FP16 0
+#endif
+#ifndef CV_AVX2
+# define CV_AVX2 0
+#endif
+#ifndef CV_FMA3
+# define CV_FMA3 0
+#endif
+#ifndef CV_AVX_512F
+# define CV_AVX_512F 0
+#endif
+#ifndef CV_AVX_512BW
+# define CV_AVX_512BW 0
+#endif
+#ifndef CV_AVX_512CD
+# define CV_AVX_512CD 0
+#endif
+#ifndef CV_AVX_512DQ
+# define CV_AVX_512DQ 0
+#endif
+#ifndef CV_AVX_512ER
+# define CV_AVX_512ER 0
+#endif
+#ifndef CV_AVX_512IFMA512
+# define CV_AVX_512IFMA512 0
+#endif
+#ifndef CV_AVX_512PF
+# define CV_AVX_512PF 0
+#endif
+#ifndef CV_AVX_512VBMI
+# define CV_AVX_512VBMI 0
+#endif
+#ifndef CV_AVX_512VL
+# define CV_AVX_512VL 0
+#endif
+
+#ifndef CV_NEON
+# define CV_NEON 0
+#endif
+
+#ifndef CV_VSX
+# define CV_VSX 0
+#endif
diff --git a/lib/3rdParty/OpenCV3.4/include/opencv2/core/cv_cpu_helper.h b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cv_cpu_helper.h
new file mode 100644
index 00000000..66a473f1
--- /dev/null
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cv_cpu_helper.h
@@ -0,0 +1,199 @@
+// AUTOGENERATED, DO NOT EDIT
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE
+# define CV_TRY_SSE 1
+# define CV_CPU_HAS_SUPPORT_SSE 1
+# define CV_CPU_CALL_SSE(fn, args) return (opt_SSE::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE
+# define CV_TRY_SSE 1
+# define CV_CPU_HAS_SUPPORT_SSE (cv::checkHardwareSupport(CV_CPU_SSE))
+# define CV_CPU_CALL_SSE(fn, args) if (CV_CPU_HAS_SUPPORT_SSE) return (opt_SSE::fn args)
+#else
+# define CV_TRY_SSE 0
+# define CV_CPU_HAS_SUPPORT_SSE 0
+# define CV_CPU_CALL_SSE(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_SSE(fn, args, mode, ...) CV_CPU_CALL_SSE(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE2
+# define CV_TRY_SSE2 1
+# define CV_CPU_HAS_SUPPORT_SSE2 1
+# define CV_CPU_CALL_SSE2(fn, args) return (opt_SSE2::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE2
+# define CV_TRY_SSE2 1
+# define CV_CPU_HAS_SUPPORT_SSE2 (cv::checkHardwareSupport(CV_CPU_SSE2))
+# define CV_CPU_CALL_SSE2(fn, args) if (CV_CPU_HAS_SUPPORT_SSE2) return (opt_SSE2::fn args)
+#else
+# define CV_TRY_SSE2 0
+# define CV_CPU_HAS_SUPPORT_SSE2 0
+# define CV_CPU_CALL_SSE2(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_SSE2(fn, args, mode, ...) CV_CPU_CALL_SSE2(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE3
+# define CV_TRY_SSE3 1
+# define CV_CPU_HAS_SUPPORT_SSE3 1
+# define CV_CPU_CALL_SSE3(fn, args) return (opt_SSE3::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE3
+# define CV_TRY_SSE3 1
+# define CV_CPU_HAS_SUPPORT_SSE3 (cv::checkHardwareSupport(CV_CPU_SSE3))
+# define CV_CPU_CALL_SSE3(fn, args) if (CV_CPU_HAS_SUPPORT_SSE3) return (opt_SSE3::fn args)
+#else
+# define CV_TRY_SSE3 0
+# define CV_CPU_HAS_SUPPORT_SSE3 0
+# define CV_CPU_CALL_SSE3(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_SSE3(fn, args, mode, ...) CV_CPU_CALL_SSE3(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSSE3
+# define CV_TRY_SSSE3 1
+# define CV_CPU_HAS_SUPPORT_SSSE3 1
+# define CV_CPU_CALL_SSSE3(fn, args) return (opt_SSSE3::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSSE3
+# define CV_TRY_SSSE3 1
+# define CV_CPU_HAS_SUPPORT_SSSE3 (cv::checkHardwareSupport(CV_CPU_SSSE3))
+# define CV_CPU_CALL_SSSE3(fn, args) if (CV_CPU_HAS_SUPPORT_SSSE3) return (opt_SSSE3::fn args)
+#else
+# define CV_TRY_SSSE3 0
+# define CV_CPU_HAS_SUPPORT_SSSE3 0
+# define CV_CPU_CALL_SSSE3(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_SSSE3(fn, args, mode, ...) CV_CPU_CALL_SSSE3(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE4_1
+# define CV_TRY_SSE4_1 1
+# define CV_CPU_HAS_SUPPORT_SSE4_1 1
+# define CV_CPU_CALL_SSE4_1(fn, args) return (opt_SSE4_1::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE4_1
+# define CV_TRY_SSE4_1 1
+# define CV_CPU_HAS_SUPPORT_SSE4_1 (cv::checkHardwareSupport(CV_CPU_SSE4_1))
+# define CV_CPU_CALL_SSE4_1(fn, args) if (CV_CPU_HAS_SUPPORT_SSE4_1) return (opt_SSE4_1::fn args)
+#else
+# define CV_TRY_SSE4_1 0
+# define CV_CPU_HAS_SUPPORT_SSE4_1 0
+# define CV_CPU_CALL_SSE4_1(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_SSE4_1(fn, args, mode, ...) CV_CPU_CALL_SSE4_1(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE4_2
+# define CV_TRY_SSE4_2 1
+# define CV_CPU_HAS_SUPPORT_SSE4_2 1
+# define CV_CPU_CALL_SSE4_2(fn, args) return (opt_SSE4_2::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE4_2
+# define CV_TRY_SSE4_2 1
+# define CV_CPU_HAS_SUPPORT_SSE4_2 (cv::checkHardwareSupport(CV_CPU_SSE4_2))
+# define CV_CPU_CALL_SSE4_2(fn, args) if (CV_CPU_HAS_SUPPORT_SSE4_2) return (opt_SSE4_2::fn args)
+#else
+# define CV_TRY_SSE4_2 0
+# define CV_CPU_HAS_SUPPORT_SSE4_2 0
+# define CV_CPU_CALL_SSE4_2(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_SSE4_2(fn, args, mode, ...) CV_CPU_CALL_SSE4_2(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_POPCNT
+# define CV_TRY_POPCNT 1
+# define CV_CPU_HAS_SUPPORT_POPCNT 1
+# define CV_CPU_CALL_POPCNT(fn, args) return (opt_POPCNT::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_POPCNT
+# define CV_TRY_POPCNT 1
+# define CV_CPU_HAS_SUPPORT_POPCNT (cv::checkHardwareSupport(CV_CPU_POPCNT))
+# define CV_CPU_CALL_POPCNT(fn, args) if (CV_CPU_HAS_SUPPORT_POPCNT) return (opt_POPCNT::fn args)
+#else
+# define CV_TRY_POPCNT 0
+# define CV_CPU_HAS_SUPPORT_POPCNT 0
+# define CV_CPU_CALL_POPCNT(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_POPCNT(fn, args, mode, ...) CV_CPU_CALL_POPCNT(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX
+# define CV_TRY_AVX 1
+# define CV_CPU_HAS_SUPPORT_AVX 1
+# define CV_CPU_CALL_AVX(fn, args) return (opt_AVX::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX
+# define CV_TRY_AVX 1
+# define CV_CPU_HAS_SUPPORT_AVX (cv::checkHardwareSupport(CV_CPU_AVX))
+# define CV_CPU_CALL_AVX(fn, args) if (CV_CPU_HAS_SUPPORT_AVX) return (opt_AVX::fn args)
+#else
+# define CV_TRY_AVX 0
+# define CV_CPU_HAS_SUPPORT_AVX 0
+# define CV_CPU_CALL_AVX(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_AVX(fn, args, mode, ...) CV_CPU_CALL_AVX(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_FP16
+# define CV_TRY_FP16 1
+# define CV_CPU_HAS_SUPPORT_FP16 1
+# define CV_CPU_CALL_FP16(fn, args) return (opt_FP16::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_FP16
+# define CV_TRY_FP16 1
+# define CV_CPU_HAS_SUPPORT_FP16 (cv::checkHardwareSupport(CV_CPU_FP16))
+# define CV_CPU_CALL_FP16(fn, args) if (CV_CPU_HAS_SUPPORT_FP16) return (opt_FP16::fn args)
+#else
+# define CV_TRY_FP16 0
+# define CV_CPU_HAS_SUPPORT_FP16 0
+# define CV_CPU_CALL_FP16(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_FP16(fn, args, mode, ...) CV_CPU_CALL_FP16(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX2
+# define CV_TRY_AVX2 1
+# define CV_CPU_HAS_SUPPORT_AVX2 1
+# define CV_CPU_CALL_AVX2(fn, args) return (opt_AVX2::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX2
+# define CV_TRY_AVX2 1
+# define CV_CPU_HAS_SUPPORT_AVX2 (cv::checkHardwareSupport(CV_CPU_AVX2))
+# define CV_CPU_CALL_AVX2(fn, args) if (CV_CPU_HAS_SUPPORT_AVX2) return (opt_AVX2::fn args)
+#else
+# define CV_TRY_AVX2 0
+# define CV_CPU_HAS_SUPPORT_AVX2 0
+# define CV_CPU_CALL_AVX2(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_AVX2(fn, args, mode, ...) CV_CPU_CALL_AVX2(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_FMA3
+# define CV_TRY_FMA3 1
+# define CV_CPU_HAS_SUPPORT_FMA3 1
+# define CV_CPU_CALL_FMA3(fn, args) return (opt_FMA3::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_FMA3
+# define CV_TRY_FMA3 1
+# define CV_CPU_HAS_SUPPORT_FMA3 (cv::checkHardwareSupport(CV_CPU_FMA3))
+# define CV_CPU_CALL_FMA3(fn, args) if (CV_CPU_HAS_SUPPORT_FMA3) return (opt_FMA3::fn args)
+#else
+# define CV_TRY_FMA3 0
+# define CV_CPU_HAS_SUPPORT_FMA3 0
+# define CV_CPU_CALL_FMA3(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_FMA3(fn, args, mode, ...) CV_CPU_CALL_FMA3(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_NEON
+# define CV_TRY_NEON 1
+# define CV_CPU_HAS_SUPPORT_NEON 1
+# define CV_CPU_CALL_NEON(fn, args) return (opt_NEON::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_NEON
+# define CV_TRY_NEON 1
+# define CV_CPU_HAS_SUPPORT_NEON (cv::checkHardwareSupport(CV_CPU_NEON))
+# define CV_CPU_CALL_NEON(fn, args) if (CV_CPU_HAS_SUPPORT_NEON) return (opt_NEON::fn args)
+#else
+# define CV_TRY_NEON 0
+# define CV_CPU_HAS_SUPPORT_NEON 0
+# define CV_CPU_CALL_NEON(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_NEON(fn, args, mode, ...) CV_CPU_CALL_NEON(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_VSX
+# define CV_TRY_VSX 1
+# define CV_CPU_HAS_SUPPORT_VSX 1
+# define CV_CPU_CALL_VSX(fn, args) return (opt_VSX::fn args)
+#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_VSX
+# define CV_TRY_VSX 1
+# define CV_CPU_HAS_SUPPORT_VSX (cv::checkHardwareSupport(CV_CPU_VSX))
+# define CV_CPU_CALL_VSX(fn, args) if (CV_CPU_HAS_SUPPORT_VSX) return (opt_VSX::fn args)
+#else
+# define CV_TRY_VSX 0
+# define CV_CPU_HAS_SUPPORT_VSX 0
+# define CV_CPU_CALL_VSX(fn, args)
+#endif
+#define __CV_CPU_DISPATCH_CHAIN_VSX(fn, args, mode, ...) CV_CPU_CALL_VSX(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__))
+
+#define CV_CPU_CALL_BASELINE(fn, args) return (cpu_baseline::fn args)
+#define __CV_CPU_DISPATCH_CHAIN_BASELINE(fn, args, mode, ...) CV_CPU_CALL_BASELINE(fn, args) /* last in sequence */
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cvdef.h b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cvdef.h
similarity index 61%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/cvdef.h
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/cvdef.h
index af2abfbb..c1fcc6a1 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cvdef.h
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cvdef.h
@@ -42,15 +42,43 @@
//
//M*/
-#ifndef __OPENCV_CORE_CVDEF_H__
-#define __OPENCV_CORE_CVDEF_H__
+#ifndef OPENCV_CORE_CVDEF_H
+#define OPENCV_CORE_CVDEF_H
//! @addtogroup core_utils
//! @{
-#if !defined _CRT_SECURE_NO_DEPRECATE && defined _MSC_VER && _MSC_VER > 1300
-# define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio warnings */
+#if !defined CV_DOXYGEN && !defined CV_IGNORE_DEBUG_BUILD_GUARD
+#if (defined(_MSC_VER) && (defined(DEBUG) || defined(_DEBUG))) || \
+ (defined(_GLIBCXX_DEBUG) || defined(_GLIBCXX_DEBUG_PEDANTIC))
+// Guard to prevent using of binary incompatible binaries / runtimes
+// https://github.com/opencv/opencv/pull/9161
+#define CV__DEBUG_NS_BEGIN namespace debug_build_guard {
+#define CV__DEBUG_NS_END }
+namespace cv { namespace debug_build_guard { } using namespace debug_build_guard; }
#endif
+#endif
+
+#ifndef CV__DEBUG_NS_BEGIN
+#define CV__DEBUG_NS_BEGIN
+#define CV__DEBUG_NS_END
+#endif
+
+
+#ifdef __OPENCV_BUILD
+#include "cvconfig.h"
+#endif
+
+#ifndef __CV_EXPAND
+#define __CV_EXPAND(x) x
+#endif
+
+#ifndef __CV_CAT
+#define __CV_CAT__(x, y) x ## y
+#define __CV_CAT_(x, y) __CV_CAT__(x, y)
+#define __CV_CAT(x, y) __CV_CAT_(x, y)
+#endif
+
// undef problematic defines sometimes defined by system headers (windows.h in particular)
#undef small
@@ -59,10 +87,6 @@
#undef abs
#undef Complex
-#if !defined _CRT_SECURE_NO_DEPRECATE && defined _MSC_VER && _MSC_VER > 1300
-# define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio warnings */
-#endif
-
#include
#include "opencv2/core/hal/interface.h"
@@ -88,7 +112,7 @@
# endif
#endif
-#if defined CV_ICC && !defined CV_ENABLE_UNROLLED
+#if defined CV_DISABLE_OPTIMIZATION || (defined CV_ICC && !defined CV_ENABLE_UNROLLED)
# define CV_ENABLE_UNROLLED 0
#else
# define CV_ENABLE_UNROLLED 1
@@ -112,7 +136,7 @@
#define CV_CPU_SSE4_1 6
#define CV_CPU_SSE4_2 7
#define CV_CPU_POPCNT 8
-
+#define CV_CPU_FP16 9
#define CV_CPU_AVX 10
#define CV_CPU_AVX2 11
#define CV_CPU_FMA3 12
@@ -129,6 +153,8 @@
#define CV_CPU_NEON 100
+#define CV_CPU_VSX 200
+
// when adding to this list remember to update the following enum
#define CV_HARDWARE_MAX_FEATURE 255
@@ -143,7 +169,7 @@ enum CpuFeatures {
CPU_SSE4_1 = 6,
CPU_SSE4_2 = 7,
CPU_POPCNT = 8,
-
+ CPU_FP16 = 9,
CPU_AVX = 10,
CPU_AVX2 = 11,
CPU_FMA3 = 12,
@@ -158,156 +184,53 @@ enum CpuFeatures {
CPU_AVX_512VBMI = 20,
CPU_AVX_512VL = 21,
- CPU_NEON = 100
+ CPU_NEON = 100,
+
+ CPU_VSX = 200
};
-// do not include SSE/AVX/NEON headers for NVCC compiler
-#ifndef __CUDACC__
-#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2)
-# include
-# define CV_MMX 1
-# define CV_SSE 1
-# define CV_SSE2 1
-# if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500)
-# include
-# define CV_SSE3 1
-# endif
-# if defined __SSSE3__ || (defined _MSC_VER && _MSC_VER >= 1500)
-# include
-# define CV_SSSE3 1
-# endif
-# if defined __SSE4_1__ || (defined _MSC_VER && _MSC_VER >= 1500)
-# include
-# define CV_SSE4_1 1
-# endif
-# if defined __SSE4_2__ || (defined _MSC_VER && _MSC_VER >= 1500)
-# include
-# define CV_SSE4_2 1
-# endif
-# if defined __POPCNT__ || (defined _MSC_VER && _MSC_VER >= 1500)
-# ifdef _MSC_VER
-# include
-# else
-# include
-# endif
-# define CV_POPCNT 1
-# endif
-# if defined __AVX__ || (defined _MSC_VER && _MSC_VER >= 1600 && 0)
-// MS Visual Studio 2010 (2012?) has no macro pre-defined to identify the use of /arch:AVX
-// See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32
-# include
-# define CV_AVX 1
-# if defined(_XCR_XFEATURE_ENABLED_MASK)
-# define __xgetbv() _xgetbv(_XCR_XFEATURE_ENABLED_MASK)
-# else
-# define __xgetbv() 0
-# endif
-# endif
-# if defined __AVX2__ || (defined _MSC_VER && _MSC_VER >= 1800 && 0)
-# include
-# define CV_AVX2 1
-# if defined __FMA__
-# define CV_FMA3 1
-# endif
-# endif
-#endif
+#include "cv_cpu_dispatch.h"
-#if (defined WIN32 || defined _WIN32) && defined(_M_ARM)
-# include
-# include "arm_neon.h"
-# define CV_NEON 1
-# define CPU_HAS_NEON_FEATURE (true)
-#elif defined(__ARM_NEON__) || (defined (__ARM_NEON) && defined(__aarch64__))
-# include
-# define CV_NEON 1
-#endif
-
-#if defined __GNUC__ && defined __arm__ && (defined __ARM_PCS_VFP || defined __ARM_VFPV3__ || defined __ARM_NEON__) && !defined __SOFTFP__
-# define CV_VFP 1
-#endif
-
-#endif // __CUDACC__
-
-#ifndef CV_POPCNT
-#define CV_POPCNT 0
-#endif
-#ifndef CV_MMX
-# define CV_MMX 0
-#endif
-#ifndef CV_SSE
-# define CV_SSE 0
-#endif
-#ifndef CV_SSE2
-# define CV_SSE2 0
-#endif
-#ifndef CV_SSE3
-# define CV_SSE3 0
-#endif
-#ifndef CV_SSSE3
-# define CV_SSSE3 0
-#endif
-#ifndef CV_SSE4_1
-# define CV_SSE4_1 0
-#endif
-#ifndef CV_SSE4_2
-# define CV_SSE4_2 0
-#endif
-#ifndef CV_AVX
-# define CV_AVX 0
-#endif
-#ifndef CV_AVX2
-# define CV_AVX2 0
-#endif
-#ifndef CV_FMA3
-# define CV_FMA3 0
-#endif
-#ifndef CV_AVX_512F
-# define CV_AVX_512F 0
-#endif
-#ifndef CV_AVX_512BW
-# define CV_AVX_512BW 0
-#endif
-#ifndef CV_AVX_512CD
-# define CV_AVX_512CD 0
-#endif
-#ifndef CV_AVX_512DQ
-# define CV_AVX_512DQ 0
-#endif
-#ifndef CV_AVX_512ER
-# define CV_AVX_512ER 0
-#endif
-#ifndef CV_AVX_512IFMA512
-# define CV_AVX_512IFMA512 0
-#endif
-#ifndef CV_AVX_512PF
-# define CV_AVX_512PF 0
-#endif
-#ifndef CV_AVX_512VBMI
-# define CV_AVX_512VBMI 0
-#endif
-#ifndef CV_AVX_512VL
-# define CV_AVX_512VL 0
-#endif
-
-#ifndef CV_NEON
-# define CV_NEON 0
-#endif
-
-#ifndef CV_VFP
-# define CV_VFP 0
-#endif
/* fundamental constants */
#define CV_PI 3.1415926535897932384626433832795
-#define CV_2PI 6.283185307179586476925286766559
+#define CV_2PI 6.283185307179586476925286766559
#define CV_LOG2 0.69314718055994530941723212145818
+#if defined __ARM_FP16_FORMAT_IEEE \
+ && !defined __CUDACC__
+# define CV_FP16_TYPE 1
+#else
+# define CV_FP16_TYPE 0
+#endif
+
+typedef union Cv16suf
+{
+ short i;
+#if CV_FP16_TYPE
+ __fp16 h;
+#endif
+ struct _fp16Format
+ {
+ unsigned int significand : 10;
+ unsigned int exponent : 5;
+ unsigned int sign : 1;
+ } fmt;
+}
+Cv16suf;
+
typedef union Cv32suf
{
int i;
unsigned u;
float f;
+ struct _fp32Format
+ {
+ unsigned int significand : 23;
+ unsigned int exponent : 8;
+ unsigned int sign : 1;
+ } fmt;
}
Cv32suf;
@@ -325,12 +248,32 @@ Cv64suf;
# define DISABLE_OPENCV_24_COMPATIBILITY
#endif
-#if (defined WIN32 || defined _WIN32 || defined WINCE || defined __CYGWIN__) && defined CVAPI_EXPORTS
-# define CV_EXPORTS __declspec(dllexport)
-#elif defined __GNUC__ && __GNUC__ >= 4
-# define CV_EXPORTS __attribute__ ((visibility ("default")))
+#ifdef CVAPI_EXPORTS
+# if (defined _WIN32 || defined WINCE || defined __CYGWIN__)
+# define CV_EXPORTS __declspec(dllexport)
+# elif defined __GNUC__ && __GNUC__ >= 4
+# define CV_EXPORTS __attribute__ ((visibility ("default")))
+# endif
+#endif
+
+#ifndef CV_EXPORTS
+# define CV_EXPORTS
+#endif
+
+#ifdef _MSC_VER
+# define CV_EXPORTS_TEMPLATE
#else
-# define CV_EXPORTS
+# define CV_EXPORTS_TEMPLATE CV_EXPORTS
+#endif
+
+#ifndef CV_DEPRECATED
+# if defined(__GNUC__)
+# define CV_DEPRECATED __attribute__ ((deprecated))
+# elif defined(_MSC_VER)
+# define CV_DEPRECATED __declspec(deprecated)
+# else
+# define CV_DEPRECATED
+# endif
#endif
#ifndef CV_EXTERN_C
@@ -357,67 +300,6 @@ Cv64suf;
* Matrix type (Mat) *
\****************************************************************************************/
-#define CV_CN_MAX 512
-#define CV_CN_SHIFT 3
-#define CV_DEPTH_MAX (1 << CV_CN_SHIFT)
-
-#define CV_8U 0
-#define CV_8S 1
-#define CV_16U 2
-#define CV_16S 3
-#define CV_32S 4
-#define CV_32F 5
-#define CV_64F 6
-#define CV_USRTYPE1 7
-
-#define CV_MAT_DEPTH_MASK (CV_DEPTH_MAX - 1)
-#define CV_MAT_DEPTH(flags) ((flags) & CV_MAT_DEPTH_MASK)
-
-#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT))
-#define CV_MAKE_TYPE CV_MAKETYPE
-
-#define CV_8UC1 CV_MAKETYPE(CV_8U,1)
-#define CV_8UC2 CV_MAKETYPE(CV_8U,2)
-#define CV_8UC3 CV_MAKETYPE(CV_8U,3)
-#define CV_8UC4 CV_MAKETYPE(CV_8U,4)
-#define CV_8UC(n) CV_MAKETYPE(CV_8U,(n))
-
-#define CV_8SC1 CV_MAKETYPE(CV_8S,1)
-#define CV_8SC2 CV_MAKETYPE(CV_8S,2)
-#define CV_8SC3 CV_MAKETYPE(CV_8S,3)
-#define CV_8SC4 CV_MAKETYPE(CV_8S,4)
-#define CV_8SC(n) CV_MAKETYPE(CV_8S,(n))
-
-#define CV_16UC1 CV_MAKETYPE(CV_16U,1)
-#define CV_16UC2 CV_MAKETYPE(CV_16U,2)
-#define CV_16UC3 CV_MAKETYPE(CV_16U,3)
-#define CV_16UC4 CV_MAKETYPE(CV_16U,4)
-#define CV_16UC(n) CV_MAKETYPE(CV_16U,(n))
-
-#define CV_16SC1 CV_MAKETYPE(CV_16S,1)
-#define CV_16SC2 CV_MAKETYPE(CV_16S,2)
-#define CV_16SC3 CV_MAKETYPE(CV_16S,3)
-#define CV_16SC4 CV_MAKETYPE(CV_16S,4)
-#define CV_16SC(n) CV_MAKETYPE(CV_16S,(n))
-
-#define CV_32SC1 CV_MAKETYPE(CV_32S,1)
-#define CV_32SC2 CV_MAKETYPE(CV_32S,2)
-#define CV_32SC3 CV_MAKETYPE(CV_32S,3)
-#define CV_32SC4 CV_MAKETYPE(CV_32S,4)
-#define CV_32SC(n) CV_MAKETYPE(CV_32S,(n))
-
-#define CV_32FC1 CV_MAKETYPE(CV_32F,1)
-#define CV_32FC2 CV_MAKETYPE(CV_32F,2)
-#define CV_32FC3 CV_MAKETYPE(CV_32F,3)
-#define CV_32FC4 CV_MAKETYPE(CV_32F,4)
-#define CV_32FC(n) CV_MAKETYPE(CV_32F,(n))
-
-#define CV_64FC1 CV_MAKETYPE(CV_64F,1)
-#define CV_64FC2 CV_MAKETYPE(CV_64F,2)
-#define CV_64FC3 CV_MAKETYPE(CV_64F,3)
-#define CV_64FC4 CV_MAKETYPE(CV_64F,4)
-#define CV_64FC(n) CV_MAKETYPE(CV_64F,(n))
-
#define CV_MAT_CN_MASK ((CV_CN_MAX - 1) << CV_CN_SHIFT)
#define CV_MAT_CN(flags) ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1)
#define CV_MAT_TYPE_MASK (CV_DEPTH_MAX*CV_CN_MAX - 1)
@@ -431,7 +313,7 @@ Cv64suf;
#define CV_IS_SUBMAT(flags) ((flags) & CV_MAT_SUBMAT_FLAG)
/** Size of each channel item,
- 0x124489 = 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */
+ 0x8442211 = 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */
#define CV_ELEM_SIZE1(type) \
((((sizeof(size_t)<<28)|0x8442211) >> CV_MAT_DEPTH(type)*4) & 15)
@@ -447,14 +329,36 @@ Cv64suf;
# define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif
+/****************************************************************************************\
+* static analysis *
+\****************************************************************************************/
+
+// In practice, some macros are not processed correctly (noreturn is not detected).
+// We need to use simplified definition for them.
+#ifndef CV_STATIC_ANALYSIS
+# if defined(__KLOCWORK__) || defined(__clang_analyzer__) || defined(__COVERITY__)
+# define CV_STATIC_ANALYSIS
+# endif
+#endif
+
+/****************************************************************************************\
+* Thread sanitizer *
+\****************************************************************************************/
+#ifndef CV_THREAD_SANITIZER
+# if defined(__has_feature)
+# if __has_feature(thread_sanitizer)
+# define CV_THREAD_SANITIZER
+# endif
+# endif
+#endif
+
/****************************************************************************************\
* exchange-add operation for atomic operations on reference counters *
\****************************************************************************************/
-#if defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32)
- // atomic increment on the linux version of the Intel(tm) compiler
-# define CV_XADD(addr, delta) (int)_InterlockedExchangeAdd(const_cast(reinterpret_cast(addr)), delta)
-#elif defined __GNUC__
+#ifdef CV_XADD
+ // allow to use user-defined macro
+#elif defined __GNUC__ || defined __clang__
# if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ && !defined(__CUDACC__)
# ifdef __ATOMIC_ACQ_REL
# define CV_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), delta, __ATOMIC_ACQ_REL)
@@ -492,12 +396,26 @@ Cv64suf;
#endif
+/****************************************************************************************\
+* C++ 11 *
+\****************************************************************************************/
+#ifndef CV_CXX11
+# if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1800)
+# define CV_CXX11 1
+# endif
+#else
+# if CV_CXX11 == 0
+# undef CV_CXX11
+# endif
+#endif
+
+
/****************************************************************************************\
* C++ Move semantics *
\****************************************************************************************/
#ifndef CV_CXX_MOVE_SEMANTICS
-# if __cplusplus >= 201103L || defined(__GXX_EXPERIMENTAL_CXX0X__) || defined(_MSC_VER) && _MSC_VER >= 1600
+# if __cplusplus >= 201103L || defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(_MSC_VER) && _MSC_VER >= 1600)
# define CV_CXX_MOVE_SEMANTICS 1
# elif defined(__clang)
# if __has_feature(cxx_rvalue_references)
@@ -510,6 +428,21 @@ Cv64suf;
# endif
#endif
+/****************************************************************************************\
+* C++11 std::array *
+\****************************************************************************************/
+
+#ifndef CV_CXX_STD_ARRAY
+# if __cplusplus >= 201103L
+# define CV_CXX_STD_ARRAY 1
+# include
+# endif
+#else
+# if CV_CXX_STD_ARRAY == 0
+# undef CV_CXX_STD_ARRAY
+# endif
+#endif
+
//! @}
-#endif // __OPENCV_CORE_CVDEF_H__
+#endif // OPENCV_CORE_CVDEF_H
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cvstd.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cvstd.hpp
similarity index 95%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/cvstd.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/cvstd.hpp
index edae954f..0a3f553a 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cvstd.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cvstd.hpp
@@ -41,25 +41,21 @@
//
//M*/
-#ifndef __OPENCV_CORE_CVSTD_HPP__
-#define __OPENCV_CORE_CVSTD_HPP__
+#ifndef OPENCV_CORE_CVSTD_HPP
+#define OPENCV_CORE_CVSTD_HPP
#ifndef __cplusplus
# error cvstd.hpp header must be compiled as C++
#endif
#include "opencv2/core/cvdef.h"
-
#include
#include
#include
-#ifndef OPENCV_NOSTL
-# include
-#endif
+#include
// import useful primitives from stl
-#ifndef OPENCV_NOSTL_TRANSITIONAL
# include
# include
# include //for abs(int)
@@ -67,6 +63,11 @@
namespace cv
{
+ static inline uchar abs(uchar a) { return a; }
+ static inline ushort abs(ushort a) { return a; }
+ static inline unsigned abs(unsigned a) { return a; }
+ static inline uint64 abs(uint64 a) { return a; }
+
using std::min;
using std::max;
using std::abs;
@@ -77,29 +78,6 @@ namespace cv
using std::log;
}
-namespace std
-{
- static inline uchar abs(uchar a) { return a; }
- static inline ushort abs(ushort a) { return a; }
- static inline unsigned abs(unsigned a) { return a; }
- static inline uint64 abs(uint64 a) { return a; }
-}
-
-#else
-namespace cv
-{
- template static inline T min(T a, T b) { return a < b ? a : b; }
- template static inline T max(T a, T b) { return a > b ? a : b; }
- template static inline T abs(T a) { return a < 0 ? -a : a; }
- template static inline void swap(T& a, T& b) { T tmp = a; a = b; b = tmp; }
-
- template<> inline uchar abs(uchar a) { return a; }
- template<> inline ushort abs(ushort a) { return a; }
- template<> inline unsigned abs(unsigned a) { return a; }
- template<> inline uint64 abs(uint64 a) { return a; }
-}
-#endif
-
namespace cv {
//! @addtogroup core_utils
@@ -492,7 +470,7 @@ public:
static const size_t npos = size_t(-1);
- explicit String();
+ String();
String(const String& str);
String(const String& str, size_t pos, size_t len = npos);
String(const char* s);
@@ -559,7 +537,6 @@ public:
String toLowerCase() const;
-#ifndef OPENCV_NOSTL
String(const std::string& str);
String(const std::string& str, size_t pos, size_t len = npos);
String& operator=(const std::string& str);
@@ -568,7 +545,6 @@ public:
friend String operator+ (const String& lhs, const std::string& rhs);
friend String operator+ (const std::string& lhs, const String& rhs);
-#endif
private:
char* cstr_;
@@ -622,6 +598,7 @@ String::String(const char* s)
{
if (!s) return;
size_t len = strlen(s);
+ if (!len) return;
memcpy(allocate(len), s, len);
}
@@ -630,6 +607,7 @@ String::String(const char* s, size_t n)
: cstr_(0), len_(0)
{
if (!n) return;
+ if (!s) return;
memcpy(allocate(n), s, n);
}
@@ -637,6 +615,7 @@ inline
String::String(size_t n, char c)
: cstr_(0), len_(0)
{
+ if (!n) return;
memset(allocate(n), c, n);
}
@@ -645,6 +624,7 @@ String::String(const char* first, const char* last)
: cstr_(0), len_(0)
{
size_t len = (size_t)(last - first);
+ if (!len) return;
memcpy(allocate(len), first, len);
}
@@ -653,6 +633,7 @@ String::String(Iterator first, Iterator last)
: cstr_(0), len_(0)
{
size_t len = (size_t)(last - first);
+ if (!len) return;
char* str = allocate(len);
while (first != last)
{
@@ -685,7 +666,7 @@ String& String::operator=(const char* s)
deallocate();
if (!s) return *this;
size_t len = strlen(s);
- memcpy(allocate(len), s, len);
+ if (len) memcpy(allocate(len), s, len);
return *this;
}
@@ -751,7 +732,7 @@ const char* String::begin() const
inline
const char* String::end() const
{
- return len_ ? cstr_ + 1 : 0;
+ return len_ ? cstr_ + len_ : NULL;
}
inline
@@ -958,8 +939,9 @@ size_t String::find_last_of(const char* s, size_t pos) const
inline
String String::toLowerCase() const
{
+ if (!cstr_)
+ return String();
String res(cstr_, len_);
-
for (size_t i = 0; i < len_; ++i)
res.cstr_[i] = (char) ::tolower(cstr_[i]);
@@ -978,8 +960,8 @@ String operator + (const String& lhs, const String& rhs)
{
String s;
s.allocate(lhs.len_ + rhs.len_);
- memcpy(s.cstr_, lhs.cstr_, lhs.len_);
- memcpy(s.cstr_ + lhs.len_, rhs.cstr_, rhs.len_);
+ if (lhs.len_) memcpy(s.cstr_, lhs.cstr_, lhs.len_);
+ if (rhs.len_) memcpy(s.cstr_ + lhs.len_, rhs.cstr_, rhs.len_);
return s;
}
@@ -989,8 +971,8 @@ String operator + (const String& lhs, const char* rhs)
String s;
size_t rhslen = strlen(rhs);
s.allocate(lhs.len_ + rhslen);
- memcpy(s.cstr_, lhs.cstr_, lhs.len_);
- memcpy(s.cstr_ + lhs.len_, rhs, rhslen);
+ if (lhs.len_) memcpy(s.cstr_, lhs.cstr_, lhs.len_);
+ if (rhslen) memcpy(s.cstr_ + lhs.len_, rhs, rhslen);
return s;
}
@@ -1000,8 +982,8 @@ String operator + (const char* lhs, const String& rhs)
String s;
size_t lhslen = strlen(lhs);
s.allocate(lhslen + rhs.len_);
- memcpy(s.cstr_, lhs, lhslen);
- memcpy(s.cstr_ + lhslen, rhs.cstr_, rhs.len_);
+ if (lhslen) memcpy(s.cstr_, lhs, lhslen);
+ if (rhs.len_) memcpy(s.cstr_ + lhslen, rhs.cstr_, rhs.len_);
return s;
}
@@ -1010,7 +992,7 @@ String operator + (const String& lhs, char rhs)
{
String s;
s.allocate(lhs.len_ + 1);
- memcpy(s.cstr_, lhs.cstr_, lhs.len_);
+ if (lhs.len_) memcpy(s.cstr_, lhs.cstr_, lhs.len_);
s.cstr_[lhs.len_] = rhs;
return s;
}
@@ -1021,7 +1003,7 @@ String operator + (char lhs, const String& rhs)
String s;
s.allocate(rhs.len_ + 1);
s.cstr_[0] = lhs;
- memcpy(s.cstr_ + 1, rhs.cstr_, rhs.len_);
+ if (rhs.len_) memcpy(s.cstr_ + 1, rhs.cstr_, rhs.len_);
return s;
}
@@ -1048,22 +1030,11 @@ static inline bool operator>= (const String& lhs, const char* rhs) { return lh
} // cv
-#ifndef OPENCV_NOSTL_TRANSITIONAL
namespace std
{
static inline void swap(cv::String& a, cv::String& b) { a.swap(b); }
}
-#else
-namespace cv
-{
- template<> inline
- void swap(cv::String& a, cv::String& b)
- {
- a.swap(b);
- }
-}
-#endif
#include "opencv2/core/ptr.inl.hpp"
-#endif //__OPENCV_CORE_CVSTD_HPP__
+#endif //OPENCV_CORE_CVSTD_HPP
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cvstd.inl.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cvstd.inl.hpp
similarity index 85%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/cvstd.inl.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/cvstd.inl.hpp
index ad154061..85230f59 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/cvstd.inl.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/cvstd.inl.hpp
@@ -41,19 +41,21 @@
//
//M*/
-#ifndef __OPENCV_CORE_CVSTDINL_HPP__
-#define __OPENCV_CORE_CVSTDINL_HPP__
+#ifndef OPENCV_CORE_CVSTDINL_HPP
+#define OPENCV_CORE_CVSTDINL_HPP
-#ifndef OPENCV_NOSTL
-# include
-# include
-#endif
+#include
+#include
//! @cond IGNORED
+#ifdef _MSC_VER
+#pragma warning( push )
+#pragma warning( disable: 4127 )
+#endif
+
namespace cv
{
-#ifndef OPENCV_NOSTL
template class DataType< std::complex<_Tp> >
{
@@ -78,7 +80,7 @@ String::String(const std::string& str)
if (!str.empty())
{
size_t len = str.size();
- memcpy(allocate(len), str.c_str(), len);
+ if (len) memcpy(allocate(len), str.c_str(), len);
}
}
@@ -100,7 +102,7 @@ String& String::operator = (const std::string& str)
if (!str.empty())
{
size_t len = str.size();
- memcpy(allocate(len), str.c_str(), len);
+ if (len) memcpy(allocate(len), str.c_str(), len);
}
return *this;
}
@@ -124,8 +126,8 @@ String operator + (const String& lhs, const std::string& rhs)
String s;
size_t rhslen = rhs.size();
s.allocate(lhs.len_ + rhslen);
- memcpy(s.cstr_, lhs.cstr_, lhs.len_);
- memcpy(s.cstr_ + lhs.len_, rhs.c_str(), rhslen);
+ if (lhs.len_) memcpy(s.cstr_, lhs.cstr_, lhs.len_);
+ if (rhslen) memcpy(s.cstr_ + lhs.len_, rhs.c_str(), rhslen);
return s;
}
@@ -135,8 +137,8 @@ String operator + (const std::string& lhs, const String& rhs)
String s;
size_t lhslen = lhs.size();
s.allocate(lhslen + rhs.len_);
- memcpy(s.cstr_, lhs.c_str(), lhslen);
- memcpy(s.cstr_ + lhslen, rhs.cstr_, rhs.len_);
+ if (lhslen) memcpy(s.cstr_, lhs.c_str(), lhslen);
+ if (rhs.len_) memcpy(s.cstr_ + lhslen, rhs.cstr_, rhs.len_);
return s;
}
@@ -151,9 +153,7 @@ FileNode::operator std::string() const
template<> inline
void operator >> (const FileNode& n, std::string& value)
{
- String val;
- read(n, val, val);
- value = val;
+ read(n, value, std::string());
}
template<> inline
@@ -183,6 +183,18 @@ std::ostream& operator << (std::ostream& out, const Mat& mtx)
return out << Formatter::get()->format(mtx);
}
+static inline
+std::ostream& operator << (std::ostream& out, const UMat& m)
+{
+ return out << m.getMat(ACCESS_READ);
+}
+
+template static inline
+std::ostream& operator << (std::ostream& out, const Complex<_Tp>& c)
+{
+ return out << "(" << c.re << "," << c.im << ")";
+}
+
template static inline
std::ostream& operator << (std::ostream& out, const std::vector >& vec)
{
@@ -221,14 +233,7 @@ template static inline
std::ostream& operator << (std::ostream& out, const Vec<_Tp, n>& vec)
{
out << "[";
-#ifdef _MSC_VER
-#pragma warning( push )
-#pragma warning( disable: 4127 )
-#endif
- if(Vec<_Tp, n>::depth < CV_32F)
-#ifdef _MSC_VER
-#pragma warning( pop )
-#endif
+ if (cv::traits::Depth<_Tp>::value <= CV_32S)
{
for (int i = 0; i < n - 1; ++i) {
out << (int)vec[i] << ", ";
@@ -258,10 +263,24 @@ std::ostream& operator << (std::ostream& out, const Rect_<_Tp>& rect)
return out << "[" << rect.width << " x " << rect.height << " from (" << rect.x << ", " << rect.y << ")]";
}
+static inline std::ostream& operator << (std::ostream& out, const MatSize& msize)
+{
+ int i, dims = msize.p[-1];
+ for( i = 0; i < dims; i++ )
+ {
+ out << msize.p[i];
+ if( i < dims-1 )
+ out << " x ";
+ }
+ return out;
+}
-#endif // OPENCV_NOSTL
} // cv
+#ifdef _MSC_VER
+#pragma warning( pop )
+#endif
+
//! @endcond
-#endif // __OPENCV_CORE_CVSTDINL_HPP__
+#endif // OPENCV_CORE_CVSTDINL_HPP
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/directx.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/directx.hpp
similarity index 98%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/directx.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/directx.hpp
index 764af74d..056a85a1 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/directx.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/directx.hpp
@@ -39,8 +39,8 @@
//
//M*/
-#ifndef __OPENCV_CORE_DIRECTX_HPP__
-#define __OPENCV_CORE_DIRECTX_HPP__
+#ifndef OPENCV_CORE_DIRECTX_HPP
+#define OPENCV_CORE_DIRECTX_HPP
#include "mat.hpp"
#include "ocl.hpp"
@@ -181,4 +181,4 @@ CV_EXPORTS int getTypeFromD3DFORMAT(const int iD3DFORMAT); // enum D3DTYPE for D
} } // namespace cv::directx
-#endif // __OPENCV_CORE_DIRECTX_HPP__
+#endif // OPENCV_CORE_DIRECTX_HPP
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/eigen.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/eigen.hpp
similarity index 73%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/eigen.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/eigen.hpp
index 44df04c5..c8603aca 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/eigen.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/eigen.hpp
@@ -42,8 +42,8 @@
//M*/
-#ifndef __OPENCV_CORE_EIGEN_HPP__
-#define __OPENCV_CORE_EIGEN_HPP__
+#ifndef OPENCV_CORE_EIGEN_HPP
+#define OPENCV_CORE_EIGEN_HPP
#include "opencv2/core.hpp"
@@ -64,14 +64,14 @@ void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCo
{
if( !(src.Flags & Eigen::RowMajorBit) )
{
- Mat _src(src.cols(), src.rows(), DataType<_Tp>::type,
- (void*)src.data(), src.stride()*sizeof(_Tp));
+ Mat _src(src.cols(), src.rows(), traits::Type<_Tp>::value,
+ (void*)src.data(), src.outerStride()*sizeof(_Tp));
transpose(_src, dst);
}
else
{
- Mat _src(src.rows(), src.cols(), DataType<_Tp>::type,
- (void*)src.data(), src.stride()*sizeof(_Tp));
+ Mat _src(src.rows(), src.cols(), traits::Type<_Tp>::value,
+ (void*)src.data(), src.outerStride()*sizeof(_Tp));
_src.copyTo(dst);
}
}
@@ -98,8 +98,8 @@ void cv2eigen( const Mat& src,
CV_DbgAssert(src.rows == _rows && src.cols == _cols);
if( !(dst.Flags & Eigen::RowMajorBit) )
{
- const Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(src.cols, src.rows, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
if( src.type() == _dst.type() )
transpose(src, _dst);
else if( src.cols == src.rows )
@@ -112,8 +112,8 @@ void cv2eigen( const Mat& src,
}
else
{
- const Mat _dst(src.rows, src.cols, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(src.rows, src.cols, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
src.convertTo(_dst, _dst.type());
}
}
@@ -125,14 +125,14 @@ void cv2eigen( const Matx<_Tp, _rows, _cols>& src,
{
if( !(dst.Flags & Eigen::RowMajorBit) )
{
- const Mat _dst(_cols, _rows, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(_cols, _rows, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
transpose(src, _dst);
}
else
{
- const Mat _dst(_rows, _cols, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(_rows, _cols, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
Mat(src).copyTo(_dst);
}
}
@@ -144,8 +144,8 @@ void cv2eigen( const Mat& src,
dst.resize(src.rows, src.cols);
if( !(dst.Flags & Eigen::RowMajorBit) )
{
- const Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(src.cols, src.rows, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
if( src.type() == _dst.type() )
transpose(src, _dst);
else if( src.cols == src.rows )
@@ -158,8 +158,8 @@ void cv2eigen( const Mat& src,
}
else
{
- const Mat _dst(src.rows, src.cols, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(src.rows, src.cols, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
src.convertTo(_dst, _dst.type());
}
}
@@ -172,14 +172,14 @@ void cv2eigen( const Matx<_Tp, _rows, _cols>& src,
dst.resize(_rows, _cols);
if( !(dst.Flags & Eigen::RowMajorBit) )
{
- const Mat _dst(_cols, _rows, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(_cols, _rows, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
transpose(src, _dst);
}
else
{
- const Mat _dst(_rows, _cols, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(_rows, _cols, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
Mat(src).copyTo(_dst);
}
}
@@ -193,8 +193,8 @@ void cv2eigen( const Mat& src,
if( !(dst.Flags & Eigen::RowMajorBit) )
{
- const Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(src.cols, src.rows, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
if( src.type() == _dst.type() )
transpose(src, _dst);
else
@@ -202,8 +202,8 @@ void cv2eigen( const Mat& src,
}
else
{
- const Mat _dst(src.rows, src.cols, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(src.rows, src.cols, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
src.convertTo(_dst, _dst.type());
}
}
@@ -217,14 +217,14 @@ void cv2eigen( const Matx<_Tp, _rows, 1>& src,
if( !(dst.Flags & Eigen::RowMajorBit) )
{
- const Mat _dst(1, _rows, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(1, _rows, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
transpose(src, _dst);
}
else
{
- const Mat _dst(_rows, 1, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(_rows, 1, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
src.copyTo(_dst);
}
}
@@ -238,8 +238,8 @@ void cv2eigen( const Mat& src,
dst.resize(src.cols);
if( !(dst.Flags & Eigen::RowMajorBit) )
{
- const Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(src.cols, src.rows, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
if( src.type() == _dst.type() )
transpose(src, _dst);
else
@@ -247,8 +247,8 @@ void cv2eigen( const Mat& src,
}
else
{
- const Mat _dst(src.rows, src.cols, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(src.rows, src.cols, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
src.convertTo(_dst, _dst.type());
}
}
@@ -261,14 +261,14 @@ void cv2eigen( const Matx<_Tp, 1, _cols>& src,
dst.resize(_cols);
if( !(dst.Flags & Eigen::RowMajorBit) )
{
- const Mat _dst(_cols, 1, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(_cols, 1, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
transpose(src, _dst);
}
else
{
- const Mat _dst(1, _cols, DataType<_Tp>::type,
- dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ const Mat _dst(1, _cols, traits::Type<_Tp>::value,
+ dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp)));
Mat(src).copyTo(_dst);
}
}
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/fast_math.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/fast_math.hpp
similarity index 79%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/fast_math.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/fast_math.hpp
index b8b241b0..7858d404 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/fast_math.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/fast_math.hpp
@@ -42,11 +42,17 @@
//
//M*/
-#ifndef __OPENCV_CORE_FAST_MATH_HPP__
-#define __OPENCV_CORE_FAST_MATH_HPP__
+#ifndef OPENCV_CORE_FAST_MATH_HPP
+#define OPENCV_CORE_FAST_MATH_HPP
#include "opencv2/core/cvdef.h"
+#if ((defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ \
+ && defined __SSE2__ && !defined __APPLE__)) && !defined(__CUDACC__)
+#include
+#endif
+
+
//! @addtogroup core_utils
//! @{
@@ -54,24 +60,27 @@
* fast math *
\****************************************************************************************/
-#if defined __BORLANDC__
-# include
-#elif defined __cplusplus
+#ifdef __cplusplus
# include
#else
-# include
+# ifdef __BORLANDC__
+# include
+# else
+# include
+# endif
#endif
#ifdef HAVE_TEGRA_OPTIMIZATION
# include "tegra_round.hpp"
#endif
-#if CV_VFP
+#if defined __GNUC__ && defined __arm__ && (defined __ARM_PCS_VFP || defined __ARM_VFPV3__ || defined __ARM_NEON__) && !defined __SOFTFP__ && !defined(__CUDACC__)
// 1. general scheme
#define ARM_ROUND(_value, _asm_string) \
int res; \
float temp; \
- asm(_asm_string : [res] "=r" (res), [temp] "=w" (temp) : [value] "w" (_value)); \
+ (void)temp; \
+ __asm__(_asm_string : [res] "=r" (res), [temp] "=w" (temp) : [value] "w" (_value)); \
return res
// 2. version for double
#ifdef __clang__
@@ -81,7 +90,7 @@
#endif
// 3. version for float
#define ARM_ROUND_FLT(value) ARM_ROUND(value, "vcvtr.s32.f32 %[temp], %[value]\n vmov %[res], %[temp]")
-#endif // CV_VFP
+#endif
/** @brief Rounds floating-point number to the nearest integer
@@ -92,7 +101,7 @@ CV_INLINE int
cvRound( double value )
{
#if ((defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ \
- && defined __SSE2__ && !defined __APPLE__)) && !defined(__CUDACC__)
+ && defined __SSE2__ && !defined __APPLE__) || CV_SSE2) && !defined(__CUDACC__)
__m128d t = _mm_set_sd( value );
return _mm_cvtsd_si32(t);
#elif defined _MSC_VER && defined _M_IX86
@@ -107,7 +116,7 @@ cvRound( double value )
defined __GNUC__) && defined HAVE_TEGRA_OPTIMIZATION
TEGRA_ROUND_DBL(value);
#elif defined CV_ICC || defined __GNUC__
-# if CV_VFP
+# if defined ARM_ROUND_DBL
ARM_ROUND_DBL(value);
# else
return (int)lrint(value);
@@ -129,18 +138,8 @@ cvRound( double value )
*/
CV_INLINE int cvFloor( double value )
{
-#if (defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__ && !defined __APPLE__)) && !defined(__CUDACC__)
- __m128d t = _mm_set_sd( value );
- int i = _mm_cvtsd_si32(t);
- return i - _mm_movemask_pd(_mm_cmplt_sd(t, _mm_cvtsi32_sd(t,i)));
-#elif defined __GNUC__
int i = (int)value;
return i - (i > value);
-#else
- int i = cvRound(value);
- float diff = (float)(value - i);
- return i - (diff < 0);
-#endif
}
/** @brief Rounds floating-point number to the nearest integer not smaller than the original.
@@ -152,18 +151,8 @@ CV_INLINE int cvFloor( double value )
*/
CV_INLINE int cvCeil( double value )
{
-#if (defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__&& !defined __APPLE__)) && !defined(__CUDACC__)
- __m128d t = _mm_set_sd( value );
- int i = _mm_cvtsd_si32(t);
- return i + _mm_movemask_pd(_mm_cmplt_sd(_mm_cvtsi32_sd(t,i), t));
-#elif defined __GNUC__
int i = (int)value;
return i + (i < value);
-#else
- int i = cvRound(value);
- float diff = (float)(i - value);
- return i + (diff < 0);
-#endif
}
/** @brief Determines if the argument is Not A Number.
@@ -199,8 +188,8 @@ CV_INLINE int cvIsInf( double value )
/** @overload */
CV_INLINE int cvRound(float value)
{
-#if ((defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ && \
- defined __SSE2__ && !defined __APPLE__)) && !defined(__CUDACC__)
+#if ((defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ \
+ && defined __SSE2__ && !defined __APPLE__) || CV_SSE2) && !defined(__CUDACC__)
__m128 t = _mm_set_ss( value );
return _mm_cvtss_si32(t);
#elif defined _MSC_VER && defined _M_IX86
@@ -215,7 +204,7 @@ CV_INLINE int cvRound(float value)
defined __GNUC__) && defined HAVE_TEGRA_OPTIMIZATION
TEGRA_ROUND_FLT(value);
#elif defined CV_ICC || defined __GNUC__
-# if CV_VFP
+# if defined ARM_ROUND_FLT
ARM_ROUND_FLT(value);
# else
return (int)lrintf(value);
@@ -236,18 +225,8 @@ CV_INLINE int cvRound( int value )
/** @overload */
CV_INLINE int cvFloor( float value )
{
-#if (defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__ && !defined __APPLE__)) && !defined(__CUDACC__)
- __m128 t = _mm_set_ss( value );
- int i = _mm_cvtss_si32(t);
- return i - _mm_movemask_ps(_mm_cmplt_ss(t, _mm_cvtsi32_ss(t,i)));
-#elif defined __GNUC__
int i = (int)value;
return i - (i > value);
-#else
- int i = cvRound(value);
- float diff = (float)(value - i);
- return i - (diff < 0);
-#endif
}
/** @overload */
@@ -259,18 +238,8 @@ CV_INLINE int cvFloor( int value )
/** @overload */
CV_INLINE int cvCeil( float value )
{
-#if (defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__&& !defined __APPLE__)) && !defined(__CUDACC__)
- __m128 t = _mm_set_ss( value );
- int i = _mm_cvtss_si32(t);
- return i + _mm_movemask_ps(_mm_cmplt_ss(_mm_cvtsi32_ss(t,i), t));
-#elif defined __GNUC__
int i = (int)value;
return i + (i < value);
-#else
- int i = cvRound(value);
- float diff = (float)(i - value);
- return i + (diff < 0);
-#endif
}
/** @overload */
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/hal.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/hal.hpp
similarity index 81%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/hal.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/hal.hpp
index 118913eb..68900ec4 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/hal.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/hal.hpp
@@ -42,23 +42,13 @@
//
//M*/
-#ifndef __OPENCV_HAL_HPP__
-#define __OPENCV_HAL_HPP__
+#ifndef OPENCV_HAL_HPP
+#define OPENCV_HAL_HPP
#include "opencv2/core/cvdef.h"
+#include "opencv2/core/cvstd.hpp"
#include "opencv2/core/hal/interface.h"
-//! @cond IGNORED
-#define CALL_HAL(name, fun, ...) \
- int res = fun(__VA_ARGS__); \
- if (res == CV_HAL_ERROR_OK) \
- return; \
- else if (res != CV_HAL_ERROR_NOT_IMPLEMENTED) \
- CV_Error_(cv::Error::StsInternal, \
- ("HAL implementation " CVAUX_STR(name) " ==> " CVAUX_STR(fun) " returned %d (0x%08x)", res, res));
-//! @endcond
-
-
namespace cv { namespace hal {
//! @addtogroup core_hal_functions
@@ -74,6 +64,23 @@ CV_EXPORTS int LU32f(float* A, size_t astep, int m, float* b, size_t bstep, int
CV_EXPORTS int LU64f(double* A, size_t astep, int m, double* b, size_t bstep, int n);
CV_EXPORTS bool Cholesky32f(float* A, size_t astep, int m, float* b, size_t bstep, int n);
CV_EXPORTS bool Cholesky64f(double* A, size_t astep, int m, double* b, size_t bstep, int n);
+CV_EXPORTS void SVD32f(float* At, size_t astep, float* W, float* U, size_t ustep, float* Vt, size_t vstep, int m, int n, int flags);
+CV_EXPORTS void SVD64f(double* At, size_t astep, double* W, double* U, size_t ustep, double* Vt, size_t vstep, int m, int n, int flags);
+CV_EXPORTS int QR32f(float* A, size_t astep, int m, int n, int k, float* b, size_t bstep, float* hFactors);
+CV_EXPORTS int QR64f(double* A, size_t astep, int m, int n, int k, double* b, size_t bstep, double* hFactors);
+
+CV_EXPORTS void gemm32f(const float* src1, size_t src1_step, const float* src2, size_t src2_step,
+ float alpha, const float* src3, size_t src3_step, float beta, float* dst, size_t dst_step,
+ int m_a, int n_a, int n_d, int flags);
+CV_EXPORTS void gemm64f(const double* src1, size_t src1_step, const double* src2, size_t src2_step,
+ double alpha, const double* src3, size_t src3_step, double beta, double* dst, size_t dst_step,
+ int m_a, int n_a, int n_d, int flags);
+CV_EXPORTS void gemm32fc(const float* src1, size_t src1_step, const float* src2, size_t src2_step,
+ float alpha, const float* src3, size_t src3_step, float beta, float* dst, size_t dst_step,
+ int m_a, int n_a, int n_d, int flags);
+CV_EXPORTS void gemm64fc(const double* src1, size_t src1_step, const double* src2, size_t src2_step,
+ double alpha, const double* src3, size_t src3_step, double beta, double* dst, size_t dst_step,
+ int m_a, int n_a, int n_d, int flags);
CV_EXPORTS int normL1_(const uchar* a, const uchar* b, int n);
CV_EXPORTS float normL1_(const float* a, const float* b, int n);
@@ -84,7 +91,8 @@ CV_EXPORTS void exp64f(const double* src, double* dst, int n);
CV_EXPORTS void log32f(const float* src, float* dst, int n);
CV_EXPORTS void log64f(const double* src, double* dst, int n);
-CV_EXPORTS void fastAtan2(const float* y, const float* x, float* dst, int n, bool angleInDegrees);
+CV_EXPORTS void fastAtan32f(const float* y, const float* x, float* dst, int n, bool angleInDegrees);
+CV_EXPORTS void fastAtan64f(const double* y, const double* x, double* dst, int n, bool angleInDegrees);
CV_EXPORTS void magnitude32f(const float* x, const float* y, float* dst, int n);
CV_EXPORTS void magnitude64f(const double* x, const double* y, double* dst, int n);
CV_EXPORTS void sqrt32f(const float* src, float* dst, int len);
@@ -171,13 +179,13 @@ CV_EXPORTS void div32s( const int* src1, size_t step1, const int* src2, size_t s
CV_EXPORTS void div32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* scale);
CV_EXPORTS void div64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* scale);
-CV_EXPORTS void recip8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* scale);
-CV_EXPORTS void recip8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* scale);
-CV_EXPORTS void recip16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* scale);
-CV_EXPORTS void recip16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* scale);
-CV_EXPORTS void recip32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* scale);
-CV_EXPORTS void recip32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* scale);
-CV_EXPORTS void recip64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* scale);
+CV_EXPORTS void recip8u( const uchar *, size_t, const uchar * src2, size_t step2, uchar* dst, size_t step, int width, int height, void* scale);
+CV_EXPORTS void recip8s( const schar *, size_t, const schar * src2, size_t step2, schar* dst, size_t step, int width, int height, void* scale);
+CV_EXPORTS void recip16u( const ushort *, size_t, const ushort * src2, size_t step2, ushort* dst, size_t step, int width, int height, void* scale);
+CV_EXPORTS void recip16s( const short *, size_t, const short * src2, size_t step2, short* dst, size_t step, int width, int height, void* scale);
+CV_EXPORTS void recip32s( const int *, size_t, const int * src2, size_t step2, int* dst, size_t step, int width, int height, void* scale);
+CV_EXPORTS void recip32f( const float *, size_t, const float * src2, size_t step2, float* dst, size_t step, int width, int height, void* scale);
+CV_EXPORTS void recip64f( const double *, size_t, const double * src2, size_t step2, double* dst, size_t step, int width, int height, void* scale);
CV_EXPORTS void addWeighted8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _scalars );
CV_EXPORTS void addWeighted8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* scalars );
@@ -187,6 +195,29 @@ CV_EXPORTS void addWeighted32s( const int* src1, size_t step1, const int* src2,
CV_EXPORTS void addWeighted32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* scalars );
CV_EXPORTS void addWeighted64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* scalars );
+struct CV_EXPORTS DFT1D
+{
+ static Ptr create(int len, int count, int depth, int flags, bool * useBuffer = 0);
+ virtual void apply(const uchar *src, uchar *dst) = 0;
+ virtual ~DFT1D() {}
+};
+
+struct CV_EXPORTS DFT2D
+{
+ static Ptr create(int width, int height, int depth,
+ int src_channels, int dst_channels,
+ int flags, int nonzero_rows = 0);
+ virtual void apply(const uchar *src_data, size_t src_step, uchar *dst_data, size_t dst_step) = 0;
+ virtual ~DFT2D() {}
+};
+
+struct CV_EXPORTS DCT2D
+{
+ static Ptr create(int width, int height, int depth, int flags);
+ virtual void apply(const uchar *src_data, size_t src_step, uchar *dst_data, size_t dst_step) = 0;
+ virtual ~DCT2D() {}
+};
+
//! @} core_hal
//=============================================================================
@@ -204,6 +235,7 @@ CV_EXPORTS void exp(const double* src, double* dst, int n);
CV_EXPORTS void log(const float* src, float* dst, int n);
CV_EXPORTS void log(const double* src, double* dst, int n);
+CV_EXPORTS void fastAtan2(const float* y, const float* x, float* dst, int n, bool angleInDegrees);
CV_EXPORTS void magnitude(const float* x, const float* y, float* dst, int n);
CV_EXPORTS void magnitude(const double* x, const double* y, double* dst, int n);
CV_EXPORTS void sqrt(const float* src, float* dst, int len);
@@ -215,4 +247,4 @@ CV_EXPORTS void invSqrt(const double* src, double* dst, int len);
}} //cv::hal
-#endif //__OPENCV_HAL_HPP__
+#endif //OPENCV_HAL_HPP
diff --git a/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/interface.h b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/interface.h
new file mode 100644
index 00000000..8f640254
--- /dev/null
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/interface.h
@@ -0,0 +1,182 @@
+#ifndef OPENCV_CORE_HAL_INTERFACE_H
+#define OPENCV_CORE_HAL_INTERFACE_H
+
+//! @addtogroup core_hal_interface
+//! @{
+
+//! @name Return codes
+//! @{
+#define CV_HAL_ERROR_OK 0
+#define CV_HAL_ERROR_NOT_IMPLEMENTED 1
+#define CV_HAL_ERROR_UNKNOWN -1
+//! @}
+
+#ifdef __cplusplus
+#include
+#else
+#include
+#include
+#endif
+
+//! @name Data types
+//! primitive types
+//! - schar - signed 1 byte integer
+//! - uchar - unsigned 1 byte integer
+//! - short - signed 2 byte integer
+//! - ushort - unsigned 2 byte integer
+//! - int - signed 4 byte integer
+//! - uint - unsigned 4 byte integer
+//! - int64 - signed 8 byte integer
+//! - uint64 - unsigned 8 byte integer
+//! @{
+#if !defined _MSC_VER && !defined __BORLANDC__
+# if defined __cplusplus && __cplusplus >= 201103L && !defined __APPLE__
+# include
+# ifdef __NEWLIB__
+ typedef unsigned int uint;
+# else
+ typedef std::uint32_t uint;
+# endif
+# else
+# include
+ typedef uint32_t uint;
+# endif
+#else
+ typedef unsigned uint;
+#endif
+
+typedef signed char schar;
+
+#ifndef __IPL_H__
+ typedef unsigned char uchar;
+ typedef unsigned short ushort;
+#endif
+
+#if defined _MSC_VER || defined __BORLANDC__
+ typedef __int64 int64;
+ typedef unsigned __int64 uint64;
+# define CV_BIG_INT(n) n##I64
+# define CV_BIG_UINT(n) n##UI64
+#else
+ typedef int64_t int64;
+ typedef uint64_t uint64;
+# define CV_BIG_INT(n) n##LL
+# define CV_BIG_UINT(n) n##ULL
+#endif
+
+#define CV_CN_MAX 512
+#define CV_CN_SHIFT 3
+#define CV_DEPTH_MAX (1 << CV_CN_SHIFT)
+
+#define CV_8U 0
+#define CV_8S 1
+#define CV_16U 2
+#define CV_16S 3
+#define CV_32S 4
+#define CV_32F 5
+#define CV_64F 6
+#define CV_USRTYPE1 7
+
+#define CV_MAT_DEPTH_MASK (CV_DEPTH_MAX - 1)
+#define CV_MAT_DEPTH(flags) ((flags) & CV_MAT_DEPTH_MASK)
+
+#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT))
+#define CV_MAKE_TYPE CV_MAKETYPE
+
+#define CV_8UC1 CV_MAKETYPE(CV_8U,1)
+#define CV_8UC2 CV_MAKETYPE(CV_8U,2)
+#define CV_8UC3 CV_MAKETYPE(CV_8U,3)
+#define CV_8UC4 CV_MAKETYPE(CV_8U,4)
+#define CV_8UC(n) CV_MAKETYPE(CV_8U,(n))
+
+#define CV_8SC1 CV_MAKETYPE(CV_8S,1)
+#define CV_8SC2 CV_MAKETYPE(CV_8S,2)
+#define CV_8SC3 CV_MAKETYPE(CV_8S,3)
+#define CV_8SC4 CV_MAKETYPE(CV_8S,4)
+#define CV_8SC(n) CV_MAKETYPE(CV_8S,(n))
+
+#define CV_16UC1 CV_MAKETYPE(CV_16U,1)
+#define CV_16UC2 CV_MAKETYPE(CV_16U,2)
+#define CV_16UC3 CV_MAKETYPE(CV_16U,3)
+#define CV_16UC4 CV_MAKETYPE(CV_16U,4)
+#define CV_16UC(n) CV_MAKETYPE(CV_16U,(n))
+
+#define CV_16SC1 CV_MAKETYPE(CV_16S,1)
+#define CV_16SC2 CV_MAKETYPE(CV_16S,2)
+#define CV_16SC3 CV_MAKETYPE(CV_16S,3)
+#define CV_16SC4 CV_MAKETYPE(CV_16S,4)
+#define CV_16SC(n) CV_MAKETYPE(CV_16S,(n))
+
+#define CV_32SC1 CV_MAKETYPE(CV_32S,1)
+#define CV_32SC2 CV_MAKETYPE(CV_32S,2)
+#define CV_32SC3 CV_MAKETYPE(CV_32S,3)
+#define CV_32SC4 CV_MAKETYPE(CV_32S,4)
+#define CV_32SC(n) CV_MAKETYPE(CV_32S,(n))
+
+#define CV_32FC1 CV_MAKETYPE(CV_32F,1)
+#define CV_32FC2 CV_MAKETYPE(CV_32F,2)
+#define CV_32FC3 CV_MAKETYPE(CV_32F,3)
+#define CV_32FC4 CV_MAKETYPE(CV_32F,4)
+#define CV_32FC(n) CV_MAKETYPE(CV_32F,(n))
+
+#define CV_64FC1 CV_MAKETYPE(CV_64F,1)
+#define CV_64FC2 CV_MAKETYPE(CV_64F,2)
+#define CV_64FC3 CV_MAKETYPE(CV_64F,3)
+#define CV_64FC4 CV_MAKETYPE(CV_64F,4)
+#define CV_64FC(n) CV_MAKETYPE(CV_64F,(n))
+//! @}
+
+//! @name Comparison operation
+//! @sa cv::CmpTypes
+//! @{
+#define CV_HAL_CMP_EQ 0
+#define CV_HAL_CMP_GT 1
+#define CV_HAL_CMP_GE 2
+#define CV_HAL_CMP_LT 3
+#define CV_HAL_CMP_LE 4
+#define CV_HAL_CMP_NE 5
+//! @}
+
+//! @name Border processing modes
+//! @sa cv::BorderTypes
+//! @{
+#define CV_HAL_BORDER_CONSTANT 0
+#define CV_HAL_BORDER_REPLICATE 1
+#define CV_HAL_BORDER_REFLECT 2
+#define CV_HAL_BORDER_WRAP 3
+#define CV_HAL_BORDER_REFLECT_101 4
+#define CV_HAL_BORDER_TRANSPARENT 5
+#define CV_HAL_BORDER_ISOLATED 16
+//! @}
+
+//! @name DFT flags
+//! @{
+#define CV_HAL_DFT_INVERSE 1
+#define CV_HAL_DFT_SCALE 2
+#define CV_HAL_DFT_ROWS 4
+#define CV_HAL_DFT_COMPLEX_OUTPUT 16
+#define CV_HAL_DFT_REAL_OUTPUT 32
+#define CV_HAL_DFT_TWO_STAGE 64
+#define CV_HAL_DFT_STAGE_COLS 128
+#define CV_HAL_DFT_IS_CONTINUOUS 512
+#define CV_HAL_DFT_IS_INPLACE 1024
+//! @}
+
+//! @name SVD flags
+//! @{
+#define CV_HAL_SVD_NO_UV 1
+#define CV_HAL_SVD_SHORT_UV 2
+#define CV_HAL_SVD_MODIFY_A 4
+#define CV_HAL_SVD_FULL_UV 8
+//! @}
+
+//! @name Gemm flags
+//! @{
+#define CV_HAL_GEMM_1_T 1
+#define CV_HAL_GEMM_2_T 2
+#define CV_HAL_GEMM_3_T 4
+//! @}
+
+//! @}
+
+#endif
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/intrin.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin.hpp
similarity index 65%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/intrin.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin.hpp
index 33e14b48..9dcfc562 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/intrin.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin.hpp
@@ -42,8 +42,8 @@
//
//M*/
-#ifndef __OPENCV_HAL_INTRIN_HPP__
-#define __OPENCV_HAL_INTRIN_HPP__
+#ifndef OPENCV_HAL_INTRIN_HPP
+#define OPENCV_HAL_INTRIN_HPP
#include
#include
@@ -60,6 +60,25 @@
// access from within opencv code more accessible
namespace cv {
+#ifndef CV_DOXYGEN
+
+#ifdef CV_CPU_DISPATCH_MODE
+#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE __CV_CAT(hal_, CV_CPU_DISPATCH_MODE)
+#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN namespace __CV_CAT(hal_, CV_CPU_DISPATCH_MODE) {
+#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END }
+#else
+#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE hal_baseline
+#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN namespace hal_baseline {
+#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END }
+#endif
+
+
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
+using namespace CV_CPU_OPTIMIZATION_HAL_NAMESPACE;
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
+#endif
+
//! @addtogroup core_hal_intrin
//! @{
@@ -281,11 +300,15 @@ template struct V_SIMD128Traits
//! @}
+#ifndef CV_DOXYGEN
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
+#endif
}
#ifdef CV_DOXYGEN
# undef CV_SSE2
# undef CV_NEON
+# undef CV_VSX
#endif
#if CV_SSE2
@@ -296,6 +319,10 @@ template struct V_SIMD128Traits
#include "opencv2/core/hal/intrin_neon.hpp"
+#elif CV_VSX
+
+#include "opencv2/core/hal/intrin_vsx.hpp"
+
#else
#include "opencv2/core/hal/intrin_cpp.hpp"
@@ -317,4 +344,129 @@ template struct V_SIMD128Traits
//! @}
+//==================================================================================================
+
+//! @cond IGNORED
+
+namespace cv {
+
+#ifndef CV_DOXYGEN
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
+#endif
+
+template struct V_RegTrait128;
+
+template <> struct V_RegTrait128 {
+ typedef v_uint8x16 reg;
+ typedef v_uint16x8 w_reg;
+ typedef v_uint32x4 q_reg;
+ typedef v_uint8x16 u_reg;
+ static v_uint8x16 zero() { return v_setzero_u8(); }
+ static v_uint8x16 all(uchar val) { return v_setall_u8(val); }
+};
+
+template <> struct V_RegTrait128 {
+ typedef v_int8x16 reg;
+ typedef v_int16x8 w_reg;
+ typedef v_int32x4 q_reg;
+ typedef v_uint8x16 u_reg;
+ static v_int8x16 zero() { return v_setzero_s8(); }
+ static v_int8x16 all(schar val) { return v_setall_s8(val); }
+};
+
+template <> struct V_RegTrait128 {
+ typedef v_uint16x8 reg;
+ typedef v_uint32x4 w_reg;
+ typedef v_int16x8 int_reg;
+ typedef v_uint16x8 u_reg;
+ static v_uint16x8 zero() { return v_setzero_u16(); }
+ static v_uint16x8 all(ushort val) { return v_setall_u16(val); }
+};
+
+template <> struct V_RegTrait128 {
+ typedef v_int16x8 reg;
+ typedef v_int32x4 w_reg;
+ typedef v_uint16x8 u_reg;
+ static v_int16x8 zero() { return v_setzero_s16(); }
+ static v_int16x8 all(short val) { return v_setall_s16(val); }
+};
+
+template <> struct V_RegTrait128 {
+ typedef v_uint32x4 reg;
+ typedef v_uint64x2 w_reg;
+ typedef v_int32x4 int_reg;
+ typedef v_uint32x4 u_reg;
+ static v_uint32x4 zero() { return v_setzero_u32(); }
+ static v_uint32x4 all(unsigned val) { return v_setall_u32(val); }
+};
+
+template <> struct V_RegTrait128 {
+ typedef v_int32x4 reg;
+ typedef v_int64x2 w_reg;
+ typedef v_uint32x4 u_reg;
+ static v_int32x4 zero() { return v_setzero_s32(); }
+ static v_int32x4 all(int val) { return v_setall_s32(val); }
+};
+
+template <> struct V_RegTrait128 {
+ typedef v_uint64x2 reg;
+ static v_uint64x2 zero() { return v_setzero_u64(); }
+ static v_uint64x2 all(uint64 val) { return v_setall_u64(val); }
+};
+
+template <> struct V_RegTrait128 {
+ typedef v_int64x2 reg;
+ static v_int64x2 zero() { return v_setzero_s64(); }
+ static v_int64x2 all(int64 val) { return v_setall_s64(val); }
+};
+
+template <> struct V_RegTrait128 {
+ typedef v_float32x4 reg;
+ typedef v_int32x4 int_reg;
+ typedef v_float32x4 u_reg;
+ static v_float32x4 zero() { return v_setzero_f32(); }
+ static v_float32x4 all(float val) { return v_setall_f32(val); }
+};
+
+#if CV_SIMD128_64F
+template <> struct V_RegTrait128 {
+ typedef v_float64x2 reg;
+ typedef v_int32x4 int_reg;
+ typedef v_float64x2 u_reg;
+ static v_float64x2 zero() { return v_setzero_f64(); }
+ static v_float64x2 all(double val) { return v_setall_f64(val); }
+};
+#endif
+
+inline unsigned int trailingZeros32(unsigned int value) {
+#if defined(_MSC_VER)
+#if (_MSC_VER < 1700) || defined(_M_ARM)
+ unsigned long index = 0;
+ _BitScanForward(&index, value);
+ return (unsigned int)index;
+#else
+ return _tzcnt_u32(value);
+#endif
+#elif defined(__GNUC__) || defined(__GNUG__)
+ return __builtin_ctz(value);
+#elif defined(__ICC) || defined(__INTEL_COMPILER)
+ return _bit_scan_forward(value);
+#elif defined(__clang__)
+ return llvm.cttz.i32(value, true);
+#else
+ static const int MultiplyDeBruijnBitPosition[32] = {
+ 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 };
+ return MultiplyDeBruijnBitPosition[((uint32_t)((value & -value) * 0x077CB531U)) >> 27];
+#endif
+}
+
+#ifndef CV_DOXYGEN
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
+#endif
+
+} // cv::
+
+//! @endcond
+
#endif
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/intrin_cpp.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin_cpp.hpp
similarity index 84%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/intrin_cpp.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin_cpp.hpp
index 3929e0d0..e7ea899b 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/intrin_cpp.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin_cpp.hpp
@@ -42,8 +42,8 @@
//
//M*/
-#ifndef __OPENCV_HAL_INTRIN_CPP_HPP__
-#define __OPENCV_HAL_INTRIN_CPP_HPP__
+#ifndef OPENCV_HAL_INTRIN_CPP_HPP
+#define OPENCV_HAL_INTRIN_CPP_HPP
#include
#include
@@ -53,6 +53,10 @@
namespace cv
{
+#ifndef CV_DOXYGEN
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
+#endif
+
/** @addtogroup core_hal_intrin
"Universal intrinsics" is a types and functions set intended to simplify vectorization of code on
@@ -95,7 +99,7 @@ block and to save contents of the register to memory block.
@ref v_setall_s8, @ref v_setall_u8, ...,
@ref v_setzero_u8, @ref v_setzero_s8, ...
- Memory operations:
-@ref v_load, @ref v_load_aligned, @ref v_load_halves,
+@ref v_load, @ref v_load_aligned, @ref v_load_low, @ref v_load_halves,
@ref v_store, @ref v_store_aligned,
@ref v_store_high, @ref v_store_low
@@ -103,7 +107,7 @@ block and to save contents of the register to memory block.
These operations allow to reorder or recombine elements in one or multiple vectors.
-- Interleave, deinterleave (3 and 4 channels): @ref v_load_deinterleave, @ref v_store_interleave
+- Interleave, deinterleave (2, 3 and 4 channels): @ref v_load_deinterleave, @ref v_store_interleave
- Expand: @ref v_load_expand, @ref v_load_expand_q, @ref v_expand
- Pack: @ref v_pack, @ref v_pack_u, @ref v_rshr_pack, @ref v_rshr_pack_u,
@ref v_pack_store, @ref v_pack_u_store, @ref v_rshr_pack_store, @ref v_rshr_pack_u_store
@@ -116,32 +120,32 @@ These operations allow to reorder or recombine elements in one or multiple vecto
Element-wise binary and unary operations.
- Arithmetics:
-@ref operator+(const v_reg &a, const v_reg &b) "+",
-@ref operator-(const v_reg &a, const v_reg &b) "-",
-@ref operator*(const v_reg &a, const v_reg &b) "*",
-@ref operator/(const v_reg &a, const v_reg &b) "/",
+@ref operator +(const v_reg &a, const v_reg &b) "+",
+@ref operator -(const v_reg &a, const v_reg &b) "-",
+@ref operator *(const v_reg &a, const v_reg &b) "*",
+@ref operator /(const v_reg &a, const v_reg &b) "/",
@ref v_mul_expand
- Non-saturating arithmetics: @ref v_add_wrap, @ref v_sub_wrap
- Bitwise shifts:
-@ref operator<<(const v_reg &a, int s) "<<",
-@ref operator>>(const v_reg &a, int s) ">>",
+@ref operator <<(const v_reg &a, int s) "<<",
+@ref operator >>(const v_reg &a, int s) ">>",
@ref v_shl, @ref v_shr
- Bitwise logic:
@ref operator&(const v_reg &a, const v_reg &b) "&",
-@ref operator|(const v_reg &a, const v_reg &b) "|",
-@ref operator^(const v_reg &a, const v_reg &b) "^",
-@ref operator~(const v_reg &a) "~"
+@ref operator |(const v_reg &a, const v_reg &b) "|",
+@ref operator ^(const v_reg &a, const v_reg &b) "^",
+@ref operator ~(const v_reg &a) "~"
- Comparison:
-@ref operator>(const v_reg &a, const v_reg &b) ">",
-@ref operator>=(const v_reg &a, const v_reg &b) ">=",
-@ref operator<(const v_reg &a, const v_reg &b) "<",
-@ref operator<=(const v_reg &a, const v_reg &b) "<=",
+@ref operator >(const v_reg &a, const v_reg &b) ">",
+@ref operator >=(const v_reg &a, const v_reg &b) ">=",
+@ref operator <(const v_reg &a, const v_reg &b) "<",
+@ref operator <=(const v_reg &a, const v_reg &b) "<=",
@ref operator==(const v_reg &a, const v_reg &b) "==",
-@ref operator!=(const v_reg &a, const v_reg &b) "!="
+@ref operator !=(const v_reg &a, const v_reg &b) "!="
- min/max: @ref v_min, @ref v_max
@@ -149,7 +153,7 @@ Element-wise binary and unary operations.
Most of these operations return only one value.
-- Reduce: @ref v_reduce_min, @ref v_reduce_max, @ref v_reduce_sum
+- Reduce: @ref v_reduce_min, @ref v_reduce_max, @ref v_reduce_sum, @ref v_popcount
- Mask: @ref v_signmask, @ref v_check_all, @ref v_check_any, @ref v_select
### Other math
@@ -455,8 +459,10 @@ template inline v_reg<_Tp, n> operator ~ (const v_reg<_Tp,
{
v_reg<_Tp, n> c;
for( int i = 0; i < n; i++ )
+ {
c.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int(~V_TypeTraits<_Tp>::reinterpret_int(a.s[i]));
- return c;
+ }
+ return c;
}
//! @brief Helper macro
@@ -572,6 +578,49 @@ Scheme:
For 32-bit integer and 32-bit floating point types. */
OPENCV_HAL_IMPL_REDUCE_MINMAX_FUNC(v_reduce_max, std::max)
+static const unsigned char popCountTable[] =
+{
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
+};
+/** @brief Count the 1 bits in the vector and return 4 values
+
+Scheme:
+@code
+{A1 A2 A3 ...} => popcount(A1)
+@endcode
+Any types but result will be in v_uint32x4*/
+template inline v_uint32x4 v_popcount(const v_reg<_Tp, n>& a)
+{
+ v_uint8x16 b;
+ b = v_reinterpret_as_u8(a);
+ for( int i = 0; i < v_uint8x16::nlanes; i++ )
+ {
+ b.s[i] = popCountTable[b.s[i]];
+ }
+ v_uint32x4 c;
+ for( int i = 0; i < v_uint32x4::nlanes; i++ )
+ {
+ c.s[i] = b.s[i*4] + b.s[i*4+1] + b.s[i*4+2] + b.s[i*4+3];
+ }
+ return c;
+}
+
+
//! @cond IGNORED
template
inline void v_minmax( const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b,
@@ -672,7 +721,7 @@ inline v_reg::abs_type, n> v_absdiff(const v_reg<_Tp,
{
typedef typename V_TypeTraits<_Tp>::abs_type rtype;
v_reg c;
- const rtype mask = std::numeric_limits<_Tp>::is_signed ? (1 << (sizeof(rtype)*8 - 1)) : 0;
+ const rtype mask = (rtype)(std::numeric_limits<_Tp>::is_signed ? (1 << (sizeof(rtype)*8 - 1)) : 0);
for( int i = 0; i < n; i++ )
{
rtype ua = a.s[i] ^ mask;
@@ -836,12 +885,59 @@ template inline v_reg<_Tp, n> operator shift_op(const v_reg
/** @brief Bitwise shift left
For 16-, 32- and 64-bit integer values. */
-OPENCV_HAL_IMPL_SHIFT_OP(<<)
+OPENCV_HAL_IMPL_SHIFT_OP(<< )
/** @brief Bitwise shift right
For 16-, 32- and 64-bit integer values. */
-OPENCV_HAL_IMPL_SHIFT_OP(>>)
+OPENCV_HAL_IMPL_SHIFT_OP(>> )
+
+/** @brief Element shift left among vector
+
+For all type */
+#define OPENCV_HAL_IMPL_ROTATE_SHIFT_OP(suffix,opA,opB) \
+template inline v_reg<_Tp, n> v_rotate_##suffix(const v_reg<_Tp, n>& a) \
+{ \
+ v_reg<_Tp, n> b; \
+ for (int i = 0; i < n; i++) \
+ { \
+ int sIndex = i opA imm; \
+ if (0 <= sIndex && sIndex < n) \
+ { \
+ b.s[i] = a.s[sIndex]; \
+ } \
+ else \
+ { \
+ b.s[i] = 0; \
+ } \
+ } \
+ return b; \
+} \
+template inline v_reg<_Tp, n> v_rotate_##suffix(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \
+{ \
+ v_reg<_Tp, n> c; \
+ for (int i = 0; i < n; i++) \
+ { \
+ int aIndex = i opA imm; \
+ int bIndex = i opA imm opB n; \
+ if (0 <= bIndex && bIndex < n) \
+ { \
+ c.s[i] = b.s[bIndex]; \
+ } \
+ else if (0 <= aIndex && aIndex < n) \
+ { \
+ c.s[i] = a.s[aIndex]; \
+ } \
+ else \
+ { \
+ c.s[i] = 0; \
+ } \
+ } \
+ return c; \
+}
+
+OPENCV_HAL_IMPL_ROTATE_SHIFT_OP(left, -, +)
+OPENCV_HAL_IMPL_ROTATE_SHIFT_OP(right, +, -)
/** @brief Sum packed values
@@ -858,6 +954,27 @@ template inline typename V_TypeTraits<_Tp>::sum_type v_redu
return c;
}
+/** @brief Sums all elements of each input vector, returns the vector of sums
+
+ Scheme:
+ @code
+ result[0] = a[0] + a[1] + a[2] + a[3]
+ result[1] = b[0] + b[1] + b[2] + b[3]
+ result[2] = c[0] + c[1] + c[2] + c[3]
+ result[3] = d[0] + d[1] + d[2] + d[3]
+ @endcode
+*/
+inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b,
+ const v_float32x4& c, const v_float32x4& d)
+{
+ v_float32x4 r;
+ r.s[0] = a.s[0] + a.s[1] + a.s[2] + a.s[3];
+ r.s[1] = b.s[0] + b.s[1] + b.s[2] + b.s[3];
+ r.s[2] = c.s[0] + c.s[1] + c.s[2] + c.s[3];
+ r.s[3] = d.s[0] + d.s[1] + d.s[2] + d.s[3];
+ return r;
+}
+
/** @brief Get negative values mask
Returned value is a bit mask with bits set to 1 on places corresponding to negative packed values indexes.
@@ -1010,6 +1127,26 @@ inline v_reg<_Tp, V_SIMD128Traits<_Tp>::nlanes> v_load_aligned(const _Tp* ptr)
return v_reg<_Tp, V_SIMD128Traits<_Tp>::nlanes>(ptr);
}
+/** @brief Load 64-bits of data to lower part (high part is undefined).
+
+@param ptr memory block containing data for first half (0..n/2)
+
+@code{.cpp}
+int lo[2] = { 1, 2 };
+v_int32x4 r = v_load_low(lo);
+@endcode
+ */
+template
+inline v_reg<_Tp, V_SIMD128Traits<_Tp>::nlanes> v_load_low(const _Tp* ptr)
+{
+ v_reg<_Tp, V_SIMD128Traits<_Tp>::nlanes> c;
+ for( int i = 0; i < c.nlanes/2; i++ )
+ {
+ c.s[i] = ptr[i];
+ }
+ return c;
+}
+
/** @brief Load register contents from two memory blocks
@param loptr memory block containing data for first half (0..n/2)
@@ -1075,12 +1212,31 @@ v_load_expand_q(const _Tp* ptr)
return c;
}
-/** @brief Load and deinterleave (4 channels)
+/** @brief Load and deinterleave (2 channels)
-Load data from memory deinterleave and store to 4 registers.
+Load data from memory deinterleave and store to 2 registers.
Scheme:
@code
-{A1 B1 C1 D1 A2 B2 C2 D2 ...} ==> {A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...}, {D1 D2 ...}
+{A1 B1 A2 B2 ...} ==> {A1 A2 ...}, {B1 B2 ...}
+@endcode
+For all types except 64-bit. */
+template inline void v_load_deinterleave(const _Tp* ptr, v_reg<_Tp, n>& a,
+ v_reg<_Tp, n>& b)
+{
+ int i, i2;
+ for( i = i2 = 0; i < n; i++, i2 += 2 )
+ {
+ a.s[i] = ptr[i2];
+ b.s[i] = ptr[i2+1];
+ }
+}
+
+/** @brief Load and deinterleave (3 channels)
+
+Load data from memory deinterleave and store to 3 registers.
+Scheme:
+@code
+{A1 B1 C1 A2 B2 C2 ...} ==> {A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...}
@endcode
For all types except 64-bit. */
template inline void v_load_deinterleave(const _Tp* ptr, v_reg<_Tp, n>& a,
@@ -1095,12 +1251,12 @@ template inline void v_load_deinterleave(const _Tp* ptr, v_
}
}
-/** @brief Load and deinterleave (3 channels)
+/** @brief Load and deinterleave (4 channels)
-Load data from memory deinterleave and store to 3 registers.
+Load data from memory deinterleave and store to 4 registers.
Scheme:
@code
-{A1 B1 C1 A2 B2 C2 ...} ==> {A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...}
+{A1 B1 C1 D1 A2 B2 C2 D2 ...} ==> {A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...}, {D1 D2 ...}
@endcode
For all types except 64-bit. */
template
@@ -1118,12 +1274,32 @@ inline void v_load_deinterleave(const _Tp* ptr, v_reg<_Tp, n>& a,
}
}
+/** @brief Interleave and store (2 channels)
+
+Interleave and store data from 2 registers to memory.
+Scheme:
+@code
+{A1 A2 ...}, {B1 B2 ...} ==> {A1 B1 A2 B2 ...}
+@endcode
+For all types except 64-bit. */
+template
+inline void v_store_interleave( _Tp* ptr, const v_reg<_Tp, n>& a,
+ const v_reg<_Tp, n>& b)
+{
+ int i, i2;
+ for( i = i2 = 0; i < n; i++, i2 += 2 )
+ {
+ ptr[i2] = a.s[i];
+ ptr[i2+1] = b.s[i];
+ }
+}
+
/** @brief Interleave and store (3 channels)
Interleave and store data from 3 registers to memory.
Scheme:
@code
-{A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...}, {D1 D2 ...} ==> {A1 B1 C1 D1 A2 B2 C2 D2 ...}
+{A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...} ==> {A1 B1 C1 A2 B2 C2 ...}
@endcode
For all types except 64-bit. */
template
@@ -1586,14 +1762,14 @@ OPENCV_HAL_IMPL_C_RSHIFTR(v_int64x2, int64)
//! @brief Helper macro
//! @ingroup core_hal_intrin_impl
-#define OPENCV_HAL_IMPL_C_PACK(_Tpvec, _Tpnvec, _Tpn, pack_suffix) \
+#define OPENCV_HAL_IMPL_C_PACK(_Tpvec, _Tpnvec, _Tpn, pack_suffix, cast) \
inline _Tpnvec v_##pack_suffix(const _Tpvec& a, const _Tpvec& b) \
{ \
_Tpnvec c; \
for( int i = 0; i < _Tpvec::nlanes; i++ ) \
{ \
- c.s[i] = saturate_cast<_Tpn>(a.s[i]); \
- c.s[i+_Tpvec::nlanes] = saturate_cast<_Tpn>(b.s[i]); \
+ c.s[i] = cast<_Tpn>(a.s[i]); \
+ c.s[i+_Tpvec::nlanes] = cast<_Tpn>(b.s[i]); \
} \
return c; \
}
@@ -1607,26 +1783,28 @@ inline _Tpnvec v_##pack_suffix(const _Tpvec& a, const _Tpvec& b) \
//!
//! - pack: for 16-, 32- and 64-bit integer input types
//! - pack_u: for 16- and 32-bit signed integer input types
-OPENCV_HAL_IMPL_C_PACK(v_uint16x8, v_uint8x16, uchar, pack)
-OPENCV_HAL_IMPL_C_PACK(v_int16x8, v_int8x16, schar, pack)
-OPENCV_HAL_IMPL_C_PACK(v_uint32x4, v_uint16x8, ushort, pack)
-OPENCV_HAL_IMPL_C_PACK(v_int32x4, v_int16x8, short, pack)
-OPENCV_HAL_IMPL_C_PACK(v_uint64x2, v_uint32x4, unsigned, pack)
-OPENCV_HAL_IMPL_C_PACK(v_int64x2, v_int32x4, int, pack)
-OPENCV_HAL_IMPL_C_PACK(v_int16x8, v_uint8x16, uchar, pack_u)
-OPENCV_HAL_IMPL_C_PACK(v_int32x4, v_uint16x8, ushort, pack_u)
+//!
+//! @note All variants except 64-bit use saturation.
+OPENCV_HAL_IMPL_C_PACK(v_uint16x8, v_uint8x16, uchar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK(v_int16x8, v_int8x16, schar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK(v_uint32x4, v_uint16x8, ushort, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK(v_int32x4, v_int16x8, short, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK(v_uint64x2, v_uint32x4, unsigned, pack, static_cast)
+OPENCV_HAL_IMPL_C_PACK(v_int64x2, v_int32x4, int, pack, static_cast)
+OPENCV_HAL_IMPL_C_PACK(v_int16x8, v_uint8x16, uchar, pack_u, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK(v_int32x4, v_uint16x8, ushort, pack_u, saturate_cast)
//! @}
//! @brief Helper macro
//! @ingroup core_hal_intrin_impl
-#define OPENCV_HAL_IMPL_C_RSHR_PACK(_Tpvec, _Tp, _Tpnvec, _Tpn, pack_suffix) \
+#define OPENCV_HAL_IMPL_C_RSHR_PACK(_Tpvec, _Tp, _Tpnvec, _Tpn, pack_suffix, cast) \
template inline _Tpnvec v_rshr_##pack_suffix(const _Tpvec& a, const _Tpvec& b) \
{ \
_Tpnvec c; \
for( int i = 0; i < _Tpvec::nlanes; i++ ) \
{ \
- c.s[i] = saturate_cast<_Tpn>((a.s[i] + ((_Tp)1 << (n - 1))) >> n); \
- c.s[i+_Tpvec::nlanes] = saturate_cast<_Tpn>((b.s[i] + ((_Tp)1 << (n - 1))) >> n); \
+ c.s[i] = cast<_Tpn>((a.s[i] + ((_Tp)1 << (n - 1))) >> n); \
+ c.s[i+_Tpvec::nlanes] = cast<_Tpn>((b.s[i] + ((_Tp)1 << (n - 1))) >> n); \
} \
return c; \
}
@@ -1640,51 +1818,55 @@ template inline _Tpnvec v_rshr_##pack_suffix(const _Tpvec& a, const _Tpve
//!
//! - pack: for 16-, 32- and 64-bit integer input types
//! - pack_u: for 16- and 32-bit signed integer input types
-OPENCV_HAL_IMPL_C_RSHR_PACK(v_uint16x8, ushort, v_uint8x16, uchar, pack)
-OPENCV_HAL_IMPL_C_RSHR_PACK(v_int16x8, short, v_int8x16, schar, pack)
-OPENCV_HAL_IMPL_C_RSHR_PACK(v_uint32x4, unsigned, v_uint16x8, ushort, pack)
-OPENCV_HAL_IMPL_C_RSHR_PACK(v_int32x4, int, v_int16x8, short, pack)
-OPENCV_HAL_IMPL_C_RSHR_PACK(v_uint64x2, uint64, v_uint32x4, unsigned, pack)
-OPENCV_HAL_IMPL_C_RSHR_PACK(v_int64x2, int64, v_int32x4, int, pack)
-OPENCV_HAL_IMPL_C_RSHR_PACK(v_int16x8, short, v_uint8x16, uchar, pack_u)
-OPENCV_HAL_IMPL_C_RSHR_PACK(v_int32x4, int, v_uint16x8, ushort, pack_u)
+//!
+//! @note All variants except 64-bit use saturation.
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_uint16x8, ushort, v_uint8x16, uchar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_int16x8, short, v_int8x16, schar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_uint32x4, unsigned, v_uint16x8, ushort, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_int32x4, int, v_int16x8, short, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_uint64x2, uint64, v_uint32x4, unsigned, pack, static_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_int64x2, int64, v_int32x4, int, pack, static_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_int16x8, short, v_uint8x16, uchar, pack_u, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK(v_int32x4, int, v_uint16x8, ushort, pack_u, saturate_cast)
//! @}
//! @brief Helper macro
//! @ingroup core_hal_intrin_impl
-#define OPENCV_HAL_IMPL_C_PACK_STORE(_Tpvec, _Tp, _Tpnvec, _Tpn, pack_suffix) \
+#define OPENCV_HAL_IMPL_C_PACK_STORE(_Tpvec, _Tp, _Tpnvec, _Tpn, pack_suffix, cast) \
inline void v_##pack_suffix##_store(_Tpn* ptr, const _Tpvec& a) \
{ \
for( int i = 0; i < _Tpvec::nlanes; i++ ) \
- ptr[i] = saturate_cast<_Tpn>(a.s[i]); \
+ ptr[i] = cast<_Tpn>(a.s[i]); \
}
//! @name Pack and store
//! @{
//! @brief Store values from the input vector into memory with pack
//!
-//! Values will be stored into memory with saturating conversion to narrower type.
+//! Values will be stored into memory with conversion to narrower type.
//! Variant with _u_ suffix converts to corresponding unsigned type.
//!
//! - pack: for 16-, 32- and 64-bit integer input types
//! - pack_u: for 16- and 32-bit signed integer input types
-OPENCV_HAL_IMPL_C_PACK_STORE(v_uint16x8, ushort, v_uint8x16, uchar, pack)
-OPENCV_HAL_IMPL_C_PACK_STORE(v_int16x8, short, v_int8x16, schar, pack)
-OPENCV_HAL_IMPL_C_PACK_STORE(v_uint32x4, unsigned, v_uint16x8, ushort, pack)
-OPENCV_HAL_IMPL_C_PACK_STORE(v_int32x4, int, v_int16x8, short, pack)
-OPENCV_HAL_IMPL_C_PACK_STORE(v_uint64x2, uint64, v_uint32x4, unsigned, pack)
-OPENCV_HAL_IMPL_C_PACK_STORE(v_int64x2, int64, v_int32x4, int, pack)
-OPENCV_HAL_IMPL_C_PACK_STORE(v_int16x8, short, v_uint8x16, uchar, pack_u)
-OPENCV_HAL_IMPL_C_PACK_STORE(v_int32x4, int, v_uint16x8, ushort, pack_u)
+//!
+//! @note All variants except 64-bit use saturation.
+OPENCV_HAL_IMPL_C_PACK_STORE(v_uint16x8, ushort, v_uint8x16, uchar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_int16x8, short, v_int8x16, schar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_uint32x4, unsigned, v_uint16x8, ushort, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_int32x4, int, v_int16x8, short, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_uint64x2, uint64, v_uint32x4, unsigned, pack, static_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_int64x2, int64, v_int32x4, int, pack, static_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_int16x8, short, v_uint8x16, uchar, pack_u, saturate_cast)
+OPENCV_HAL_IMPL_C_PACK_STORE(v_int32x4, int, v_uint16x8, ushort, pack_u, saturate_cast)
//! @}
//! @brief Helper macro
//! @ingroup core_hal_intrin_impl
-#define OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(_Tpvec, _Tp, _Tpnvec, _Tpn, pack_suffix) \
+#define OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(_Tpvec, _Tp, _Tpnvec, _Tpn, pack_suffix, cast) \
template inline void v_rshr_##pack_suffix##_store(_Tpn* ptr, const _Tpvec& a) \
{ \
for( int i = 0; i < _Tpvec::nlanes; i++ ) \
- ptr[i] = saturate_cast<_Tpn>((a.s[i] + ((_Tp)1 << (n - 1))) >> n); \
+ ptr[i] = cast<_Tpn>((a.s[i] + ((_Tp)1 << (n - 1))) >> n); \
}
//! @name Pack and store with rounding shift
@@ -1696,14 +1878,16 @@ template inline void v_rshr_##pack_suffix##_store(_Tpn* ptr, const _Tpvec
//!
//! - pack: for 16-, 32- and 64-bit integer input types
//! - pack_u: for 16- and 32-bit signed integer input types
-OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_uint16x8, ushort, v_uint8x16, uchar, pack)
-OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int16x8, short, v_int8x16, schar, pack)
-OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_uint32x4, unsigned, v_uint16x8, ushort, pack)
-OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int32x4, int, v_int16x8, short, pack)
-OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_uint64x2, uint64, v_uint32x4, unsigned, pack)
-OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int64x2, int64, v_int32x4, int, pack)
-OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int16x8, short, v_uint8x16, uchar, pack_u)
-OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int32x4, int, v_uint16x8, ushort, pack_u)
+//!
+//! @note All variants except 64-bit use saturation.
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_uint16x8, ushort, v_uint8x16, uchar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int16x8, short, v_int8x16, schar, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_uint32x4, unsigned, v_uint16x8, ushort, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int32x4, int, v_int16x8, short, pack, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_uint64x2, uint64, v_uint32x4, unsigned, pack, static_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int64x2, int64, v_int32x4, int, pack, static_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int16x8, short, v_uint8x16, uchar, pack_u, saturate_cast)
+OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(v_int32x4, int, v_uint16x8, ushort, pack_u, saturate_cast)
//! @}
/** @brief Matrix multiplication
@@ -1731,8 +1915,45 @@ inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
v.s[0]*m0.s[3] + v.s[1]*m1.s[3] + v.s[2]*m2.s[3] + v.s[3]*m3.s[3]);
}
+/** @brief Matrix multiplication and add
+
+Scheme:
+@code
+{A0 A1 A2 } |V0| |D0|
+{B0 B1 B2 } |V1| |D1|
+{C0 C1 C2 } x |V2| + |D2|
+====================
+{R0 R1 R2 R3}, where:
+R0 = A0V0 + A1V1 + A2V2 + D0,
+R1 = B0V0 + B1V1 + B2V2 + D1
+...
+@endcode
+*/
+inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0,
+ const v_float32x4& m1, const v_float32x4& m2,
+ const v_float32x4& m3)
+{
+ return v_float32x4(v.s[0]*m0.s[0] + v.s[1]*m1.s[0] + v.s[2]*m2.s[0] + m3.s[0],
+ v.s[0]*m0.s[1] + v.s[1]*m1.s[1] + v.s[2]*m2.s[1] + m3.s[1],
+ v.s[0]*m0.s[2] + v.s[1]*m1.s[2] + v.s[2]*m2.s[2] + m3.s[2],
+ v.s[0]*m0.s[3] + v.s[1]*m1.s[3] + v.s[2]*m2.s[3] + m3.s[3]);
+}
+
//! @}
+//! @name Check SIMD support
+//! @{
+//! @brief Check CPU capability of SIMD operation
+static inline bool hasSIMD128()
+{
+ return false;
+}
+
+//! @}
+
+#ifndef CV_DOXYGEN
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
+#endif
}
#endif
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/intrin_neon.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin_neon.hpp
similarity index 65%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/intrin_neon.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin_neon.hpp
index f3e47ca8..c3c49c90 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/intrin_neon.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin_neon.hpp
@@ -42,17 +42,42 @@
//
//M*/
-#ifndef __OPENCV_HAL_INTRIN_NEON_HPP__
-#define __OPENCV_HAL_INTRIN_NEON_HPP__
+#ifndef OPENCV_HAL_INTRIN_NEON_HPP
+#define OPENCV_HAL_INTRIN_NEON_HPP
#include
+#include "opencv2/core/utility.hpp"
namespace cv
{
//! @cond IGNORED
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
+
#define CV_SIMD128 1
+#if defined(__aarch64__)
+#define CV_SIMD128_64F 1
+#else
+#define CV_SIMD128_64F 0
+#endif
+
+#if CV_SIMD128_64F
+#define OPENCV_HAL_IMPL_NEON_REINTERPRET(_Tpv, suffix) \
+template static inline \
+_Tpv vreinterpretq_##suffix##_f64(T a) { return (_Tpv) a; } \
+template static inline \
+float64x2_t vreinterpretq_f64_##suffix(T a) { return (float64x2_t) a; }
+OPENCV_HAL_IMPL_NEON_REINTERPRET(uint8x16_t, u8)
+OPENCV_HAL_IMPL_NEON_REINTERPRET(int8x16_t, s8)
+OPENCV_HAL_IMPL_NEON_REINTERPRET(uint16x8_t, u16)
+OPENCV_HAL_IMPL_NEON_REINTERPRET(int16x8_t, s16)
+OPENCV_HAL_IMPL_NEON_REINTERPRET(uint32x4_t, u32)
+OPENCV_HAL_IMPL_NEON_REINTERPRET(int32x4_t, s32)
+OPENCV_HAL_IMPL_NEON_REINTERPRET(uint64x2_t, u64)
+OPENCV_HAL_IMPL_NEON_REINTERPRET(int64x2_t, s64)
+OPENCV_HAL_IMPL_NEON_REINTERPRET(float32x4_t, f32)
+#endif
struct v_uint8x16
{
@@ -201,7 +226,7 @@ struct v_uint64x2
v_uint64x2() {}
explicit v_uint64x2(uint64x2_t v) : val(v) {}
- v_uint64x2(unsigned v0, unsigned v1)
+ v_uint64x2(uint64 v0, uint64 v1)
{
uint64 v[] = {v0, v1};
val = vld1q_u64(v);
@@ -220,7 +245,7 @@ struct v_int64x2
v_int64x2() {}
explicit v_int64x2(int64x2_t v) : val(v) {}
- v_int64x2(int v0, int v1)
+ v_int64x2(int64 v0, int64 v1)
{
int64 v[] = {v0, v1};
val = vld1q_s64(v);
@@ -232,6 +257,70 @@ struct v_int64x2
int64x2_t val;
};
+#if CV_SIMD128_64F
+struct v_float64x2
+{
+ typedef double lane_type;
+ enum { nlanes = 2 };
+
+ v_float64x2() {}
+ explicit v_float64x2(float64x2_t v) : val(v) {}
+ v_float64x2(double v0, double v1)
+ {
+ double v[] = {v0, v1};
+ val = vld1q_f64(v);
+ }
+ double get0() const
+ {
+ return vgetq_lane_f64(val, 0);
+ }
+ float64x2_t val;
+};
+#endif
+
+#if CV_FP16
+// Workaround for old compilers
+template static inline int16x4_t vreinterpret_s16_f16(T a)
+{ return (int16x4_t)a; }
+template static inline float16x4_t vreinterpret_f16_s16(T a)
+{ return (float16x4_t)a; }
+template static inline float16x4_t cv_vld1_f16(const T* ptr)
+{
+#ifndef vld1_f16 // APPLE compiler defines vld1_f16 as macro
+ return vreinterpret_f16_s16(vld1_s16((const short*)ptr));
+#else
+ return vld1_f16((const __fp16*)ptr);
+#endif
+}
+template static inline void cv_vst1_f16(T* ptr, float16x4_t a)
+{
+#ifndef vst1_f16 // APPLE compiler defines vst1_f16 as macro
+ vst1_s16((short*)ptr, vreinterpret_s16_f16(a));
+#else
+ vst1_f16((__fp16*)ptr, a);
+#endif
+}
+
+struct v_float16x4
+{
+ typedef short lane_type;
+ enum { nlanes = 4 };
+
+ v_float16x4() {}
+ explicit v_float16x4(float16x4_t v) : val(v) {}
+ v_float16x4(short v0, short v1, short v2, short v3)
+ {
+ short v[] = {v0, v1, v2, v3};
+ val = cv_vld1_f16(v);
+ }
+ short get0() const
+ {
+ return vget_lane_s16(vreinterpret_s16_f16(val), 0);
+ }
+ float16x4_t val;
+};
+#endif
+
#define OPENCV_HAL_IMPL_NEON_INIT(_Tpv, _Tp, suffix) \
inline v_##_Tpv v_setzero_##suffix() { return v_##_Tpv(vdupq_n_##suffix((_Tp)0)); } \
inline v_##_Tpv v_setall_##suffix(_Tp v) { return v_##_Tpv(vdupq_n_##suffix(v)); } \
@@ -255,41 +344,56 @@ OPENCV_HAL_IMPL_NEON_INIT(int32x4, int, s32)
OPENCV_HAL_IMPL_NEON_INIT(uint64x2, uint64, u64)
OPENCV_HAL_IMPL_NEON_INIT(int64x2, int64, s64)
OPENCV_HAL_IMPL_NEON_INIT(float32x4, float, f32)
+#if CV_SIMD128_64F
+#define OPENCV_HAL_IMPL_NEON_INIT_64(_Tpv, suffix) \
+inline v_float64x2 v_reinterpret_as_f64(const v_##_Tpv& v) { return v_float64x2(vreinterpretq_f64_##suffix(v.val)); }
+OPENCV_HAL_IMPL_NEON_INIT(float64x2, double, f64)
+OPENCV_HAL_IMPL_NEON_INIT_64(uint8x16, u8)
+OPENCV_HAL_IMPL_NEON_INIT_64(int8x16, s8)
+OPENCV_HAL_IMPL_NEON_INIT_64(uint16x8, u16)
+OPENCV_HAL_IMPL_NEON_INIT_64(int16x8, s16)
+OPENCV_HAL_IMPL_NEON_INIT_64(uint32x4, u32)
+OPENCV_HAL_IMPL_NEON_INIT_64(int32x4, s32)
+OPENCV_HAL_IMPL_NEON_INIT_64(uint64x2, u64)
+OPENCV_HAL_IMPL_NEON_INIT_64(int64x2, s64)
+OPENCV_HAL_IMPL_NEON_INIT_64(float32x4, f32)
+OPENCV_HAL_IMPL_NEON_INIT_64(float64x2, f64)
+#endif
-#define OPENCV_HAL_IMPL_NEON_PACK(_Tpvec, _Tp, hreg, suffix, _Tpwvec, wsuffix, pack, op) \
+#define OPENCV_HAL_IMPL_NEON_PACK(_Tpvec, _Tp, hreg, suffix, _Tpwvec, pack, mov, rshr) \
inline _Tpvec v_##pack(const _Tpwvec& a, const _Tpwvec& b) \
{ \
- hreg a1 = vqmov##op##_##wsuffix(a.val), b1 = vqmov##op##_##wsuffix(b.val); \
+ hreg a1 = mov(a.val), b1 = mov(b.val); \
return _Tpvec(vcombine_##suffix(a1, b1)); \
} \
inline void v_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
{ \
- hreg a1 = vqmov##op##_##wsuffix(a.val); \
+ hreg a1 = mov(a.val); \
vst1_##suffix(ptr, a1); \
} \
template inline \
_Tpvec v_rshr_##pack(const _Tpwvec& a, const _Tpwvec& b) \
{ \
- hreg a1 = vqrshr##op##_n_##wsuffix(a.val, n); \
- hreg b1 = vqrshr##op##_n_##wsuffix(b.val, n); \
+ hreg a1 = rshr(a.val, n); \
+ hreg b1 = rshr(b.val, n); \
return _Tpvec(vcombine_##suffix(a1, b1)); \
} \
template inline \
void v_rshr_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
{ \
- hreg a1 = vqrshr##op##_n_##wsuffix(a.val, n); \
+ hreg a1 = rshr(a.val, n); \
vst1_##suffix(ptr, a1); \
}
-OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_uint16x8, u16, pack, n)
-OPENCV_HAL_IMPL_NEON_PACK(v_int8x16, schar, int8x8_t, s8, v_int16x8, s16, pack, n)
-OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_uint32x4, u32, pack, n)
-OPENCV_HAL_IMPL_NEON_PACK(v_int16x8, short, int16x4_t, s16, v_int32x4, s32, pack, n)
-OPENCV_HAL_IMPL_NEON_PACK(v_uint32x4, unsigned, uint32x2_t, u32, v_uint64x2, u64, pack, n)
-OPENCV_HAL_IMPL_NEON_PACK(v_int32x4, int, int32x2_t, s32, v_int64x2, s64, pack, n)
+OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_uint16x8, pack, vqmovn_u16, vqrshrn_n_u16)
+OPENCV_HAL_IMPL_NEON_PACK(v_int8x16, schar, int8x8_t, s8, v_int16x8, pack, vqmovn_s16, vqrshrn_n_s16)
+OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_uint32x4, pack, vqmovn_u32, vqrshrn_n_u32)
+OPENCV_HAL_IMPL_NEON_PACK(v_int16x8, short, int16x4_t, s16, v_int32x4, pack, vqmovn_s32, vqrshrn_n_s32)
+OPENCV_HAL_IMPL_NEON_PACK(v_uint32x4, unsigned, uint32x2_t, u32, v_uint64x2, pack, vmovn_u64, vrshrn_n_u64)
+OPENCV_HAL_IMPL_NEON_PACK(v_int32x4, int, int32x2_t, s32, v_int64x2, pack, vmovn_s64, vrshrn_n_s64)
-OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_int16x8, s16, pack_u, un)
-OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_int32x4, s32, pack_u, un)
+OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_int16x8, pack_u, vqmovun_s16, vqrshrun_n_s16)
+OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_int32x4, pack_u, vqmovun_s32, vqrshrun_n_s32)
inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
const v_float32x4& m1, const v_float32x4& m2,
@@ -303,6 +407,18 @@ inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
return v_float32x4(res);
}
+inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0,
+ const v_float32x4& m1, const v_float32x4& m2,
+ const v_float32x4& a)
+{
+ float32x2_t vl = vget_low_f32(v.val), vh = vget_high_f32(v.val);
+ float32x4_t res = vmulq_lane_f32(m0.val, vl, 0);
+ res = vmlaq_lane_f32(res, m1.val, vl, 1);
+ res = vmlaq_lane_f32(res, m2.val, vh, 0);
+ res = vaddq_f32(res, a.val);
+ return v_float32x4(res);
+}
+
#define OPENCV_HAL_IMPL_NEON_BIN_OP(bin_op, _Tpvec, intrin) \
inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
{ \
@@ -337,7 +453,13 @@ OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int64x2, vaddq_s64)
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int64x2, vsubq_s64)
OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint64x2, vaddq_u64)
OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint64x2, vsubq_u64)
-
+#if CV_SIMD128_64F
+OPENCV_HAL_IMPL_NEON_BIN_OP(/, v_float32x4, vdivq_f32)
+OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_float64x2, vaddq_f64)
+OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_float64x2, vsubq_f64)
+OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_float64x2, vmulq_f64)
+OPENCV_HAL_IMPL_NEON_BIN_OP(/, v_float64x2, vdivq_f64)
+#else
inline v_float32x4 operator / (const v_float32x4& a, const v_float32x4& b)
{
float32x4_t reciprocal = vrecpeq_f32(b.val);
@@ -353,6 +475,7 @@ inline v_float32x4& operator /= (v_float32x4& a, const v_float32x4& b)
a.val = vmulq_f32(a.val, reciprocal);
return a;
}
+#endif
inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b,
v_int32x4& c, v_int32x4& d)
@@ -421,6 +544,18 @@ inline v_float32x4 operator ~ (const v_float32x4& a)
return v_float32x4(vreinterpretq_f32_s32(vmvnq_s32(vreinterpretq_s32_f32(a.val))));
}
+#if CV_SIMD128_64F
+inline v_float32x4 v_sqrt(const v_float32x4& x)
+{
+ return v_float32x4(vsqrtq_f32(x.val));
+}
+
+inline v_float32x4 v_invsqrt(const v_float32x4& x)
+{
+ v_float32x4 one = v_setall_f32(1.0f);
+ return one / v_sqrt(x);
+}
+#else
inline v_float32x4 v_sqrt(const v_float32x4& x)
{
float32x4_t x1 = vmaxq_f32(x.val, vdupq_n_f32(FLT_MIN));
@@ -437,10 +572,54 @@ inline v_float32x4 v_invsqrt(const v_float32x4& x)
e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x.val, e), e), e);
return v_float32x4(e);
}
+#endif
+
+#define OPENCV_HAL_IMPL_NEON_ABS(_Tpuvec, _Tpsvec, usuffix, ssuffix) \
+inline _Tpuvec v_abs(const _Tpsvec& a) { return v_reinterpret_as_##usuffix(_Tpsvec(vabsq_##ssuffix(a.val))); }
+
+OPENCV_HAL_IMPL_NEON_ABS(v_uint8x16, v_int8x16, u8, s8)
+OPENCV_HAL_IMPL_NEON_ABS(v_uint16x8, v_int16x8, u16, s16)
+OPENCV_HAL_IMPL_NEON_ABS(v_uint32x4, v_int32x4, u32, s32)
inline v_float32x4 v_abs(v_float32x4 x)
{ return v_float32x4(vabsq_f32(x.val)); }
+#if CV_SIMD128_64F
+#define OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(bin_op, intrin) \
+inline v_float64x2 operator bin_op (const v_float64x2& a, const v_float64x2& b) \
+{ \
+ return v_float64x2(vreinterpretq_f64_s64(intrin(vreinterpretq_s64_f64(a.val), vreinterpretq_s64_f64(b.val)))); \
+} \
+inline v_float64x2& operator bin_op##= (v_float64x2& a, const v_float64x2& b) \
+{ \
+ a.val = vreinterpretq_f64_s64(intrin(vreinterpretq_s64_f64(a.val), vreinterpretq_s64_f64(b.val))); \
+ return a; \
+}
+
+OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(&, vandq_s64)
+OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(|, vorrq_s64)
+OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(^, veorq_s64)
+
+inline v_float64x2 operator ~ (const v_float64x2& a)
+{
+ return v_float64x2(vreinterpretq_f64_s32(vmvnq_s32(vreinterpretq_s32_f64(a.val))));
+}
+
+inline v_float64x2 v_sqrt(const v_float64x2& x)
+{
+ return v_float64x2(vsqrtq_f64(x.val));
+}
+
+inline v_float64x2 v_invsqrt(const v_float64x2& x)
+{
+ v_float64x2 one = v_setall_f64(1.0f);
+ return one / v_sqrt(x);
+}
+
+inline v_float64x2 v_abs(v_float64x2 x)
+{ return v_float64x2(vabsq_f64(x.val)); }
+#endif
+
// TODO: exp, log, sin, cos
#define OPENCV_HAL_IMPL_NEON_BIN_FUNC(_Tpvec, func, intrin) \
@@ -463,8 +642,23 @@ OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int32x4, v_min, vminq_s32)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int32x4, v_max, vmaxq_s32)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_min, vminq_f32)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_max, vmaxq_f32)
+#if CV_SIMD128_64F
+OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_min, vminq_f64)
+OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_max, vmaxq_f64)
+#endif
-
+#if CV_SIMD128_64F
+inline int64x2_t vmvnq_s64(int64x2_t a)
+{
+ int64x2_t vx = vreinterpretq_s64_u32(vdupq_n_u32(0xFFFFFFFF));
+ return veorq_s64(a, vx);
+}
+inline uint64x2_t vmvnq_u64(uint64x2_t a)
+{
+ uint64x2_t vx = vreinterpretq_u64_u32(vdupq_n_u32(0xFFFFFFFF));
+ return veorq_u64(a, vx);
+}
+#endif
#define OPENCV_HAL_IMPL_NEON_INT_CMP_OP(_Tpvec, cast, suffix, not_suffix) \
inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \
{ return _Tpvec(cast(vceqq_##suffix(a.val, b.val))); } \
@@ -486,6 +680,11 @@ OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int16x8, vreinterpretq_s16_u16, s16, u16)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint32x4, OPENCV_HAL_NOP, u32, u32)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int32x4, vreinterpretq_s32_u32, s32, u32)
OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_float32x4, vreinterpretq_f32_u32, f32, u32)
+#if CV_SIMD128_64F
+OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint64x2, OPENCV_HAL_NOP, u64, u64)
+OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int64x2, vreinterpretq_s64_u64, s64, u64)
+OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_float64x2, vreinterpretq_f64_u64, f64, u64)
+#endif
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_add_wrap, vaddq_u8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_add_wrap, vaddq_s8)
@@ -501,6 +700,9 @@ OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_absdiff, vabdq_u8)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_absdiff, vabdq_u16)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_absdiff, vabdq_u32)
OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_absdiff, vabdq_f32)
+#if CV_SIMD128_64F
+OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_absdiff, vabdq_f64)
+#endif
#define OPENCV_HAL_IMPL_NEON_BIN_FUNC2(_Tpvec, _Tpvec2, cast, func, intrin) \
inline _Tpvec2 func(const _Tpvec& a, const _Tpvec& b) \
@@ -528,6 +730,24 @@ inline v_float32x4 v_muladd(const v_float32x4& a, const v_float32x4& b, const v_
return v_float32x4(vmlaq_f32(c.val, a.val, b.val));
}
+#if CV_SIMD128_64F
+inline v_float64x2 v_magnitude(const v_float64x2& a, const v_float64x2& b)
+{
+ v_float64x2 x(vaddq_f64(vmulq_f64(a.val, a.val), vmulq_f64(b.val, b.val)));
+ return v_sqrt(x);
+}
+
+inline v_float64x2 v_sqr_magnitude(const v_float64x2& a, const v_float64x2& b)
+{
+ return v_float64x2(vaddq_f64(vmulq_f64(a.val, a.val), vmulq_f64(b.val, b.val)));
+}
+
+inline v_float64x2 v_muladd(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c)
+{
+ return v_float64x2(vaddq_f64(c.val, vmulq_f64(a.val, b.val)));
+}
+#endif
+
// trade efficiency for convenience
#define OPENCV_HAL_IMPL_NEON_SHIFT_OP(_Tpvec, suffix, _Tps, ssuffix) \
inline _Tpvec operator << (const _Tpvec& a, int n) \
@@ -539,7 +759,15 @@ template inline _Tpvec v_shl(const _Tpvec& a) \
template inline _Tpvec v_shr(const _Tpvec& a) \
{ return _Tpvec(vshrq_n_##suffix(a.val, n)); } \
template inline _Tpvec v_rshr(const _Tpvec& a) \
-{ return _Tpvec(vrshrq_n_##suffix(a.val, n)); }
+{ return _Tpvec(vrshrq_n_##suffix(a.val, n)); } \
+template inline _Tpvec v_rotate_right(const _Tpvec& a) \
+{ return _Tpvec(vextq_##suffix(a.val, vdupq_n_##suffix(0), n)); } \
+template inline _Tpvec v_rotate_left(const _Tpvec& a) \
+{ return _Tpvec(vextq_##suffix(vdupq_n_##suffix(0), a.val, _Tpvec::nlanes - n)); } \
+template inline _Tpvec v_rotate_right(const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(vextq_##suffix(a.val, b.val, n)); } \
+template inline _Tpvec v_rotate_left(const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(vextq_##suffix(b.val, a.val, _Tpvec::nlanes - n)); }
OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint8x16, u8, schar, s8)
OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int8x16, s8, schar, s8)
@@ -555,6 +783,8 @@ inline _Tpvec v_load(const _Tp* ptr) \
{ return _Tpvec(vld1q_##suffix(ptr)); } \
inline _Tpvec v_load_aligned(const _Tp* ptr) \
{ return _Tpvec(vld1q_##suffix(ptr)); } \
+inline _Tpvec v_load_low(const _Tp* ptr) \
+{ return _Tpvec(vcombine_##suffix(vld1_##suffix(ptr), vdup_n_##suffix((_Tp)0))); } \
inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \
{ return _Tpvec(vcombine_##suffix(vld1_##suffix(ptr0), vld1_##suffix(ptr1))); } \
inline void v_store(_Tp* ptr, const _Tpvec& a) \
@@ -575,26 +805,80 @@ OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int32x4, int, s32)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint64x2, uint64, u64)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int64x2, int64, s64)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float32x4, float, f32)
+#if CV_SIMD128_64F
+OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float64x2, double, f64)
+#endif
-#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(_Tpvec, scalartype, func, scalar_func) \
+#if CV_FP16
+// Workaround for old compilers
+inline v_float16x4 v_load_f16(const short* ptr)
+{ return v_float16x4(cv_vld1_f16(ptr)); }
+inline void v_store_f16(short* ptr, v_float16x4& a)
+{ cv_vst1_f16(ptr, a.val); }
+#endif
+
+#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \
inline scalartype v_reduce_##func(const _Tpvec& a) \
{ \
- scalartype CV_DECL_ALIGNED(16) buf[4]; \
- v_store_aligned(buf, a); \
- scalartype s0 = scalar_func(buf[0], buf[1]); \
- scalartype s1 = scalar_func(buf[2], buf[3]); \
- return scalar_func(s0, s1); \
+ _Tpnvec##_t a0 = vp##vectorfunc##_##suffix(vget_low_##suffix(a.val), vget_high_##suffix(a.val)); \
+ a0 = vp##vectorfunc##_##suffix(a0, a0); \
+ return (scalartype)vget_lane_##suffix(vp##vectorfunc##_##suffix(a0, a0),0); \
}
-OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, unsigned, sum, OPENCV_HAL_ADD)
-OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, unsigned, max, std::max)
-OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, unsigned, min, std::min)
-OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int, sum, OPENCV_HAL_ADD)
-OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int, max, std::max)
-OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int, min, std::min)
-OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float, sum, OPENCV_HAL_ADD)
-OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float, max, std::max)
-OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float, min, std::min)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_uint16x8, uint16x4, unsigned short, sum, add, u16)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_uint16x8, uint16x4, unsigned short, max, max, u16)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_uint16x8, uint16x4, unsigned short, min, min, u16)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_int16x8, int16x4, short, sum, add, s16)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_int16x8, int16x4, short, max, max, s16)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_int16x8, int16x4, short, min, min, s16)
+
+#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \
+inline scalartype v_reduce_##func(const _Tpvec& a) \
+{ \
+ _Tpnvec##_t a0 = vp##vectorfunc##_##suffix(vget_low_##suffix(a.val), vget_high_##suffix(a.val)); \
+ return (scalartype)vget_lane_##suffix(vp##vectorfunc##_##suffix(a0, vget_high_##suffix(a.val)),0); \
+}
+
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, uint32x2, unsigned, sum, add, u32)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, uint32x2, unsigned, max, max, u32)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, uint32x2, unsigned, min, min, u32)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int32x2, int, sum, add, s32)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int32x2, int, max, max, s32)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int32x2, int, min, min, s32)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float32x2, float, sum, add, f32)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float32x2, float, max, max, f32)
+OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float32x2, float, min, min, f32)
+
+inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b,
+ const v_float32x4& c, const v_float32x4& d)
+{
+ float32x4x2_t ab = vtrnq_f32(a.val, b.val);
+ float32x4x2_t cd = vtrnq_f32(c.val, d.val);
+
+ float32x4_t u0 = vaddq_f32(ab.val[0], ab.val[1]); // a0+a1 b0+b1 a2+a3 b2+b3
+ float32x4_t u1 = vaddq_f32(cd.val[0], cd.val[1]); // c0+c1 d0+d1 c2+c3 d2+d3
+
+ float32x4_t v0 = vcombine_f32(vget_low_f32(u0), vget_low_f32(u1));
+ float32x4_t v1 = vcombine_f32(vget_high_f32(u0), vget_high_f32(u1));
+
+ return v_float32x4(vaddq_f32(v0, v1));
+}
+
+#define OPENCV_HAL_IMPL_NEON_POPCOUNT(_Tpvec, cast) \
+inline v_uint32x4 v_popcount(const _Tpvec& a) \
+{ \
+ uint8x16_t t = vcntq_u8(cast(a.val)); \
+ uint16x8_t t0 = vpaddlq_u8(t); /* 16 -> 8 */ \
+ uint32x4_t t1 = vpaddlq_u16(t0); /* 8 -> 4 */ \
+ return v_uint32x4(t1); \
+}
+
+OPENCV_HAL_IMPL_NEON_POPCOUNT(v_uint8x16, OPENCV_HAL_NOP)
+OPENCV_HAL_IMPL_NEON_POPCOUNT(v_uint16x8, vreinterpretq_u8_u16)
+OPENCV_HAL_IMPL_NEON_POPCOUNT(v_uint32x4, vreinterpretq_u8_u32)
+OPENCV_HAL_IMPL_NEON_POPCOUNT(v_int8x16, vreinterpretq_u8_s8)
+OPENCV_HAL_IMPL_NEON_POPCOUNT(v_int16x8, vreinterpretq_u8_s16)
+OPENCV_HAL_IMPL_NEON_POPCOUNT(v_int32x4, vreinterpretq_u8_s32)
inline int v_signmask(const v_uint8x16& a)
{
@@ -627,6 +911,16 @@ inline int v_signmask(const v_int32x4& a)
{ return v_signmask(v_reinterpret_as_u32(a)); }
inline int v_signmask(const v_float32x4& a)
{ return v_signmask(v_reinterpret_as_u32(a)); }
+#if CV_SIMD128_64F
+inline int v_signmask(const v_uint64x2& a)
+{
+ int64x1_t m0 = vdup_n_s64(0);
+ uint64x2_t v0 = vshlq_u64(vshrq_n_u64(a.val, 63), vcombine_s64(m0, m0));
+ return (int)vgetq_lane_u64(v0, 0) + ((int)vgetq_lane_u64(v0, 1) << 1);
+}
+inline int v_signmask(const v_float64x2& a)
+{ return v_signmask(v_reinterpret_as_u64(a)); }
+#endif
#define OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(_Tpvec, suffix, shift) \
inline bool v_check_all(const v_##_Tpvec& a) \
@@ -645,6 +939,9 @@ inline bool v_check_any(const v_##_Tpvec& a) \
OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint8x16, u8, 7)
OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint16x8, u16, 15)
OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint32x4, u32, 31)
+#if CV_SIMD128_64F
+OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint64x2, u64, 63)
+#endif
inline bool v_check_all(const v_int8x16& a)
{ return v_check_all(v_reinterpret_as_u8(a)); }
@@ -664,6 +961,17 @@ inline bool v_check_any(const v_int32x4& a)
inline bool v_check_any(const v_float32x4& a)
{ return v_check_any(v_reinterpret_as_u32(a)); }
+#if CV_SIMD128_64F
+inline bool v_check_all(const v_int64x2& a)
+{ return v_check_all(v_reinterpret_as_u64(a)); }
+inline bool v_check_all(const v_float64x2& a)
+{ return v_check_all(v_reinterpret_as_u64(a)); }
+inline bool v_check_any(const v_int64x2& a)
+{ return v_check_any(v_reinterpret_as_u64(a)); }
+inline bool v_check_any(const v_float64x2& a)
+{ return v_check_any(v_reinterpret_as_u64(a)); }
+#endif
+
#define OPENCV_HAL_IMPL_NEON_SELECT(_Tpvec, suffix, usuffix) \
inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \
{ \
@@ -677,6 +985,9 @@ OPENCV_HAL_IMPL_NEON_SELECT(v_int16x8, s16, u16)
OPENCV_HAL_IMPL_NEON_SELECT(v_uint32x4, u32, u32)
OPENCV_HAL_IMPL_NEON_SELECT(v_int32x4, s32, u32)
OPENCV_HAL_IMPL_NEON_SELECT(v_float32x4, f32, u32)
+#if CV_SIMD128_64F
+OPENCV_HAL_IMPL_NEON_SELECT(v_float64x2, f64, u64)
+#endif
#define OPENCV_HAL_IMPL_NEON_EXPAND(_Tpvec, _Tpwvec, _Tp, suffix) \
inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \
@@ -710,6 +1021,27 @@ inline v_int32x4 v_load_expand_q(const schar* ptr)
return v_int32x4(vmovl_s16(v1));
}
+#if defined(__aarch64__)
+#define OPENCV_HAL_IMPL_NEON_UNPACKS(_Tpvec, suffix) \
+inline void v_zip(const v_##_Tpvec& a0, const v_##_Tpvec& a1, v_##_Tpvec& b0, v_##_Tpvec& b1) \
+{ \
+ b0.val = vzip1q_##suffix(a0.val, a1.val); \
+ b1.val = vzip2q_##suffix(a0.val, a1.val); \
+} \
+inline v_##_Tpvec v_combine_low(const v_##_Tpvec& a, const v_##_Tpvec& b) \
+{ \
+ return v_##_Tpvec(vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val))); \
+} \
+inline v_##_Tpvec v_combine_high(const v_##_Tpvec& a, const v_##_Tpvec& b) \
+{ \
+ return v_##_Tpvec(vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val))); \
+} \
+inline void v_recombine(const v_##_Tpvec& a, const v_##_Tpvec& b, v_##_Tpvec& c, v_##_Tpvec& d) \
+{ \
+ c.val = vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val)); \
+ d.val = vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val)); \
+}
+#else
#define OPENCV_HAL_IMPL_NEON_UNPACKS(_Tpvec, suffix) \
inline void v_zip(const v_##_Tpvec& a0, const v_##_Tpvec& a1, v_##_Tpvec& b0, v_##_Tpvec& b1) \
{ \
@@ -730,6 +1062,7 @@ inline void v_recombine(const v_##_Tpvec& a, const v_##_Tpvec& b, v_##_Tpvec& c,
c.val = vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val)); \
d.val = vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val)); \
}
+#endif
OPENCV_HAL_IMPL_NEON_UNPACKS(uint8x16, u8)
OPENCV_HAL_IMPL_NEON_UNPACKS(int8x16, s8)
@@ -738,6 +1071,9 @@ OPENCV_HAL_IMPL_NEON_UNPACKS(int16x8, s16)
OPENCV_HAL_IMPL_NEON_UNPACKS(uint32x4, u32)
OPENCV_HAL_IMPL_NEON_UNPACKS(int32x4, s32)
OPENCV_HAL_IMPL_NEON_UNPACKS(float32x4, f32)
+#if CV_SIMD128_64F
+OPENCV_HAL_IMPL_NEON_UNPACKS(float64x2, f64)
+#endif
#define OPENCV_HAL_IMPL_NEON_EXTRACT(_Tpvec, suffix) \
template \
@@ -755,6 +1091,9 @@ OPENCV_HAL_IMPL_NEON_EXTRACT(int32x4, s32)
OPENCV_HAL_IMPL_NEON_EXTRACT(uint64x2, u64)
OPENCV_HAL_IMPL_NEON_EXTRACT(int64x2, s64)
OPENCV_HAL_IMPL_NEON_EXTRACT(float32x4, f32)
+#if CV_SIMD128_64F
+OPENCV_HAL_IMPL_NEON_EXTRACT(float64x2, f64)
+#endif
inline v_int32x4 v_round(const v_float32x4& a)
{
@@ -782,6 +1121,38 @@ inline v_int32x4 v_ceil(const v_float32x4& a)
inline v_int32x4 v_trunc(const v_float32x4& a)
{ return v_int32x4(vcvtq_s32_f32(a.val)); }
+#if CV_SIMD128_64F
+inline v_int32x4 v_round(const v_float64x2& a)
+{
+ static const int32x2_t zero = vdup_n_s32(0);
+ return v_int32x4(vcombine_s32(vmovn_s64(vcvtaq_s64_f64(a.val)), zero));
+}
+
+inline v_int32x4 v_floor(const v_float64x2& a)
+{
+ static const int32x2_t zero = vdup_n_s32(0);
+ int64x2_t a1 = vcvtq_s64_f64(a.val);
+ uint64x2_t mask = vcgtq_f64(vcvtq_f64_s64(a1), a.val);
+ a1 = vaddq_s64(a1, vreinterpretq_s64_u64(mask));
+ return v_int32x4(vcombine_s32(vmovn_s64(a1), zero));
+}
+
+inline v_int32x4 v_ceil(const v_float64x2& a)
+{
+ static const int32x2_t zero = vdup_n_s32(0);
+ int64x2_t a1 = vcvtq_s64_f64(a.val);
+ uint64x2_t mask = vcgtq_f64(a.val, vcvtq_f64_s64(a1));
+ a1 = vsubq_s64(a1, vreinterpretq_s64_u64(mask));
+ return v_int32x4(vcombine_s32(vmovn_s64(a1), zero));
+}
+
+inline v_int32x4 v_trunc(const v_float64x2& a)
+{
+ static const int32x2_t zero = vdup_n_s32(0);
+ return v_int32x4(vcombine_s32(vmovn_s64(vcvtaq_s64_f64(a.val)), zero));
+}
+#endif
+
#define OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(_Tpvec, suffix) \
inline void v_transpose4x4(const v_##_Tpvec& a0, const v_##_Tpvec& a1, \
const v_##_Tpvec& a2, const v_##_Tpvec& a3, \
@@ -809,6 +1180,12 @@ OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(int32x4, s32)
OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(float32x4, f32)
#define OPENCV_HAL_IMPL_NEON_INTERLEAVED(_Tpvec, _Tp, suffix) \
+inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b) \
+{ \
+ _Tpvec##x2_t v = vld2q_##suffix(ptr); \
+ a.val = v.val[0]; \
+ b.val = v.val[1]; \
+} \
inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b, v_##_Tpvec& c) \
{ \
_Tpvec##x3_t v = vld3q_##suffix(ptr); \
@@ -825,6 +1202,13 @@ inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b, \
c.val = v.val[2]; \
d.val = v.val[3]; \
} \
+inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b) \
+{ \
+ _Tpvec##x2_t v; \
+ v.val[0] = a.val; \
+ v.val[1] = b.val; \
+ vst2q_##suffix(ptr, v); \
+} \
inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, const v_##_Tpvec& c) \
{ \
_Tpvec##x3_t v; \
@@ -851,12 +1235,67 @@ OPENCV_HAL_IMPL_NEON_INTERLEAVED(int16x8, short, s16)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(uint32x4, unsigned, u32)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(int32x4, int, s32)
OPENCV_HAL_IMPL_NEON_INTERLEAVED(float32x4, float, f32)
+#if CV_SIMD128_64F
+OPENCV_HAL_IMPL_NEON_INTERLEAVED(float64x2, double, f64)
+#endif
inline v_float32x4 v_cvt_f32(const v_int32x4& a)
{
return v_float32x4(vcvtq_f32_s32(a.val));
}
+#if CV_SIMD128_64F
+inline v_float32x4 v_cvt_f32(const v_float64x2& a)
+{
+ float32x2_t zero = vdup_n_f32(0.0f);
+ return v_float32x4(vcombine_f32(vcvt_f32_f64(a.val), zero));
+}
+
+inline v_float64x2 v_cvt_f64(const v_int32x4& a)
+{
+ return v_float64x2(vcvt_f64_f32(vcvt_f32_s32(vget_low_s32(a.val))));
+}
+
+inline v_float64x2 v_cvt_f64_high(const v_int32x4& a)
+{
+ return v_float64x2(vcvt_f64_f32(vcvt_f32_s32(vget_high_s32(a.val))));
+}
+
+inline v_float64x2 v_cvt_f64(const v_float32x4& a)
+{
+ return v_float64x2(vcvt_f64_f32(vget_low_f32(a.val)));
+}
+
+inline v_float64x2 v_cvt_f64_high(const v_float32x4& a)
+{
+ return v_float64x2(vcvt_f64_f32(vget_high_f32(a.val)));
+}
+#endif
+
+#if CV_FP16
+inline v_float32x4 v_cvt_f32(const v_float16x4& a)
+{
+ return v_float32x4(vcvt_f32_f16(a.val));
+}
+
+inline v_float16x4 v_cvt_f16(const v_float32x4& a)
+{
+ return v_float16x4(vcvt_f16_f32(a.val));
+}
+#endif
+
+//! @name Check SIMD support
+//! @{
+//! @brief Check CPU capability of SIMD operation
+static inline bool hasSIMD128()
+{
+ return (CV_CPU_HAS_SUPPORT_NEON) ? true : false;
+}
+
+//! @}
+
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
+
//! @endcond
}
diff --git a/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/intrin_sse.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin_sse.hpp
similarity index 83%
rename from lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/intrin_sse.hpp
rename to lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin_sse.hpp
index 1840e030..0e740f64 100644
--- a/lib/3rdParty/OpenCV3.1/include/opencv2/core/hal/intrin_sse.hpp
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin_sse.hpp
@@ -42,10 +42,11 @@
//
//M*/
-#ifndef __OPENCV_HAL_SSE_HPP__
-#define __OPENCV_HAL_SSE_HPP__
+#ifndef OPENCV_HAL_SSE_HPP
+#define OPENCV_HAL_SSE_HPP
#include
+#include "opencv2/core/utility.hpp"
#define CV_SIMD128 1
#define CV_SIMD128_64F 1
@@ -55,12 +56,14 @@ namespace cv
//! @cond IGNORED
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
+
struct v_uint8x16
{
typedef uchar lane_type;
enum { nlanes = 16 };
- v_uint8x16() {}
+ v_uint8x16() : val(_mm_setzero_si128()) {}
explicit v_uint8x16(__m128i v) : val(v) {}
v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7,
uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15)
@@ -83,7 +86,7 @@ struct v_int8x16
typedef schar lane_type;
enum { nlanes = 16 };
- v_int8x16() {}
+ v_int8x16() : val(_mm_setzero_si128()) {}
explicit v_int8x16(__m128i v) : val(v) {}
v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7,
schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15)
@@ -106,7 +109,7 @@ struct v_uint16x8
typedef ushort lane_type;
enum { nlanes = 8 };
- v_uint16x8() {}
+ v_uint16x8() : val(_mm_setzero_si128()) {}
explicit v_uint16x8(__m128i v) : val(v) {}
v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7)
{
@@ -126,7 +129,7 @@ struct v_int16x8
typedef short lane_type;
enum { nlanes = 8 };
- v_int16x8() {}
+ v_int16x8() : val(_mm_setzero_si128()) {}
explicit v_int16x8(__m128i v) : val(v) {}
v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7)
{
@@ -145,7 +148,7 @@ struct v_uint32x4
typedef unsigned lane_type;
enum { nlanes = 4 };
- v_uint32x4() {}
+ v_uint32x4() : val(_mm_setzero_si128()) {}
explicit v_uint32x4(__m128i v) : val(v) {}
v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3)
{
@@ -163,7 +166,7 @@ struct v_int32x4
typedef int lane_type;
enum { nlanes = 4 };
- v_int32x4() {}
+ v_int32x4() : val(_mm_setzero_si128()) {}
explicit v_int32x4(__m128i v) : val(v) {}
v_int32x4(int v0, int v1, int v2, int v3)
{
@@ -181,7 +184,7 @@ struct v_float32x4
typedef float lane_type;
enum { nlanes = 4 };
- v_float32x4() {}
+ v_float32x4() : val(_mm_setzero_ps()) {}
explicit v_float32x4(__m128 v) : val(v) {}
v_float32x4(float v0, float v1, float v2, float v3)
{
@@ -199,7 +202,7 @@ struct v_uint64x2
typedef uint64 lane_type;
enum { nlanes = 2 };
- v_uint64x2() {}
+ v_uint64x2() : val(_mm_setzero_si128()) {}
explicit v_uint64x2(__m128i v) : val(v) {}
v_uint64x2(uint64 v0, uint64 v1)
{
@@ -219,7 +222,7 @@ struct v_int64x2
typedef int64 lane_type;
enum { nlanes = 2 };
- v_int64x2() {}
+ v_int64x2() : val(_mm_setzero_si128()) {}
explicit v_int64x2(__m128i v) : val(v) {}
v_int64x2(int64 v0, int64 v1)
{
@@ -239,7 +242,7 @@ struct v_float64x2
typedef double lane_type;
enum { nlanes = 2 };
- v_float64x2() {}
+ v_float64x2() : val(_mm_setzero_pd()) {}
explicit v_float64x2(__m128d v) : val(v) {}
v_float64x2(double v0, double v1)
{
@@ -252,6 +255,26 @@ struct v_float64x2
__m128d val;
};
+#if CV_FP16
+struct v_float16x4
+{
+ typedef short lane_type;
+ enum { nlanes = 4 };
+
+ v_float16x4() : val(_mm_setzero_si128()) {}
+ explicit v_float16x4(__m128i v) : val(v) {}
+ v_float16x4(short v0, short v1, short v2, short v3)
+ {
+ val = _mm_setr_epi16(v0, v1, v2, v3, 0, 0, 0, 0);
+ }
+ short get0() const
+ {
+ return (short)_mm_cvtsi128_si32(val);
+ }
+ __m128i val;
+};
+#endif
+
#define OPENCV_HAL_IMPL_SSE_INITVEC(_Tpvec, _Tp, suffix, zsuffix, ssuffix, _Tps, cast) \
inline _Tpvec v_setzero_##suffix() { return _Tpvec(_mm_setzero_##zsuffix()); } \
inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(_mm_set1_##ssuffix((_Tps)v)); } \
@@ -579,6 +602,16 @@ inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0,
return v_float32x4(_mm_add_ps(_mm_add_ps(v0, v1), _mm_add_ps(v2, v3)));
}
+inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0,
+ const v_float32x4& m1, const v_float32x4& m2,
+ const v_float32x4& a)
+{
+ __m128 v0 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(0, 0, 0, 0)), m0.val);
+ __m128 v1 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(1, 1, 1, 1)), m1.val);
+ __m128 v2 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(2, 2, 2, 2)), m2.val);
+
+ return v_float32x4(_mm_add_ps(_mm_add_ps(v0, v1), _mm_add_ps(v2, a.val)));
+}
#define OPENCV_HAL_IMPL_SSE_BIN_OP(bin_op, _Tpvec, intrin) \
inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
@@ -719,6 +752,18 @@ inline v_float64x2 v_invsqrt(const v_float64x2& x)
return v_float64x2(_mm_div_pd(v_1, _mm_sqrt_pd(x.val)));
}
+#define OPENCV_HAL_IMPL_SSE_ABS_INT_FUNC(_Tpuvec, _Tpsvec, func, suffix, subWidth) \
+inline _Tpuvec v_abs(const _Tpsvec& x) \
+{ return _Tpuvec(_mm_##func##_ep##suffix(x.val, _mm_sub_ep##subWidth(_mm_setzero_si128(), x.val))); }
+
+OPENCV_HAL_IMPL_SSE_ABS_INT_FUNC(v_uint8x16, v_int8x16, min, u8, i8)
+OPENCV_HAL_IMPL_SSE_ABS_INT_FUNC(v_uint16x8, v_int16x8, max, i16, i16)
+inline v_uint32x4 v_abs(const v_int32x4& x)
+{
+ __m128i s = _mm_srli_epi32(x.val, 31);
+ __m128i f = _mm_srai_epi32(x.val, 31);
+ return v_uint32x4(_mm_add_epi32(_mm_xor_si128(x.val, f), s));
+}
inline v_float32x4 v_abs(const v_float32x4& x)
{ return v_float32x4(_mm_and_ps(x.val, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)))); }
inline v_float64x2 v_abs(const v_float64x2& x)
@@ -864,6 +909,15 @@ inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \
OPENCV_HAL_IMPL_SSE_FLT_CMP_OP(v_float32x4, ps)
OPENCV_HAL_IMPL_SSE_FLT_CMP_OP(v_float64x2, pd)
+#define OPENCV_HAL_IMPL_SSE_64BIT_CMP_OP(_Tpvec, cast) \
+inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \
+{ return cast(v_reinterpret_as_f64(a) == v_reinterpret_as_f64(b)); } \
+inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \
+{ return cast(v_reinterpret_as_f64(a) != v_reinterpret_as_f64(b)); }
+
+OPENCV_HAL_IMPL_SSE_64BIT_CMP_OP(v_uint64x2, v_reinterpret_as_u64);
+OPENCV_HAL_IMPL_SSE_64BIT_CMP_OP(v_int64x2, v_reinterpret_as_s64);
+
OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint8x16, v_add_wrap, _mm_add_epi8)
OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int8x16, v_add_wrap, _mm_add_epi8)
OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint16x8, v_add_wrap, _mm_add_epi16)
@@ -967,11 +1021,40 @@ OPENCV_HAL_IMPL_SSE_SHIFT_OP(v_uint16x8, v_int16x8, epi16, _mm_srai_epi16)
OPENCV_HAL_IMPL_SSE_SHIFT_OP(v_uint32x4, v_int32x4, epi32, _mm_srai_epi32)
OPENCV_HAL_IMPL_SSE_SHIFT_OP(v_uint64x2, v_int64x2, epi64, v_srai_epi64)
+template
+inline _Tpvec v_rotate_right(const _Tpvec &a)
+{
+ enum { CV_SHIFT = imm*(sizeof(typename _Tpvec::lane_type)) };
+ return _Tpvec(_mm_srli_si128(a.val, CV_SHIFT));
+}
+template
+inline _Tpvec v_rotate_left(const _Tpvec &a)
+{
+ enum { CV_SHIFT = imm*(sizeof(typename _Tpvec::lane_type)) };
+ return _Tpvec(_mm_slli_si128(a.val, CV_SHIFT));
+}
+template
+inline _Tpvec v_rotate_right(const _Tpvec &a, const _Tpvec &b)
+{
+ enum { CV_SHIFT1 = imm*(sizeof(typename _Tpvec::lane_type)) };
+ enum { CV_SHIFT2 = 16 - imm*(sizeof(typename _Tpvec::lane_type)) };
+ return _Tpvec(_mm_or_si128(_mm_srli_si128(a.val, CV_SHIFT1), _mm_slli_si128(b.val, CV_SHIFT2)));
+}
+template
+inline _Tpvec v_rotate_left(const _Tpvec &a, const _Tpvec &b)
+{
+ enum { CV_SHIFT1 = imm*(sizeof(typename _Tpvec::lane_type)) };
+ enum { CV_SHIFT2 = 16 - imm*(sizeof(typename _Tpvec::lane_type)) };
+ return _Tpvec(_mm_or_si128(_mm_slli_si128(a.val, CV_SHIFT1), _mm_srli_si128(b.val, CV_SHIFT2)));
+}
+
#define OPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(_Tpvec, _Tp) \
inline _Tpvec v_load(const _Tp* ptr) \
{ return _Tpvec(_mm_loadu_si128((const __m128i*)ptr)); } \
inline _Tpvec v_load_aligned(const _Tp* ptr) \
{ return _Tpvec(_mm_load_si128((const __m128i*)ptr)); } \
+inline _Tpvec v_load_low(const _Tp* ptr) \
+{ return _Tpvec(_mm_loadl_epi64((const __m128i*)ptr)); } \
inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \
{ \
return _Tpvec(_mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i*)ptr0), \
@@ -1000,6 +1083,8 @@ inline _Tpvec v_load(const _Tp* ptr) \
{ return _Tpvec(_mm_loadu_##suffix(ptr)); } \
inline _Tpvec v_load_aligned(const _Tp* ptr) \
{ return _Tpvec(_mm_load_##suffix(ptr)); } \
+inline _Tpvec v_load_low(const _Tp* ptr) \
+{ return _Tpvec(_mm_castsi128_##suffix(_mm_loadl_epi64((const __m128i*)ptr))); } \
inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \
{ \
return _Tpvec(_mm_castsi128_##suffix( \
@@ -1021,6 +1106,62 @@ inline void v_store_high(_Tp* ptr, const _Tpvec& a) \
OPENCV_HAL_IMPL_SSE_LOADSTORE_FLT_OP(v_float32x4, float, ps)
OPENCV_HAL_IMPL_SSE_LOADSTORE_FLT_OP(v_float64x2, double, pd)
+#if CV_FP16
+inline v_float16x4 v_load_f16(const short* ptr)
+{ return v_float16x4(_mm_loadl_epi64((const __m128i*)ptr)); }
+inline void v_store_f16(short* ptr, v_float16x4& a)
+{ _mm_storel_epi64((__m128i*)ptr, a.val); }
+#endif
+
+#define OPENCV_HAL_IMPL_SSE_REDUCE_OP_8(_Tpvec, scalartype, func, suffix, sbit) \
+inline scalartype v_reduce_##func(const v_##_Tpvec& a) \
+{ \
+ __m128i val = a.val; \
+ val = _mm_##func##_##suffix(val, _mm_srli_si128(val,8)); \
+ val = _mm_##func##_##suffix(val, _mm_srli_si128(val,4)); \
+ val = _mm_##func##_##suffix(val, _mm_srli_si128(val,2)); \
+ return (scalartype)_mm_cvtsi128_si32(val); \
+} \
+inline unsigned scalartype v_reduce_##func(const v_u##_Tpvec& a) \
+{ \
+ __m128i val = a.val; \
+ __m128i smask = _mm_set1_epi16(sbit); \
+ val = _mm_xor_si128(val, smask); \
+ val = _mm_##func##_##suffix(val, _mm_srli_si128(val,8)); \
+ val = _mm_##func##_##suffix(val, _mm_srli_si128(val,4)); \
+ val = _mm_##func##_##suffix(val, _mm_srli_si128(val,2)); \
+ return (unsigned scalartype)(_mm_cvtsi128_si32(val) ^ sbit); \
+}
+#define OPENCV_HAL_IMPL_SSE_REDUCE_OP_8_SUM(_Tpvec, scalartype, suffix) \
+inline scalartype v_reduce_sum(const v_##_Tpvec& a) \
+{ \
+ __m128i val = a.val; \
+ val = _mm_adds_epi##suffix(val, _mm_srli_si128(val, 8)); \
+ val = _mm_adds_epi##suffix(val, _mm_srli_si128(val, 4)); \
+ val = _mm_adds_epi##suffix(val, _mm_srli_si128(val, 2)); \
+ return (scalartype)_mm_cvtsi128_si32(val); \
+} \
+inline unsigned scalartype v_reduce_sum(const v_u##_Tpvec& a) \
+{ \
+ __m128i val = a.val; \
+ val = _mm_adds_epu##suffix(val, _mm_srli_si128(val, 8)); \
+ val = _mm_adds_epu##suffix(val, _mm_srli_si128(val, 4)); \
+ val = _mm_adds_epu##suffix(val, _mm_srli_si128(val, 2)); \
+ return (unsigned scalartype)_mm_cvtsi128_si32(val); \
+}
+OPENCV_HAL_IMPL_SSE_REDUCE_OP_8(int16x8, short, max, epi16, (short)-32768)
+OPENCV_HAL_IMPL_SSE_REDUCE_OP_8(int16x8, short, min, epi16, (short)-32768)
+OPENCV_HAL_IMPL_SSE_REDUCE_OP_8_SUM(int16x8, short, 16)
+
+#define OPENCV_HAL_IMPL_SSE_REDUCE_OP_4_SUM(_Tpvec, scalartype, regtype, suffix, cast_from, cast_to, extract) \
+inline scalartype v_reduce_sum(const _Tpvec& a) \
+{ \
+ regtype val = a.val; \
+ val = _mm_add_##suffix(val, cast_to(_mm_srli_si128(cast_from(val), 8))); \
+ val = _mm_add_##suffix(val, cast_to(_mm_srli_si128(cast_from(val), 4))); \
+ return (scalartype)_mm_cvt##extract(val); \
+}
+
#define OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(_Tpvec, scalartype, func, scalar_func) \
inline scalartype v_reduce_##func(const _Tpvec& a) \
{ \
@@ -1031,16 +1172,53 @@ inline scalartype v_reduce_##func(const _Tpvec& a) \
return scalar_func(s0, s1); \
}
-OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_uint32x4, unsigned, sum, OPENCV_HAL_ADD)
+OPENCV_HAL_IMPL_SSE_REDUCE_OP_4_SUM(v_uint32x4, unsigned, __m128i, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP, si128_si32)
+OPENCV_HAL_IMPL_SSE_REDUCE_OP_4_SUM(v_int32x4, int, __m128i, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP, si128_si32)
+OPENCV_HAL_IMPL_SSE_REDUCE_OP_4_SUM(v_float32x4, float, __m128, ps, _mm_castps_si128, _mm_castsi128_ps, ss_f32)
+
+inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b,
+ const v_float32x4& c, const v_float32x4& d)
+{
+#if CV_SSE3
+ __m128 ab = _mm_hadd_ps(a.val, b.val);
+ __m128 cd = _mm_hadd_ps(c.val, d.val);
+ return v_float32x4(_mm_hadd_ps(ab, cd));
+#else
+ __m128 ac = _mm_add_ps(_mm_unpacklo_ps(a.val, c.val), _mm_unpackhi_ps(a.val, c.val));
+ __m128 bd = _mm_add_ps(_mm_unpacklo_ps(b.val, d.val), _mm_unpackhi_ps(b.val, d.val));
+ return v_float32x4(_mm_add_ps(_mm_unpacklo_ps(ac, bd), _mm_unpackhi_ps(ac, bd)));
+#endif
+}
+
OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_uint32x4, unsigned, max, std::max)
OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_uint32x4, unsigned, min, std::min)
-OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_int32x4, int, sum, OPENCV_HAL_ADD)
OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_int32x4, int, max, std::max)
OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_int32x4, int, min, std::min)
-OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_float32x4, float, sum, OPENCV_HAL_ADD)
OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_float32x4, float, max, std::max)
OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_float32x4, float, min, std::min)
+#define OPENCV_HAL_IMPL_SSE_POPCOUNT(_Tpvec) \
+inline v_uint32x4 v_popcount(const _Tpvec& a) \
+{ \
+ __m128i m1 = _mm_set1_epi32(0x55555555); \
+ __m128i m2 = _mm_set1_epi32(0x33333333); \
+ __m128i m4 = _mm_set1_epi32(0x0f0f0f0f); \
+ __m128i p = a.val; \
+ p = _mm_add_epi32(_mm_and_si128(_mm_srli_epi32(p, 1), m1), _mm_and_si128(p, m1)); \
+ p = _mm_add_epi32(_mm_and_si128(_mm_srli_epi32(p, 2), m2), _mm_and_si128(p, m2)); \
+ p = _mm_add_epi32(_mm_and_si128(_mm_srli_epi32(p, 4), m4), _mm_and_si128(p, m4)); \
+ p = _mm_adds_epi8(p, _mm_srli_si128(p, 1)); \
+ p = _mm_adds_epi8(p, _mm_srli_si128(p, 2)); \
+ return v_uint32x4(_mm_and_si128(p, _mm_set1_epi32(0x000000ff))); \
+}
+
+OPENCV_HAL_IMPL_SSE_POPCOUNT(v_uint8x16)
+OPENCV_HAL_IMPL_SSE_POPCOUNT(v_uint16x8)
+OPENCV_HAL_IMPL_SSE_POPCOUNT(v_uint32x4)
+OPENCV_HAL_IMPL_SSE_POPCOUNT(v_int8x16)
+OPENCV_HAL_IMPL_SSE_POPCOUNT(v_int16x8)
+OPENCV_HAL_IMPL_SSE_POPCOUNT(v_int32x4)
+
#define OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(_Tpvec, suffix, pack_op, and_op, signmask, allmask) \
inline int v_signmask(const _Tpvec& a) \
{ \
@@ -1256,6 +1434,24 @@ OPENCV_HAL_IMPL_SSE_TRANSPOSE4x4(v_int32x4, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NO
OPENCV_HAL_IMPL_SSE_TRANSPOSE4x4(v_float32x4, ps, _mm_castps_si128, _mm_castsi128_ps)
// adopted from sse_utils.hpp
+inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b)
+{
+ __m128i t00 = _mm_loadu_si128((const __m128i*)ptr);
+ __m128i t01 = _mm_loadu_si128((const __m128i*)(ptr + 16));
+
+ __m128i t10 = _mm_unpacklo_epi8(t00, t01);
+ __m128i t11 = _mm_unpackhi_epi8(t00, t01);
+
+ __m128i t20 = _mm_unpacklo_epi8(t10, t11);
+ __m128i t21 = _mm_unpackhi_epi8(t10, t11);
+
+ __m128i t30 = _mm_unpacklo_epi8(t20, t21);
+ __m128i t31 = _mm_unpackhi_epi8(t20, t21);
+
+ a.val = _mm_unpacklo_epi8(t30, t31);
+ b.val = _mm_unpackhi_epi8(t30, t31);
+}
+
inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c)
{
__m128i t00 = _mm_loadu_si128((const __m128i*)ptr);
@@ -1374,6 +1570,65 @@ inline void v_load_deinterleave(const unsigned* ptr, v_uint32x4& a, v_uint32x4&
v_transpose4x4(u0, u1, u2, u3, a, b, c, d);
}
+inline void v_load_deinterleave(const uint64 *ptr, v_uint64x2& a, v_uint64x2& b, v_uint64x2& c)
+{
+ __m128i t0 = _mm_loadu_si128((const __m128i*)ptr);
+ __m128i t1 = _mm_loadu_si128((const __m128i*)(ptr + 2));
+ __m128i t2 = _mm_loadu_si128((const __m128i*)(ptr + 4));
+
+ a = v_uint64x2(_mm_unpacklo_epi64(t0, _mm_unpackhi_epi64(t1, t1)));
+ b = v_uint64x2(_mm_unpacklo_epi64(_mm_unpackhi_epi64(t0, t0), t2));
+ c = v_uint64x2(_mm_unpacklo_epi64(t1, _mm_unpackhi_epi64(t2, t2)));
+}
+
+inline void v_load_deinterleave(const int64 *ptr, v_int64x2& a, v_int64x2& b, v_int64x2& c)
+{
+ v_uint64x2 t0, t1, t2;
+ v_load_deinterleave((const uint64*)ptr, t0, t1, t2);
+ a = v_reinterpret_as_s64(t0);
+ b = v_reinterpret_as_s64(t1);
+ c = v_reinterpret_as_s64(t2);
+}
+
+inline void v_load_deinterleave(const double *ptr, v_float64x2& a, v_float64x2& b, v_float64x2& c)
+{
+ v_uint64x2 t0, t1, t2;
+ v_load_deinterleave((const uint64*)ptr, t0, t1, t2);
+ a = v_reinterpret_as_f64(t0);
+ b = v_reinterpret_as_f64(t1);
+ c = v_reinterpret_as_f64(t2);
+}
+
+// 2-channel, float only
+inline void v_load_deinterleave(const float* ptr, v_float32x4& a, v_float32x4& b)
+{
+ const int mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1);
+
+ __m128 u0 = _mm_loadu_ps(ptr); // a0 b0 a1 b1
+ __m128 u1 = _mm_loadu_ps((ptr + 4)); // a2 b2 a3 b3
+
+ a.val = _mm_shuffle_ps(u0, u1, mask_lo); // a0 a1 a2 a3
+ b.val = _mm_shuffle_ps(u0, u1, mask_hi); // b0 b1 ab b3
+}
+
+inline void v_store_interleave( short* ptr, const v_int16x8& a, const v_int16x8& b )
+{
+ __m128i t0, t1;
+ t0 = _mm_unpacklo_epi16(a.val, b.val);
+ t1 = _mm_unpackhi_epi16(a.val, b.val);
+ _mm_storeu_si128((__m128i*)(ptr), t0);
+ _mm_storeu_si128((__m128i*)(ptr + 8), t1);
+}
+
+inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b)
+{
+ __m128i v0 = _mm_unpacklo_epi8(a.val, b.val);
+ __m128i v1 = _mm_unpackhi_epi8(a.val, b.val);
+
+ _mm_storeu_si128((__m128i*)(ptr), v0);
+ _mm_storeu_si128((__m128i*)(ptr + 16), v1);
+}
+
inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b,
const v_uint8x16& c )
{
@@ -1529,6 +1784,39 @@ inline void v_store_interleave(unsigned* ptr, const v_uint32x4& a, const v_uint3
v_store(ptr + 12, t3);
}
+// 2-channel, float only
+inline void v_store_interleave(float* ptr, const v_float32x4& a, const v_float32x4& b)
+{
+ // a0 a1 a2 a3 ...
+ // b0 b1 b2 b3 ...
+ __m128 u0 = _mm_unpacklo_ps(a.val, b.val); // a0 b0 a1 b1
+ __m128 u1 = _mm_unpackhi_ps(a.val, b.val); // a2 b2 a3 b3
+
+ _mm_storeu_ps(ptr, u0);
+ _mm_storeu_ps((ptr + 4), u1);
+}
+
+inline void v_store_interleave(uint64 *ptr, const v_uint64x2& a, const v_uint64x2& b, const v_uint64x2& c)
+{
+ __m128i t0 = _mm_unpacklo_epi64(a.val, b.val);
+ __m128i t1 = _mm_unpacklo_epi64(c.val, _mm_unpackhi_epi64(a.val, a.val));
+ __m128i t2 = _mm_unpackhi_epi64(b.val, c.val);
+
+ _mm_storeu_si128((__m128i*)ptr, t0);
+ _mm_storeu_si128((__m128i*)(ptr + 2), t1);
+ _mm_storeu_si128((__m128i*)(ptr + 4), t2);
+}
+
+inline void v_store_interleave(int64 *ptr, const v_int64x2& a, const v_int64x2& b, const v_int64x2& c)
+{
+ v_store_interleave((uint64*)ptr, v_reinterpret_as_u64(a), v_reinterpret_as_u64(b), v_reinterpret_as_u64(c));
+}
+
+inline void v_store_interleave(double *ptr, const v_float64x2& a, const v_float64x2& b, const v_float64x2& c)
+{
+ v_store_interleave((uint64*)ptr, v_reinterpret_as_u64(a), v_reinterpret_as_u64(b), v_reinterpret_as_u64(c));
+}
+
#define OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE(_Tpvec, _Tp, suffix, _Tpuvec, _Tpu, usuffix) \
inline void v_load_deinterleave( const _Tp* ptr, _Tpvec& a0, \
_Tpvec& b0, _Tpvec& c0 ) \
@@ -1587,11 +1875,45 @@ inline v_float64x2 v_cvt_f64(const v_int32x4& a)
return v_float64x2(_mm_cvtepi32_pd(a.val));
}
+inline v_float64x2 v_cvt_f64_high(const v_int32x4& a)
+{
+ return v_float64x2(_mm_cvtepi32_pd(_mm_srli_si128(a.val,8)));
+}
+
inline v_float64x2 v_cvt_f64(const v_float32x4& a)
{
return v_float64x2(_mm_cvtps_pd(a.val));
}
+inline v_float64x2 v_cvt_f64_high(const v_float32x4& a)
+{
+ return v_float64x2(_mm_cvtps_pd(_mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(a.val),8))));
+}
+
+#if CV_FP16
+inline v_float32x4 v_cvt_f32(const v_float16x4& a)
+{
+ return v_float32x4(_mm_cvtph_ps(a.val));
+}
+
+inline v_float16x4 v_cvt_f16(const v_float32x4& a)
+{
+ return v_float16x4(_mm_cvtps_ph(a.val, 0));
+}
+#endif
+
+//! @name Check SIMD support
+//! @{
+//! @brief Check CPU capability of SIMD operation
+static inline bool hasSIMD128()
+{
+ return (CV_CPU_HAS_SUPPORT_SSE2) ? true : false;
+}
+
+//! @}
+
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
+
//! @endcond
}
diff --git a/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin_vsx.hpp b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin_vsx.hpp
new file mode 100644
index 00000000..9f050f7c
--- /dev/null
+++ b/lib/3rdParty/OpenCV3.4/include/opencv2/core/hal/intrin_vsx.hpp
@@ -0,0 +1,962 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Copyright (C) 2015, Itseez Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef OPENCV_HAL_VSX_HPP
+#define OPENCV_HAL_VSX_HPP
+
+#include
+#include "opencv2/core/utility.hpp"
+
+#define CV_SIMD128 1
+#define CV_SIMD128_64F 1
+
+/**
+ * todo: supporting half precision for power9
+ * convert instractions xvcvhpsp, xvcvsphp
+**/
+
+namespace cv
+{
+
+//! @cond IGNORED
+
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
+
+///////// Types ////////////
+
+struct v_uint8x16
+{
+ typedef uchar lane_type;
+ enum { nlanes = 16 };
+ vec_uchar16 val;
+
+ explicit v_uint8x16(const vec_uchar16& v) : val(v)
+ {}
+ v_uint8x16() : val(vec_uchar16_z)
+ {}
+ v_uint8x16(vec_bchar16 v) : val(vec_uchar16_c(v))
+ {}
+ v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7,
+ uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15)
+ : val(vec_uchar16_set(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15))
+ {}
+ uchar get0() const
+ { return vec_extract(val, 0); }
+};
+
+struct v_int8x16
+{
+ typedef schar lane_type;
+ enum { nlanes = 16 };
+ vec_char16 val;
+
+ explicit v_int8x16(const vec_char16& v) : val(v)
+ {}
+ v_int8x16() : val(vec_char16_z)
+ {}
+ v_int8x16(vec_bchar16 v) : val(vec_char16_c(v))
+ {}
+ v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7,
+ schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15)
+ : val(vec_char16_set(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15))
+ {}
+ schar get0() const
+ { return vec_extract(val, 0); }
+};
+
+struct v_uint16x8
+{
+ typedef ushort lane_type;
+ enum { nlanes = 8 };
+ vec_ushort8 val;
+
+ explicit v_uint16x8(const vec_ushort8& v) : val(v)
+ {}
+ v_uint16x8() : val(vec_ushort8_z)
+ {}
+ v_uint16x8(vec_bshort8 v) : val(vec_ushort8_c(v))
+ {}
+ v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7)
+ : val(vec_ushort8_set(v0, v1, v2, v3, v4, v5, v6, v7))
+ {}
+ ushort get0() const
+ { return vec_extract(val, 0); }
+};
+
+struct v_int16x8
+{
+ typedef short lane_type;
+ enum { nlanes = 8 };
+ vec_short8 val;
+
+ explicit v_int16x8(const vec_short8& v) : val(v)
+ {}
+ v_int16x8() : val(vec_short8_z)
+ {}
+ v_int16x8(vec_bshort8 v) : val(vec_short8_c(v))
+ {}
+ v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7)
+ : val(vec_short8_set(v0, v1, v2, v3, v4, v5, v6, v7))
+ {}
+ short get0() const
+ { return vec_extract(val, 0); }
+};
+
+struct v_uint32x4
+{
+ typedef unsigned lane_type;
+ enum { nlanes = 4 };
+ vec_uint4 val;
+
+ explicit v_uint32x4(const vec_uint4& v) : val(v)
+ {}
+ v_uint32x4() : val(vec_uint4_z)
+ {}
+ v_uint32x4(vec_bint4 v) : val(vec_uint4_c(v))
+ {}
+ v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3) : val(vec_uint4_set(v0, v1, v2, v3))
+ {}
+ uint get0() const
+ { return vec_extract(val, 0); }
+};
+
+struct v_int32x4
+{
+ typedef int lane_type;
+ enum { nlanes = 4 };
+ vec_int4 val;
+
+ explicit v_int32x4(const vec_int4& v) : val(v)
+ {}
+ v_int32x4() : val(vec_int4_z)
+ {}
+ v_int32x4(vec_bint4 v) : val(vec_int4_c(v))
+ {}
+ v_int32x4(int v0, int v1, int v2, int v3) : val(vec_int4_set(v0, v1, v2, v3))
+ {}
+ int get0() const
+ { return vec_extract(val, 0); }
+};
+
+struct v_float32x4
+{
+ typedef float lane_type;
+ enum { nlanes = 4 };
+ vec_float4 val;
+
+ explicit v_float32x4(const vec_float4& v) : val(v)
+ {}
+ v_float32x4() : val(vec_float4_z)
+ {}
+ v_float32x4(vec_bint4 v) : val(vec_float4_c(v))
+ {}
+ v_float32x4(float v0, float v1, float v2, float v3) : val(vec_float4_set(v0, v1, v2, v3))
+ {}
+ float get0() const
+ { return vec_extract(val, 0); }
+};
+
+struct v_uint64x2
+{
+ typedef uint64 lane_type;
+ enum { nlanes = 2 };
+ vec_udword2 val;
+
+ explicit v_uint64x2(const vec_udword2& v) : val(v)
+ {}
+ v_uint64x2() : val(vec_udword2_z)
+ {}
+ v_uint64x2(vec_bdword2 v) : val(vec_udword2_c(v))
+ {}
+ v_uint64x2(uint64 v0, uint64 v1) : val(vec_udword2_set(v0, v1))
+ {}
+ uint64 get0() const
+ { return vec_extract(val, 0); }
+};
+
+struct v_int64x2
+{
+ typedef int64 lane_type;
+ enum { nlanes = 2 };
+ vec_dword2 val;
+
+ explicit v_int64x2(const vec_dword2& v) : val(v)
+ {}
+ v_int64x2() : val(vec_dword2_z)
+ {}
+ v_int64x2(vec_bdword2 v) : val(vec_dword2_c(v))
+ {}
+ v_int64x2(int64 v0, int64 v1) : val(vec_dword2_set(v0, v1))
+ {}
+ int64 get0() const
+ { return vec_extract(val, 0); }
+};
+
+struct v_float64x2
+{
+ typedef double lane_type;
+ enum { nlanes = 2 };
+ vec_double2 val;
+
+ explicit v_float64x2(const vec_double2& v) : val(v)
+ {}
+ v_float64x2() : val(vec_double2_z)
+ {}
+ v_float64x2(vec_bdword2 v) : val(vec_double2_c(v))
+ {}
+ v_float64x2(double v0, double v1) : val(vec_double2_set(v0, v1))
+ {}
+ double get0() const
+ { return vec_extract(val, 0); }
+};
+
+//////////////// Load and store operations ///////////////
+
+/*
+ * clang-5 aborted during parse "vec_xxx_c" only if it's
+ * inside a function template which is defined by preprocessor macro.
+ *
+ * if vec_xxx_c defined as C++ cast, clang-5 will pass it
+*/
+// Vector initialization: v_setzero_<suffix>(), v_setall_<suffix>(v) and a
+// bit-exact v_reinterpret_as_<suffix>() from any other vector type.
+// NOTE(review): the template parameter list was stripped from this patch
+// (bare "template"); restored here to match upstream OpenCV intrin_vsx.hpp.
+#define OPENCV_HAL_IMPL_VSX_INITVEC(_Tpvec, _Tp, suffix, cast) \
+inline _Tpvec v_setzero_##suffix() { return _Tpvec(); } \
+inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(vec_splats((_Tp)v));} \
+template<typename _Tpvec0> inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0 &a) \
+{ return _Tpvec((cast)a.val); }
+
+OPENCV_HAL_IMPL_VSX_INITVEC(v_uint8x16, uchar, u8, vec_uchar16)
+OPENCV_HAL_IMPL_VSX_INITVEC(v_int8x16, schar, s8, vec_char16)
+OPENCV_HAL_IMPL_VSX_INITVEC(v_uint16x8, ushort, u16, vec_ushort8)
+OPENCV_HAL_IMPL_VSX_INITVEC(v_int16x8, short, s16, vec_short8)
+OPENCV_HAL_IMPL_VSX_INITVEC(v_uint32x4, uint, u32, vec_uint4)
+OPENCV_HAL_IMPL_VSX_INITVEC(v_int32x4, int, s32, vec_int4)
+OPENCV_HAL_IMPL_VSX_INITVEC(v_uint64x2, uint64, u64, vec_udword2)
+OPENCV_HAL_IMPL_VSX_INITVEC(v_int64x2, int64, s64, vec_dword2)
+OPENCV_HAL_IMPL_VSX_INITVEC(v_float32x4, float, f32, vec_float4)
+OPENCV_HAL_IMPL_VSX_INITVEC(v_float64x2, double, f64, vec_double2)
+
+// Full- and half-vector load/store.  v_load and v_load_aligned map to the
+// same ld_func here; the low/high/halves variants move only 8 bytes through
+// the vec_ld_l8 / vec_st_l8 / vec_st_h8 helpers (v_load_halves stitches two
+// 8-byte loads together with vec_mergesqh).
+#define OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(_Tpvec, _Tp, ld_func, st_func) \
+inline _Tpvec v_load(const _Tp* ptr) \
+{ return _Tpvec(ld_func(0, ptr)); } \
+inline _Tpvec v_load_aligned(const _Tp* ptr) \
+{ return _Tpvec(ld_func(0, ptr)); } \
+inline _Tpvec v_load_low(const _Tp* ptr) \
+{ return _Tpvec(vec_ld_l8(ptr)); } \
+inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \
+{ return _Tpvec(vec_mergesqh(vec_ld_l8(ptr0), vec_ld_l8(ptr1))); } \
+inline void v_store(_Tp* ptr, const _Tpvec& a) \
+{ st_func(a.val, 0, ptr); } \
+inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \
+{ st_func(a.val, 0, ptr); } \
+inline void v_store_low(_Tp* ptr, const _Tpvec& a) \
+{ vec_st_l8(a.val, ptr); } \
+inline void v_store_high(_Tp* ptr, const _Tpvec& a) \
+{ vec_st_h8(a.val, ptr); }
+
+OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_uint8x16, uchar, vsx_ld, vsx_st)
+OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_int8x16, schar, vsx_ld, vsx_st)
+OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_uint16x8, ushort, vsx_ld, vsx_st)
+OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_int16x8, short, vsx_ld, vsx_st)
+OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_uint32x4, uint, vsx_ld, vsx_st)
+OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_int32x4, int, vsx_ld, vsx_st)
+OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_float32x4, float, vsx_ld, vsx_st)
+OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_float64x2, double, vsx_ld, vsx_st)
+// 64-bit lane types go through the doubleword wrappers vsx_ld2 / vsx_st2
+OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_uint64x2, uint64, vsx_ld2, vsx_st2)
+OPENCV_HAL_IMPL_VSX_LOADSTORE_INT_OP(v_int64x2, int64, vsx_ld2, vsx_st2)
+
+//////////////// Value reordering ///////////////
+
+/* de&interleave */
+// De-interleaving loads (channel-interleaved memory -> one vector per
+// channel) and the matching interleaving stores, for 2/3/4 channels.
+// All the work is delegated to the vec_ld_deinterleave /
+// vec_st_interleave helper overloads.
+#define OPENCV_HAL_IMPL_VSX_INTERLEAVE(_Tp, _Tpvec) \
+inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, _Tpvec& b) \
+{ vec_ld_deinterleave(ptr, a.val, b.val);} \
+inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, \
+ _Tpvec& b, _Tpvec& c) \
+{ vec_ld_deinterleave(ptr, a.val, b.val, c.val); } \
+inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, _Tpvec& b, \
+ _Tpvec& c, _Tpvec& d) \
+{ vec_ld_deinterleave(ptr, a.val, b.val, c.val, d.val); } \
+inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, const _Tpvec& b) \
+{ vec_st_interleave(a.val, b.val, ptr); } \
+inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, \
+ const _Tpvec& b, const _Tpvec& c) \
+{ vec_st_interleave(a.val, b.val, c.val, ptr); } \
+inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, const _Tpvec& b, \
+ const _Tpvec& c, const _Tpvec& d) \
+{ vec_st_interleave(a.val, b.val, c.val, d.val, ptr); }
+
+OPENCV_HAL_IMPL_VSX_INTERLEAVE(uchar, v_uint8x16)
+OPENCV_HAL_IMPL_VSX_INTERLEAVE(schar, v_int8x16)
+OPENCV_HAL_IMPL_VSX_INTERLEAVE(ushort, v_uint16x8)
+OPENCV_HAL_IMPL_VSX_INTERLEAVE(short, v_int16x8)
+OPENCV_HAL_IMPL_VSX_INTERLEAVE(uint, v_uint32x4)
+OPENCV_HAL_IMPL_VSX_INTERLEAVE(int, v_int32x4)
+OPENCV_HAL_IMPL_VSX_INTERLEAVE(float, v_float32x4)
+OPENCV_HAL_IMPL_VSX_INTERLEAVE(double, v_float64x2)
+
+/* Expand */
+// Widen lanes to the next lane size: v_expand splits one vector into two
+// wide vectors (b0 from fh, b1 from fl); v_load_expand loads one vector's
+// worth of narrow data and widens its first half.
+#define OPENCV_HAL_IMPL_VSX_EXPAND(_Tpvec, _Tpwvec, _Tp, fl, fh) \
+inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \
+{ \
+ b0.val = fh(a.val); \
+ b1.val = fl(a.val); \
+} \
+inline _Tpwvec v_load_expand(const _Tp* ptr) \
+{ return _Tpwvec(fh(vsx_ld(0, ptr))); }
+
+OPENCV_HAL_IMPL_VSX_EXPAND(v_uint8x16, v_uint16x8, uchar, vec_unpacklu, vec_unpackhu)
+OPENCV_HAL_IMPL_VSX_EXPAND(v_int8x16, v_int16x8, schar, vec_unpackl, vec_unpackh)
+OPENCV_HAL_IMPL_VSX_EXPAND(v_uint16x8, v_uint32x4, ushort, vec_unpacklu, vec_unpackhu)
+OPENCV_HAL_IMPL_VSX_EXPAND(v_int16x8, v_int32x4, short, vec_unpackl, vec_unpackh)
+OPENCV_HAL_IMPL_VSX_EXPAND(v_uint32x4, v_uint64x2, uint, vec_unpacklu, vec_unpackhu)
+OPENCV_HAL_IMPL_VSX_EXPAND(v_int32x4, v_int64x2, int, vec_unpackl, vec_unpackh)
+
+// Quad expansion: widen bytes all the way to 32-bit lanes via the
+// vec_ld_buw / vec_ld_bsw helpers.
+inline v_uint32x4 v_load_expand_q(const uchar* ptr)
+{ return v_uint32x4(vec_ld_buw(ptr)); }
+
+inline v_int32x4 v_load_expand_q(const schar* ptr)
+{ return v_int32x4(vec_ld_bsw(ptr)); }
+
+/* pack */
+// Narrowing packs: v_pack / v_pack_u combine two wide vectors into one
+// narrow vector; the *_store forms write only the low 8 bytes.  The
+// v_rshr_* variants add a rounding delta of (1 << (n-1)) and shift right
+// by n before packing.
+// NOTE(review): the two "template<int n>" parameter lists were stripped
+// from this patch; restored to match upstream OpenCV intrin_vsx.hpp.
+#define OPENCV_HAL_IMPL_VSX_PACK(_Tpvec, _Tp, _Tpwvec, _Tpvn, _Tpdel, sfnc, pkfnc, addfnc, pack) \
+inline _Tpvec v_##pack(const _Tpwvec& a, const _Tpwvec& b) \
+{ \
+    return _Tpvec(pkfnc(a.val, b.val)); \
+} \
+inline void v_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
+{ \
+    vec_st_l8(pkfnc(a.val, a.val), ptr); \
+} \
+template<int n> \
+inline _Tpvec v_rshr_##pack(const _Tpwvec& a, const _Tpwvec& b) \
+{ \
+    const __vector _Tpvn vn = vec_splats((_Tpvn)n); \
+    const __vector _Tpdel delta = vec_splats((_Tpdel)((_Tpdel)1 << (n-1))); \
+    return _Tpvec(pkfnc(sfnc(addfnc(a.val, delta), vn), sfnc(addfnc(b.val, delta), vn))); \
+} \
+template<int n> \
+inline void v_rshr_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
+{ \
+    const __vector _Tpvn vn = vec_splats((_Tpvn)n); \
+    const __vector _Tpdel delta = vec_splats((_Tpdel)((_Tpdel)1 << (n-1))); \
+    /* only the low 8 bytes are stored, so the second pack operand is a don't-care */ \
+    vec_st_l8(pkfnc(sfnc(addfnc(a.val, delta), vn), delta), ptr); \
+}
+
+/* saturating packs for 16 -> 8 bit lanes */
+OPENCV_HAL_IMPL_VSX_PACK(v_uint8x16, uchar, v_uint16x8, unsigned short, unsigned short,
+                         vec_sr, vec_packs, vec_adds, pack)
+OPENCV_HAL_IMPL_VSX_PACK(v_int8x16, schar, v_int16x8, unsigned short, short,
+                         vec_sra, vec_packs, vec_adds, pack)
+
+OPENCV_HAL_IMPL_VSX_PACK(v_uint16x8, ushort, v_uint32x4, unsigned int, unsigned int,
+                         vec_sr, vec_packs, vec_add, pack)
+OPENCV_HAL_IMPL_VSX_PACK(v_int16x8, short, v_int32x4, unsigned int, int,
+                         vec_sra, vec_packs, vec_add, pack)
+
+/* truncating packs for 64 -> 32 bit lanes */
+OPENCV_HAL_IMPL_VSX_PACK(v_uint32x4, uint, v_uint64x2, unsigned long long, unsigned long long,
+                         vec_sr, vec_pack, vec_add, pack)
+OPENCV_HAL_IMPL_VSX_PACK(v_int32x4, int, v_int64x2, unsigned long long, long long,
+                         vec_sra, vec_pack, vec_add, pack)
+
+/* signed-to-unsigned saturating packs */
+OPENCV_HAL_IMPL_VSX_PACK(v_uint8x16, uchar, v_int16x8, unsigned short, short,
+                         vec_sra, vec_packsu, vec_adds, pack_u)
+OPENCV_HAL_IMPL_VSX_PACK(v_uint16x8, ushort, v_int32x4, unsigned int, int,
+                         vec_sra, vec_packsu, vec_add, pack_u)
+// Following variant is not implemented on other platforms:
+//OPENCV_HAL_IMPL_VSX_PACK(v_uint32x4, uint, v_int64x2, unsigned long long, long long,
+//                         vec_sra, vec_packsu, vec_add, pack_u)
+
+/* Recombine */
+// Interleave the lanes of a0 and a1: b0 takes the merged leading halves
+// (vec_mergeh), b1 the merged trailing halves (vec_mergel).
+// NOTE(review): template parameter list restored (stripped by extraction).
+template<typename _Tpvec>
+inline void v_zip(const _Tpvec& a0, const _Tpvec& a1, _Tpvec& b0, _Tpvec& b1)
+{
+    b0.val = vec_mergeh(a0.val, a1.val);
+    b1.val = vec_mergel(a0.val, a1.val);
+}
+
+// Combine the high halves of a and b into one vector (vec_mergesql).
+// NOTE(review): template parameter list restored (stripped by extraction).
+template<typename _Tpvec>
+inline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b)
+{ return _Tpvec(vec_mergesql(a.val, b.val)); }
+
+// Combine the low halves of a and b into one vector (vec_mergesqh).
+// NOTE(review): template parameter list restored (stripped by extraction).
+template<typename _Tpvec>
+inline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b)
+{ return _Tpvec(vec_mergesqh(a.val, b.val)); }
+
+// c = combined low halves of a/b, d = combined high halves, in one call
+// (equivalent to v_combine_low + v_combine_high).
+// NOTE(review): template parameter list restored (stripped by extraction).
+template<typename _Tpvec>
+inline void v_recombine(const _Tpvec& a, const _Tpvec& b, _Tpvec& c, _Tpvec& d)
+{
+    c.val = vec_mergesqh(a.val, b.val);
+    d.val = vec_mergesql(a.val, b.val);
+}
+
+/* Extract */
+// Extract 16 bytes from the concatenation {b:a}, starting at lane s of a.
+// s == 0 returns a unchanged; a shift amount beyond the register width
+// yields a zero vector.
+// NOTE(review): template parameter list restored (stripped by extraction).
+template<int s, typename _Tpvec>
+inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b)
+{
+    const int w = sizeof(typename _Tpvec::lane_type);
+    const int n = _Tpvec::nlanes;
+    const unsigned int sf = ((w * n) - (s * w));
+    if (s == 0)
+        return _Tpvec(a.val);
+    else if (sf > 15)
+        return _Tpvec();
+    // the bitwise AND just keeps xlc happy about the vec_sld immediate range
+    return _Tpvec(vec_sld(b.val, a.val, sf & 15));
+}
+
+// 64-bit lane specialization of v_extract: only s in {0,1,2} is
+// meaningful for two-lane vectors, dispatched via vec_sldw.
+// NOTE(review): template parameter list restored (stripped by extraction).
+#define OPENCV_HAL_IMPL_VSX_EXTRACT_2(_Tpvec) \
+template<int s> \
+inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    switch(s) { \
+    case 0: return _Tpvec(a.val); \
+    case 2: return _Tpvec(b.val); \
+    case 1: return _Tpvec(vec_sldw(b.val, a.val, 2)); \
+    default: return _Tpvec(); \
+    } \
+}
+OPENCV_HAL_IMPL_VSX_EXTRACT_2(v_uint64x2)
+OPENCV_HAL_IMPL_VSX_EXTRACT_2(v_int64x2)
+
+
+////////// Arithmetic, bitwise and comparison operations /////////
+
+/* Element-wise binary and unary operations */
+/** Arithmetics **/
+// Defines operator OP and the compound operator OP= in terms of a single
+// binary intrinsic on the underlying native vectors.
+#define OPENCV_HAL_IMPL_VSX_BIN_OP(bin_op, _Tpvec, intrin) \
+inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(intrin(a.val, b.val)); } \
+inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \
+{ a.val = intrin(a.val, b.val); return a; }
+
+// 8/16-bit lanes use saturating add/sub (vec_adds/vec_subs)
+OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint8x16, vec_adds)
+OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint8x16, vec_subs)
+OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int8x16, vec_adds)
+OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int8x16, vec_subs)
+OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint16x8, vec_adds)
+OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint16x8, vec_subs)
+OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_uint16x8, vec_mul)
+OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int16x8, vec_adds)
+OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int16x8, vec_subs)
+OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_int16x8, vec_mul)
+// 32/64-bit and floating-point lanes use plain vec_add/vec_sub
+OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint32x4, vec_add)
+OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint32x4, vec_sub)
+OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_uint32x4, vec_mul)
+OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int32x4, vec_add)
+OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int32x4, vec_sub)
+OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_int32x4, vec_mul)
+OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_float32x4, vec_add)
+OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_float32x4, vec_sub)
+OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_float32x4, vec_mul)
+OPENCV_HAL_IMPL_VSX_BIN_OP(/, v_float32x4, vec_div)
+OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_float64x2, vec_add)
+OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_float64x2, vec_sub)
+OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_float64x2, vec_mul)
+OPENCV_HAL_IMPL_VSX_BIN_OP(/, v_float64x2, vec_div)
+OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint64x2, vec_add)
+OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint64x2, vec_sub)
+OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int64x2, vec_add)
+OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int64x2, vec_sub)
+
+// Widening multiply: unpack each operand to the wider lane type, then
+// multiply; c holds the products of the lanes unpacked by
+// vec_unpackh(u), d those unpacked by vec_unpackl(u).
+inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b, v_int32x4& c, v_int32x4& d)
+{
+ c.val = vec_mul(vec_unpackh(a.val), vec_unpackh(b.val));
+ d.val = vec_mul(vec_unpackl(a.val), vec_unpackl(b.val));
+}
+// Unsigned 16 -> 32 bit variant (uses the unsigned unpack helpers).
+inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b, v_uint32x4& c, v_uint32x4& d)
+{
+ c.val = vec_mul(vec_unpackhu(a.val), vec_unpackhu(b.val));
+ d.val = vec_mul(vec_unpacklu(a.val), vec_unpacklu(b.val));
+}
+// Unsigned 32 -> 64 bit variant.
+inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b, v_uint64x2& c, v_uint64x2& d)
+{
+ c.val = vec_mul(vec_unpackhu(a.val), vec_unpackhu(b.val));
+ d.val = vec_mul(vec_unpacklu(a.val), vec_unpacklu(b.val));
+}
+
+/** Non-saturating arithmetics **/
+// Generic element-wise binary function over any universal vector type.
+// NOTE(review): template parameter list restored (stripped by extraction).
+#define OPENCV_HAL_IMPL_VSX_BIN_FUNC(func, intrin) \
+template<typename _Tpvec> \
+inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(intrin(a.val, b.val)); }
+
+// wrapping (modular) add/sub, in contrast to the saturating operator+/-
+OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_add_wrap, vec_add)
+OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_sub_wrap, vec_sub)
+
+/** Bitwise shifts **/
+// Per-lane shifts by a runtime count (operator<< / operator>>) or a
+// compile-time immediate (v_shl<imm> / v_shr<imm>); splfunc splats the
+// count into an unsigned shift vector.
+// NOTE(review): the two "template<int imm>" parameter lists were stripped
+// from this patch; restored to match upstream OpenCV intrin_vsx.hpp.
+#define OPENCV_HAL_IMPL_VSX_SHIFT_OP(_Tpvec, shr, splfunc) \
+inline _Tpvec operator << (const _Tpvec& a, int imm) \
+{ return _Tpvec(vec_sl(a.val, splfunc(imm))); } \
+inline _Tpvec operator >> (const _Tpvec& a, int imm) \
+{ return _Tpvec(shr(a.val, splfunc(imm))); } \
+template<int imm> inline _Tpvec v_shl(const _Tpvec& a) \
+{ return _Tpvec(vec_sl(a.val, splfunc(imm))); } \
+template<int imm> inline _Tpvec v_shr(const _Tpvec& a) \
+{ return _Tpvec(shr(a.val, splfunc(imm))); }
+
+// logical right shift for unsigned lanes
+OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint8x16, vec_sr, vec_uchar16_sp)
+OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint16x8, vec_sr, vec_ushort8_sp)
+OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint32x4, vec_sr, vec_uint4_sp)
+OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint64x2, vec_sr, vec_udword2_sp)
+// algebraic (sign-extending) right shift for signed lanes
+OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int8x16, vec_sra, vec_uchar16_sp)
+OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int16x8, vec_sra, vec_ushort8_sp)
+OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int32x4, vec_sra, vec_uint4_sp)
+OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int64x2, vec_sra, vec_udword2_sp)
+
+/** Bitwise logic **/
+// Bitwise AND / OR / XOR (plus the compound-assignment forms, via the
+// BIN_OP macro) and bitwise NOT for every vector type.
+#define OPENCV_HAL_IMPL_VSX_LOGIC_OP(_Tpvec) \
+OPENCV_HAL_IMPL_VSX_BIN_OP(&, _Tpvec, vec_and) \
+OPENCV_HAL_IMPL_VSX_BIN_OP(|, _Tpvec, vec_or) \
+OPENCV_HAL_IMPL_VSX_BIN_OP(^, _Tpvec, vec_xor) \
+inline _Tpvec operator ~ (const _Tpvec& a) \
+{ return _Tpvec(vec_not(a.val)); }
+
+OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint8x16)
+OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int8x16)
+OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint16x8)
+OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int16x8)
+OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint32x4)
+OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int32x4)
+OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint64x2)
+OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int64x2)
+OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_float32x4)
+OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_float64x2)
+
+/** Bitwise select **/
+// Per-lane select via vec_sel(b, a, mask): bits set in mask pick from a,
+// clear bits pick from b.  The cast converts the mask operand to the
+// boolean vector type vec_sel expects.
+#define OPENCV_HAL_IMPL_VSX_SELECT(_Tpvec, cast) \
+inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(vec_sel(b.val, a.val, cast(mask.val))); }
+
+OPENCV_HAL_IMPL_VSX_SELECT(v_uint8x16, vec_bchar16_c)
+OPENCV_HAL_IMPL_VSX_SELECT(v_int8x16, vec_bchar16_c)
+OPENCV_HAL_IMPL_VSX_SELECT(v_uint16x8, vec_bshort8_c)
+OPENCV_HAL_IMPL_VSX_SELECT(v_int16x8, vec_bshort8_c)
+OPENCV_HAL_IMPL_VSX_SELECT(v_uint32x4, vec_bint4_c)
+OPENCV_HAL_IMPL_VSX_SELECT(v_int32x4, vec_bint4_c)
+OPENCV_HAL_IMPL_VSX_SELECT(v_float32x4, vec_bint4_c)
+OPENCV_HAL_IMPL_VSX_SELECT(v_float64x2, vec_bdword2_c)
+
+/** Comparison **/
+// All six comparison operators; each produces a per-lane mask from the
+// corresponding vec_cmp* intrinsic.
+#define OPENCV_HAL_IMPL_VSX_INT_CMP_OP(_Tpvec) \
+inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(vec_cmpeq(a.val, b.val)); } \
+inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(vec_cmpne(a.val, b.val)); } \
+inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(vec_cmplt(a.val, b.val)); } \
+inline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(vec_cmpgt(a.val, b.val)); } \
+inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(vec_cmple(a.val, b.val)); } \
+inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \
+{ return _Tpvec(vec_cmpge(a.val, b.val)); }
+
+OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint8x16)
+OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int8x16)
+OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint16x8)
+OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int16x8)
+OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint32x4)
+OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int32x4)
+OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_float32x4)
+OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_float64x2)
+OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint64x2)
+OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int64x2)
+
+/** min/max **/
+// Per-lane minimum / maximum for every vector type.
+OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_min, vec_min)
+OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_max, vec_max)
+
+/** Rotate **/
+// Whole-register lane shifts ("rotate" in the universal-intrinsics sense):
+// shift the 128-bit register left/right by imm lanes via the byte-octet
+// shifts vec_slo/vec_sro, filling with zeros; shifting out the whole
+// register yields a zero vector.
+// NOTE(review): template parameter list restored (stripped by extraction).
+#define OPENCV_IMPL_VSX_ROTATE(_Tpvec, suffix, shf, cast) \
+template<int imm> \
+inline _Tpvec v_rotate_##suffix(const _Tpvec& a) \
+{ \
+    const int wd = imm * sizeof(typename _Tpvec::lane_type); \
+    if (wd > 15) \
+        return _Tpvec(); \
+    /* the shift count is taken in bits, hence wd << 3 */ \
+    return _Tpvec((cast)shf(vec_uchar16_c(a.val), vec_uchar16_sp(wd << 3))); \
+}
+
+#define OPENCV_IMPL_VSX_ROTATE_LR(_Tpvec, cast) \
+OPENCV_IMPL_VSX_ROTATE(_Tpvec, left, vec_slo, cast) \
+OPENCV_IMPL_VSX_ROTATE(_Tpvec, right, vec_sro, cast)
+
+OPENCV_IMPL_VSX_ROTATE_LR(v_uint8x16, vec_uchar16)
+OPENCV_IMPL_VSX_ROTATE_LR(v_int8x16, vec_char16)
+OPENCV_IMPL_VSX_ROTATE_LR(v_uint16x8, vec_ushort8)
+OPENCV_IMPL_VSX_ROTATE_LR(v_int16x8, vec_short8)
+OPENCV_IMPL_VSX_ROTATE_LR(v_uint32x4, vec_uint4)
+OPENCV_IMPL_VSX_ROTATE_LR(v_int32x4, vec_int4)
+OPENCV_IMPL_VSX_ROTATE_LR(v_uint64x2, vec_udword2)
+OPENCV_IMPL_VSX_ROTATE_LR(v_int64x2, vec_dword2)
+
+
+// Two-operand rotate right: extract 16 bytes from the concatenation {b:a}
+// via vec_sld, i.e. shift a right by imm lanes and fill from b.
+// imm covering the whole register returns a unchanged.
+// NOTE(review): template parameter list restored (stripped by extraction).
+template<int imm, typename _Tpvec>
+inline _Tpvec v_rotate_right(const _Tpvec& a, const _Tpvec& b)
+{
+    enum { CV_SHIFT = 16 - imm * (sizeof(typename _Tpvec::lane_type)) };
+    if (CV_SHIFT == 16)
+        return a;
+#ifdef __IBMCPP__
+    // xlc needs the vec_sld immediate provably constrained to [0, 15]
+    return _Tpvec(vec_sld(b.val, a.val, CV_SHIFT & 15));
+#else
+    return _Tpvec(vec_sld(b.val, a.val, CV_SHIFT));
+#endif
+}
+
+template