This post follows the official tutorial [1], Image Segmentation with Distance Transform and Watershed Algorithm, wraps its sample code into a C++ DLL, and drives the various parameters from a C# UI.

C++

ImageProc.h

#pragma once

#include <wchar.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
 
using namespace std;
using namespace cv;
 
namespace CVision
{
    public ref class ImageProc
    {
 
        public:
            ImageProc(){};
            bool imread(wchar_t *filename);
            bool imshow();
            void changeBGBlack();
            void changeBGBlack(int threshBlue, int threshGreen, int threshRed);
            void sharpImage();
            void bgr2Gray(int thresh);
 
            void distTransform();
            void extractPeaks();
            void labelMarkers();
            void applyWatershed();
 
        private:
            std::string WstringToString(const std::wstring str);
    };
}

ImageProc.cpp

// ImageProcLib.cpp : Defines the exported functions of the DLL application.
//
 
#include "stdafx.h"
#include "ImageProcLib.h"
 
using namespace std;
using namespace cv;
 
namespace CVision
{
    string filename;
    Mat src;
    Mat bw;
    Mat dist;
    Mat dist_8u;
    Mat imgResult;              // sharpened image, shared with bgr2Gray()
    Mat markers;
    vector<vector<Point> > contours;
 
    bool ImageProc::imread(wchar_t* txt)
    {        
        wstring ws(txt);
        
        filename = WstringToString(ws);
        src = cv::imread( filename );
        if( !src.data)
            return false;
        else
            return true;
    }
    void ImageProc::changeBGBlack()
    {
        // Change the background from white to black, since that will help later to extract
        // better results during the use of Distance Transform
        for( int x = 0; x < src.rows; x++ ) {
            for( int y = 0; y < src.cols; y++ ) {
                if ( src.at<Vec3b>(x, y) == Vec3b(255,255,255) ) {
                    src.at<Vec3b>(x, y)[0] = 0;
                    src.at<Vec3b>(x, y)[1] = 0;
                    src.at<Vec3b>(x, y)[2] = 0;
                }
            }
        }
        // Show output image
        namedWindow( "Black Background Image", CV_WINDOW_NORMAL );  
        cv::imshow("Black Background Image", src);
    }
    void ImageProc::changeBGBlack(int threshBlue, int threshGreen, int threshRed)
    {
        // Change the background from white to black, since that will help later to extract
        // better results during the use of Distance Transform
        for( int x = 0; x < src.rows; x++ ) {
            for( int y = 0; y < src.cols; y++ ) {
                //if ( src.at<Vec3b>(x, y) == Vec3b(255,255,255) )
                if( src.at<Vec3b>(x,y)[0] > threshBlue  &&
                    src.at<Vec3b>(x,y)[1] > threshGreen &&
                    src.at<Vec3b>(x,y)[2] > threshRed )
                {
                   src.at<Vec3b>(x, y)[0] = 0;
                   src.at<Vec3b>(x, y)[1] = 0;
                   src.at<Vec3b>(x, y)[2] = 0;
                }
            }
        }
        // Show output image
        namedWindow( "Black Background Image", CV_WINDOW_NORMAL );  
        cv::imshow("Black Background Image", src);
    }
    void ImageProc::sharpImage()
    {
        // Create a kernel that we will use to sharpen our image
        Mat kernel = (Mat_<float>(3,3) <<
            1,  1, 1,
            1, -8, 1,
            1,  1, 1);
        // an approximation of the second derivative, a quite strong kernel
        // Do the Laplacian filtering as is.
        // We need to convert everything to something deeper than CV_8U,
        // because the kernel has some negative values, and in general we can
        // expect a Laplacian image with negative values, BUT an 8-bit unsigned
        // int can only hold values from 0 to 255, so the negative numbers
        // would be truncated.
        Mat imgLaplacian;
        Mat sharp = src; // shares src's pixel buffer; convertTo() below reallocates it
        filter2D(sharp, imgLaplacian, CV_32F, kernel);
        src.convertTo(sharp, CV_32F);
        imgResult = sharp - imgLaplacian;   // kept at namespace scope for bgr2Gray()
        // convert back to 8 bits
        imgResult.convertTo(imgResult, CV_8UC3);
        imgLaplacian.convertTo(imgLaplacian, CV_8UC3);
        // imshow( "Laplace Filtered Image", imgLaplacian );
        namedWindow( "New Sharped Image", CV_WINDOW_NORMAL );
        cv::imshow( "New Sharped Image", imgResult );
    }
    void ImageProc::bgr2Gray(int thresh)
    {
        // Create binary image from the sharpened image (see step 4 below)
        cvtColor(imgResult, bw, CV_BGR2GRAY);
        threshold(bw, bw, thresh, 255, CV_THRESH_BINARY);
        //threshold(bw, bw, thresh, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
        namedWindow( "Binary Image", CV_WINDOW_NORMAL ); 
        cv::imshow("Binary Image", bw);
    }
    void ImageProc::distTransform()
    {
        // Perform the distance transform algorithm
        distanceTransform(bw, dist, CV_DIST_L2, 3);
        // Normalize the distance image for range = {0.0, 1.0}
        // so we can visualize and threshold it
        normalize(dist, dist, 0, 1., NORM_MINMAX);
        namedWindow( "Distance Transform Image", CV_WINDOW_NORMAL ); 
        cv::imshow("Distance Transform Image", dist);
    }
    void ImageProc::extractPeaks()
    {
         // Threshold to obtain the peaks
         // This will be the markers for the foreground objects
         threshold(dist, dist, .4, 1., CV_THRESH_BINARY);
         // Dilate a bit the dist image
         Mat kernel1 = Mat::ones(3, 3, CV_8UC1);
         dilate(dist, dist, kernel1);
         namedWindow( "Peaks", CV_WINDOW_NORMAL ); 
         cv::imshow("Peaks", dist);
    }
    void ImageProc::labelMarkers()
    {
         // Create the CV_8U version of the distance image
         // It is needed for findContours()
        
         dist.convertTo(dist_8u, CV_8U);
         // Find total markers
    
         findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
         // Create the marker image for the watershed algorithm
         markers = Mat::zeros(dist.size(), CV_32SC1);
         // Draw the foreground markers
         for (size_t i = 0; i < contours.size(); i++)
             drawContours(markers, contours, static_cast<int>(i), Scalar::all(static_cast<int>(i)+1), -1);
         // Draw the background marker
         circle(markers, Point(5,5), 3, CV_RGB(255,255,255), -1);
         namedWindow( "Markers", CV_WINDOW_NORMAL ); 
         cv::imshow("Markers", markers*10000);
    }
    void ImageProc::applyWatershed()
    {
         // Perform the watershed algorithm
         watershed(src, markers);
         Mat mark = Mat::zeros(markers.size(), CV_8UC1);
         markers.convertTo(mark, CV_8UC1);
         bitwise_not(mark, mark);
         namedWindow( "Markers_v2", CV_WINDOW_NORMAL ); 
         cv::imshow("Markers_v2", mark);   // uncomment this if you want to see how the mark
                                           // image looks like at that point
    
         // Generate random colors
         vector<Vec3b> colors;
         for (size_t i = 0; i < contours.size(); i++)
         {
            int b = theRNG().uniform(0, 255);
            int g = theRNG().uniform(0, 255);
            int r = theRNG().uniform(0, 255);
            colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
         }
         // Create the result image
         Mat dst = Mat::zeros(markers.size(), CV_8UC3);
    
         // Fill labeled objects with random colors
         for (int i = 0; i < markers.rows; i++)
         {
            for (int j = 0; j < markers.cols; j++)
            {
                int index = markers.at<int>(i,j);
                if (index > 0 && index <= static_cast<int>(contours.size()))
                    dst.at<Vec3b>(i,j) = colors[index-1];
                else
                    dst.at<Vec3b>(i,j) = Vec3b(0,0,0);
            }
        }
        // Visualize the final image
        namedWindow( "Final Result", CV_WINDOW_NORMAL ); 
        cv::imshow("Final Result", dst);
 
    }
    bool ImageProc::imshow()
    {
        // Check that an image has been loaded
        if (!src.data)
            return false;

        // Show source image
        namedWindow( "Source Image", CV_WINDOW_NORMAL );
        cv::imshow("Source Image", src);
        return true;
    }
    std::string ImageProc::WstringToString(const std::wstring str)
    {
        size_t len = str.size() * 4 + 1;   // worst-case multi-byte length plus terminator
        setlocale(LC_CTYPE, "cht");
        char *p = new char[len];
        memset(p, 0, len);                 // guarantee null termination
        wcstombs(p, str.c_str(), len - 1);
        std::string str1(p);
        delete[] p;
        return str1;
    }
};
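A note on WstringToString(): the wcstombs-based conversion depends on the process locale set via setlocale. A minimal alternative sketch using the Win32 API directly; the choice of CP_ACP here is an assumption, namely that the system ANSI code page matches how cv::imread interprets the path:

#include <windows.h>
#include <string>

std::string WstringToStringWin32(const std::wstring& ws)
{
    if (ws.empty()) return std::string();
    // First call computes the required buffer size in bytes.
    int len = WideCharToMultiByte(CP_ACP, 0, ws.c_str(), (int)ws.size(),
                                  NULL, 0, NULL, NULL);
    std::string out(len, '\0');
    WideCharToMultiByte(CP_ACP, 0, ws.c_str(), (int)ws.size(),
                        &out[0], len, NULL, NULL);
    return out;
}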

C# UI

image

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
 
using CVision;
namespace ImageProcLibClient
{
    public partial class Form1 : Form
    {
        ImageProc ip = new ImageProc();
        bool imageLoaded = false;   // guards UpdateResult() until a file has been opened
        public Form1()
        {
            InitializeComponent();
        }
 
        private void openFileTspMenu_Click(object sender, EventArgs e)
        {
            int thresh     = Convert.ToInt16(numericUpDown1.Value);
            int threshBlue = Convert.ToInt16(numericUpDown2.Value);
            int threshGreen = Convert.ToInt16(numericUpDown3.Value);
            int threshRed = Convert.ToInt16(numericUpDown4.Value);
 
            using (OpenFileDialog ofd = new OpenFileDialog())
            {
                if (ofd.ShowDialog() == System.Windows.Forms.DialogResult.OK)
                {
                    unsafe
                    {
                        fixed (char* filename = ofd.FileName)
                        {
                            if (!ip.imread(filename))
                                return;   // abort if the image could not be loaded
                            imageLoaded = true;
                            ip.imshow();
                            ip.changeBGBlack(threshBlue, threshGreen, threshRed);
                            ip.sharpImage();
                            ip.bgr2Gray(thresh);
                            ip.distTransform();
                            ip.extractPeaks();
                            ip.labelMarkers();
                            ip.applyWatershed();
                        }
                    }
 
                }
            }
        }
 
        private void numericUpDown1_ValueChanged(object sender, EventArgs e)
        {
            UpdateResult();
        }
 
        private void UpdateResult()
        {
            if (!imageLoaded)
                return;   // the ValueChanged events can fire before any image is loaded

            int thresh = Convert.ToInt16(numericUpDown1.Value);
            int threshBlue = Convert.ToInt16(numericUpDown2.Value);
            int threshGreen = Convert.ToInt16(numericUpDown3.Value);
            int threshRed = Convert.ToInt16(numericUpDown4.Value);
            ip.changeBGBlack(threshBlue, threshGreen, threshRed);
            ip.sharpImage();
            ip.bgr2Gray(thresh);
            ip.distTransform();
            ip.extractPeaks();
            ip.labelMarkers();
            ip.applyWatershed();
        }
 
        private void numericUpDown2_ValueChanged(object sender, EventArgs e)
        {
            UpdateResult();
        }
 
        private void numericUpDown3_ValueChanged(object sender, EventArgs e)
        {
            UpdateResult();
        }
 
        private void numericUpDown4_ValueChanged(object sender, EventArgs e)
        {
            UpdateResult();
        }
    }
}

=================================================================

The wrapping process

1. Load the source image and check if it is loaded without any problem, then show it:

// Load the image
Mat src = imread(argv[1]);
 
// Check if everything was fine
if (!src.data)
    return -1;
 
// Show source image
imshow("Source Image", src);

I split this step into two methods, imread and imshow.

imread: takes a file name and loads the image.

bool ImageProc::imread(wchar_t* txt)
{        
    wstring ws(txt);
    
    filename = WstringToString(ws);
    src = cv::imread( filename );
    if( !src.data)
        return false;
    else
        return true;
}
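As a small aside, the null-data test can also be written with Mat::empty(), available in OpenCV 2.x as well; a sketch of the equivalent tail of imread():

src = cv::imread( filename );
return !src.empty();   // empty() is true when loading failed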

imshow: displays the source image.

bool ImageProc::imshow()
{
    // Check that an image has been loaded
    if (!src.data)
        return false;

    // Show source image
    namedWindow( "Source Image", CV_WINDOW_NORMAL );
    cv::imshow("Source Image", src);
    return true;
}

2. Then, if we have an image with a white background, it is good to transform it to black. This will help us discriminate the foreground objects more easily when we apply the Distance Transform:

// Change the background from white to black, since that will help later to extract
  // better results during the use of Distance Transform
  for( int x = 0; x < src.rows; x++ ) {
    for( int y = 0; y < src.cols; y++ ) {
        if ( src.at<Vec3b>(x, y) == Vec3b(255,255,255) ) {
          src.at<Vec3b>(x, y)[0] = 0;
          src.at<Vec3b>(x, y)[1] = 0;
          src.at<Vec3b>(x, y)[2] = 0;
        }
      }
  }
 
  // Show output image
  imshow("Black Background Image", src);

This is wrapped as the changeBGBlack() method:

void ImageProc::changeBGBlack()
{
    // Change the background from white to black, since that will help later to extract
    // better results during the use of Distance Transform
    for( int x = 0; x < src.rows; x++ ) {
        for( int y = 0; y < src.cols; y++ ) {
            if ( src.at<Vec3b>(x, y) == Vec3b(255,255,255) ) {
                src.at<Vec3b>(x, y)[0] = 0;
                src.at<Vec3b>(x, y)[1] = 0;
                src.at<Vec3b>(x, y)[2] = 0;
            }
        }
    }
    // Show output image
    namedWindow( "Black Background Image", CV_WINDOW_NORMAL );  
    cv::imshow("Black Background Image", src);
}

Since the tutorial does not include the original image, I had to re-shoot the test image from a screen capture, so no background pixel is exactly

(B, G, R) = (255, 255, 255)

image

So if the threshold is changed to (B, G, R) = (254, 254, 254):

image

If the threshold is changed to (B, G, R) = (245, 245, 245), the result is barely acceptable:

image

If the threshold is set to (B, G, R) = (245, 245, 245), it is too low and the playing cards themselves start to be heavily eroded:

image

I changed the method so that any pixel brighter than a given per-channel threshold is set to black.

The modified version of changeBGBlack() is as follows:

void ImageProc::changeBGBlack(int threshBlue, int threshGreen, int threshRed)
{
    // Change the background from white to black, since that will help later to extract
    // better results during the use of Distance Transform
    for( int x = 0; x < src.rows; x++ ) {
        for( int y = 0; y < src.cols; y++ ) {
            //if ( src.at<Vec3b>(x, y) == Vec3b(255,255,255) )
            if( src.at<Vec3b>(x,y)[0] > threshBlue  &&
                src.at<Vec3b>(x,y)[1] > threshGreen &&
                src.at<Vec3b>(x,y)[2] > threshRed )
            {
               src.at<Vec3b>(x, y)[0] = 0;
               src.at<Vec3b>(x, y)[1] = 0;
               src.at<Vec3b>(x, y)[2] = 0;
            }
        }
    }
    // Show output image
    namedWindow( "Black Background Image", CV_WINDOW_NORMAL );  
    cv::imshow("Black Background Image", src);
}
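As an aside, the double loop can be replaced by inRange() plus setTo(), which OpenCV vectorizes internally. A minimal sketch, assuming the same strict "greater than" semantics as the loop above (hence the +1 on the inclusive lower bound):

Mat mask;
inRange(src,
        Scalar(threshBlue + 1, threshGreen + 1, threshRed + 1),
        Scalar(255, 255, 255),
        mask);                        // mask = 255 where all three channels exceed their thresholds
src.setTo(Scalar(0, 0, 0), mask);     // paint those pixels black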

3. Afterwards we will sharpen our image in order to accentuate the edges of the foreground objects. We apply a Laplacian filter with a quite strong kernel (an approximation of the second derivative):

// Create a kernel that we will use to sharpen our image
    Mat kernel = (Mat_<float>(3,3) <<
            1,  1, 1,
            1, -8, 1,
            1,  1, 1); // an approximation of the second derivative, a quite strong kernel

    // Do the Laplacian filtering as is.
    // We need to convert everything to something deeper than CV_8U,
    // because the kernel has some negative values, and in general we can
    // expect a Laplacian image with negative values, BUT an 8-bit unsigned
    // int can only hold values from 0 to 255, so the negative numbers
    // would be truncated.
    Mat imgLaplacian;
    Mat sharp = src; // copy source image to another temporary one
    filter2D(sharp, imgLaplacian, CV_32F, kernel);
    src.convertTo(sharp, CV_32F);
    Mat imgResult = sharp - imgLaplacian;
 
    // convert back to 8bits gray scale
    imgResult.convertTo(imgResult, CV_8UC3);
    imgLaplacian.convertTo(imgLaplacian, CV_8UC3);
 
    // imshow( "Laplace Filtered Image", imgLaplacian );
    imshow( "New Sharped Image", imgResult );




cv::Mat_< _Tp > Class Template Reference

Template matrix class derived from Mat.

For example:

Mat_<float>(3,3): a 3x3 matrix with float precision

Mat_<int>(3,3): a 3x3 matrix with int precision

The kernel is initialized via the << operator:

Mat kernel = (Mat_<float>(3,3) <<
             1,  1, 1,
             1, -8, 1,
             1,  1, 1);

----------------------------------------------------------------------------------------------------------

C++: void filter2D(InputArray src, OutputArray dst, int ddepth, InputArray kernel, Point anchor=Point(-1,-1), double delta=0, int borderType=BORDER_DEFAULT )
Python: cv2.filter2D(src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]]) → dst
C: void cvFilter2D(const CvArr* src, CvArr* dst, const CvMat* kernel, CvPoint anchor=cvPoint(-1,-1) )
Python: cv.Filter2D(src, dst, kernel, anchor=(-1, -1)) → None

Parameters:

  • src – input image.
  • dst – output image of the same size and the same number of channels as src.
  • ddepth –
    desired depth of the destination image;
    if it is negative, it will be the same as src.depth(); the following combinations of src.depth() and ddepth are supported
    (the destination depth must be >= the source depth):
    • src.depth() = CV_8U, ddepth = -1/CV_16S/CV_32F/CV_64F
    • src.depth() = CV_16U/CV_16S, ddepth = -1/CV_32F/CV_64F
    • src.depth() = CV_32F, ddepth = -1/CV_32F/CV_64F
    • src.depth() = CV_64F, ddepth = -1/CV_64F

----------------------------------------------------------------------------------------------------------

1. First copy the source image: sharp = src;

2. Apply 2-D filtering with filter2D:

filter2D(source, destination, desired output depth, kernel);

Mat sharp = src; // copy source image to another temporary one
filter2D(sharp, imgLaplacian, CV_32F, kernel);

The white background of the source image src has been changed to black:

image

This produces a Laplacian image, as shown below:

image

Next, compute: original image - Laplacian image

image

So sharp (the source) must also be converted to 32F precision before subtracting imgLaplacian, via convertTo():

src.convertTo(sharp, CV_32F);
Mat imgResult = sharp - imgLaplacian;

---------------------------------------------------------------------------------------------------------------------------

Note:

Mat::convertTo

Converts an array to another data type with optional scaling.

C++: void Mat::convertTo(OutputArray m, int rtype, double alpha=1, double beta=0 ) const

Parameters:

  • m – output matrix; if it does not have a proper size or type before the operation, it is reallocated.
  • rtype – desired output matrix type or, rather, the depth, since the number of channels is the same as the input; if rtype is negative, the output matrix will have the same type as the input.
  • alpha – optional scale factor.
  • beta – optional delta added to the scaled values.

The method converts source pixel values to the target data type. saturate_cast<> is applied at the end to avoid possible overflows:

m(x,y) = saturate_cast<rType>( alpha * (*this)(x,y) + beta )

---------------------------------------------------------------------------------------------------------------------------
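A worked example of the alpha/beta scaling, relevant later on: labelMarkers() below calls dist.convertTo(dist_8u, CV_8U) with no scale factor, so the 0.0/1.0 float peaks become 0/1 pixel values, which is still fine for findContours() (any non-zero pixel counts as foreground). To get a viewable 8-bit image instead, pass alpha = 255:

// map a normalized CV_32F image in [0, 1] to a viewable CV_8U image in [0, 255]
Mat dist_view;
dist.convertTo(dist_view, CV_8U, 255.0, 0.0);   // alpha = 255, beta = 0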

Below are the image results from the original demo, left to right: 1. Source Image, 2. Laplacian Image, 3. Sharpened Image

(images: black_bg.jpeg, laplace.jpeg, sharp.jpeg)

--------------------------------------------------------------------------------------------------------------------------------

4. Now we transform our newly sharpened source image into a grayscale image and then a binary one:

// Create binary image from source image
 
Mat bw;
 
cvtColor(src, bw, CV_BGR2GRAY);
 
threshold(bw, bw, 40, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
 
imshow("Binary Image", bw);

Binarize the sharpened image.

The original demo has a mistake here: this step should operate on imgResult, not on the original image (src with the black background), so I changed it accordingly.

New public method bgr2Gray():

void ImageProc::bgr2Gray(int thresh)
{
    // Create binary image from source image
    cvtColor(imgResult, bw, CV_BGR2GRAY);
    //cvtColor(src, bw, CV_BGR2GRAY);
    threshold(bw, bw, thresh, 255, CV_THRESH_BINARY);
    //threshold(bw, bw, thresh, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
    namedWindow( "Binary Image", CV_WINDOW_NORMAL ); 
    cv::imshow("Binary Image", bw);
}

image

5. We are now ready to apply the Distance Transform to the binary image. We also normalize the output image so that we are able to visualize and threshold the result:

// Perform the distance transform algorithm
   Mat dist;
   distanceTransform(bw, dist, CV_DIST_L2, 3);
 
// Normalize the distance image for range = {0.0, 1.0}
// so we can visualize and threshold it
   normalize(dist, dist, 0, 1., NORM_MINMAX);
   imshow("Distance Transform Image", dist);

New public method distTransform(), which produces the distance-transform image:

void ImageProc::distTransform()
{
    // Perform the distance transform algorithm
    distanceTransform(bw, dist, CV_DIST_L2, 3);
    // Normalize the distance image for range = {0.0, 1.0}
    // so we can visualize and threshold it
    normalize(dist, dist, 0, 1., NORM_MINMAX);
    namedWindow( "Distance Transform Image", CV_WINDOW_NORMAL ); 
    cv::imshow("Distance Transform Image", dist);
}

image

6. We threshold the dist image and then perform some morphology operation (i.e. dilation) in order to extract the peaks from the above image:

// Threshold to obtain the peaks
    // This will be the markers for the foreground objects
    threshold(dist, dist, .4, 1., CV_THRESH_BINARY);
 
    // Dilate a bit the dist image
    Mat kernel1 = Mat::ones(3, 3, CV_8UC1);
    dilate(dist, dist, kernel1);
    imshow("Peaks", dist);

Here the hard-coded threshold .4 is opened up and passed in from the C# UI as threshDist.

The kernel size is also pulled out as kernelSize, to make experimenting easier.

The modified version (see the note after the listing):

void ImageProc::extractPeaks(float threshDist, int kernelSize)
{
     // Threshold to obtain the peaks
     // This will be the markers for the foreground objects
     threshold(dist, dist, threshDist, 1., CV_THRESH_BINARY);
     // Dilate a bit the dist image
     Mat kernel1 = Mat::ones(kernelSize, kernelSize, CV_8UC1);
     dilate(dist, dist, kernel1);
     namedWindow( "Peaks", CV_WINDOW_NORMAL ); 
     cv::imshow("Peaks", dist);
}
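Note that the header listed at the top still declares extractPeaks() with no parameters; for this revision to build, the declaration in ImageProc.h (and the two call sites in the C# form) would have to change to match, e.g.:

// assumed matching declaration in ImageProc.h for the parameterized version
void extractPeaks(float threshDist, int kernelSize);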

C# UI

image

image

7. From each blob we then create a seed/marker for the watershed algorithm with the help of the cv::findContours function:

// Create the CV_8U version of the distance image
   // It is needed for findContours()
   Mat dist_8u;
   dist.convertTo(dist_8u, CV_8U);
 
   // Find total markers
   vector<vector<Point> > contours;
   findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
 
   // Create the marker image for the watershed algorithm
   Mat markers = Mat::zeros(dist.size(), CV_32SC1);
 
   // Draw the foreground markers
   for (size_t i = 0; i < contours.size(); i++)
       drawContours(markers, contours, static_cast<int>(i), Scalar::all(static_cast<int>(i)+1), -1);
 
   // Draw the background marker
   circle(markers, Point(5,5), 3, CV_RGB(255,255,255), -1);
   imshow("Markers", markers*10000);

1. Create an all-black markers image, the same size as dist_8u.

2. Find all of the closed regions: findContours.

3. drawContours paints each closed region with a distinct label, numbered from 1 to contours.size().

4. Finally, draw the background marker: a circle centered at (5,5) with radius 3, filled with (255,255,255).

Note: in the image below, the dot in the top-left corner is the background marker; the rest are the markers for the individual playing cards (labels 1 to contours.size()).

image
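As a side note, newer OpenCV versions can label the blobs in one call. A sketch assuming OpenCV >= 3.0 (this post uses 2.4.12, where connectedComponents() is not available):

// labels each blob of dist_8u with an integer: 0 = background, 1..n-1 = blobs
Mat labels;
int n = connectedComponents(dist_8u, labels, 8, CV_32S);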

-------------------------------------------------------------------------------------------------------------------------

findContours

Finds contours in a binary image.

C++: void findContours(InputOutputArray image, OutputArrayOfArrays contours, OutputArray hierarchy, int mode, int method, Point offset=Point())
C++: void findContours(InputOutputArray image, OutputArrayOfArrays contours, int mode, int method, Point offset=Point())
Python: cv2.findContours(image, mode, method[, contours[, hierarchy[, offset]]]) → contours, hierarchy
C: int cvFindContours(CvArr* image, CvMemStorage* storage, CvSeq** first_contour, int header_size=sizeof(CvContour), int mode=CV_RETR_LIST, int method=CV_CHAIN_APPROX_SIMPLE, CvPoint offset=cvPoint(0,0) )
Python: cv.FindContours(image, storage, mode=CV_RETR_LIST, method=CV_CHAIN_APPROX_SIMPLE, offset=(0, 0)) → contours

Parameters:

  • image – Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as binary. You can use compare(), inRange(), threshold(), adaptiveThreshold(), Canny(), and others to create a binary image out of a grayscale or color one. The function modifies the image while extracting the contours. If mode equals CV_RETR_CCOMP or CV_RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1).
  • contours – Detected contours. Each contour is stored as a vector of points.
  • hierarchy – Optional output vector, containing information about the image topology. It has as many elements as the number of contours. For each i-th contour contours[i], the elements hierarchy[i][0], hierarchy[i][1], hierarchy[i][2], and hierarchy[i][3] are set to 0-based indices in contours of the next and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively. If for the contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
  • mode

    Contour retrieval mode (if you use Python see also a note below).

    • CV_RETR_EXTERNAL retrieves only the extreme outer contours. It sets hierarchy[i][2]=hierarchy[i][3]=-1 for all the contours.
    • CV_RETR_LIST retrieves all of the contours without establishing any hierarchical relationships.
    • CV_RETR_CCOMP retrieves all of the contours and organizes them into a two-level hierarchy. At the top level, there are external boundaries of the components. At the second level, there are boundaries of the holes. If there is another contour inside a hole of a connected component, it is still put at the top level.
    • CV_RETR_TREE retrieves all of the contours and reconstructs a full hierarchy of nested contours. This full hierarchy is built and shown in the OpenCV contours.c demo.
  • method

    Contour approximation method (if you use Python see also a note below).

    • CV_CHAIN_APPROX_NONE stores absolutely all the contour points. That is, any 2 subsequent points (x1,y1) and (x2,y2) of the contour will be either horizontal, vertical or diagonal neighbors, that is, max(abs(x1-x2),abs(y2-y1))==1.
    • CV_CHAIN_APPROX_SIMPLE compresses horizontal, vertical, and diagonal segments and leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points.
    • CV_CHAIN_APPROX_TC89_L1,CV_CHAIN_APPROX_TC89_KCOS applies one of the flavors of the Teh-Chin chain approximation algorithm. See [TehChin89] for details.
  • offset – Optional offset by which every contour point is shifted. This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.

-------------------------------------------------------------------------------------------------------------------------
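A short sketch of the hierarchy overload; note that findContours() modifies its input image, so a clone() keeps dist_8u intact if it is needed again afterwards:

vector<vector<Point> > cs;
vector<Vec4i> hierarchy;
findContours(dist_8u.clone(), cs, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
// hierarchy[i][3] >= 0 means contour i is a hole inside contour hierarchy[i][3]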

drawContours

Draws contours outlines or filled contours.

C++: void drawContours(InputOutputArray image, InputArrayOfArrays contours, int contourIdx, const Scalar& color, int thickness=1, int lineType=8, InputArray hierarchy=noArray(), int maxLevel=INT_MAX, Point offset=Point() )
Python: cv2.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]]) → None
C: void cvDrawContours(CvArr* img, CvSeq* contour, CvScalar externalColor, CvScalar holeColor, int maxLevel, int thickness=1, int lineType=8 )
Python: cv.DrawContours(img, contour, external_color, hole_color, max_level, thickness=1, lineType=8, offset=(0, 0)) → None

Parameters:

  • image – Destination image.
  • contours – All the input contours. Each contour is stored as a point vector.
  • contourIdx – Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
  • color – Color of the contours.
  • thickness – Thickness of lines the contours are drawn with. If it is negative (for example, thickness=CV_FILLED ), the contour interiors are drawn.
  • lineType – Line connectivity. See line() for details.
  • hierarchy – Optional information about hierarchy. It is only needed if you want to draw only some of the contours (see maxLevel ).
  • maxLevel – Maximal level for drawn contours. If it is 0, only the specified contour is drawn. If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account when there is hierarchy available.
  • offset – Optional contour shift parameter. Shift all the drawn contours by the specified offset=(dx,dy).
  • contour – Pointer to the first contour.
  • externalColor – Color of external contours.
  • holeColor – Color of internal contours (holes).

-------------------------------------------------------------------------------------------------------------------------

//! returns a scalar with all elements set to v0

static Scalar_<_Tp> all(_Tp v0);

drawContours(markers, contours, static_cast<int>(i), Scalar::all(static_cast<int>(i)+1), -1);

-------------------------------------------------------------------------------------------------------------------------

8. Finally, we can apply the watershed algorithm, and visualize the result:

1. Declare a vector<Vec3b> to collect the colors.

2. b, g, r are random values in the range 0~255.

3. The result image dst is initialized as an all-black color image: Mat::zeros(markers.size(), CV_8UC3).

4. Get the label index for pixel (i, j): markers.at<int>(i, j).

5. If the label index is > 0 and <= contours.size(), paint colors[index-1]; otherwise paint the black background, Vec3b(0,0,0).

// Perform the watershed algorithm
    watershed(src, markers);
 
    Mat mark = Mat::zeros(markers.size(), CV_8UC1);
    markers.convertTo(mark, CV_8UC1);
    bitwise_not(mark, mark);
//    imshow("Markers_v2", mark); // uncomment this if you want to see how the mark
                                  // image looks like at that point
 
    // Generate random colors
    vector<Vec3b> colors;
    for (size_t i = 0; i < contours.size(); i++)
    {
        int b = theRNG().uniform(0, 255);
        int g = theRNG().uniform(0, 255);
        int r = theRNG().uniform(0, 255);
 
        colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
    }
 
    // Create the result image
    Mat dst = Mat::zeros(markers.size(), CV_8UC3);
 
    // Fill labeled objects with random colors
    for (int i = 0; i < markers.rows; i++)
    {
        for (int j = 0; j < markers.cols; j++)
        {
            int index = markers.at<int>(i,j);
            if (index > 0 && index <= static_cast<int>(contours.size()))
                dst.at<Vec3b>(i,j) = colors[index-1];
            else
                dst.at<Vec3b>(i,j) = Vec3b(0,0,0);
        }
    }
 
    // Visualize the final image
    imshow("Final Result", dst);


watershed(src, markers);

Mat mark = Mat::zeros(markers.size(), CV_8UC1);
markers.convertTo(mark, CV_8UC1);

image

bitwise_not(mark, mark);

image
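A related detail: watershed() writes -1 into the markers image at the boundary pixels between basins, which is why mark shows the outlines. Those boundaries can also be painted directly onto the source image; a minimal sketch:

// watershed() labels boundary pixels with -1 in the CV_32SC1 markers image
for (int i = 0; i < markers.rows; i++)
    for (int j = 0; j < markers.cols; j++)
        if (markers.at<int>(i, j) == -1)
            src.at<Vec3b>(i, j) = Vec3b(0, 0, 255);   // red borders (BGR order)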

-------------------------------------------------------------------------------------------------------------------------------

theRNG

Returns the default random number generator.

C++: RNG& theRNG()

The function theRNG returns the default random number generator. For each thread, there is a separate random number generator, so you can use the function safely in multi-thread environments. If you just need to get a single random number using this generator or initialize an array, you can use randu() or randn() instead. But if you are going to generate many random numbers inside a loop, it is much faster to use this function to retrieve the generator and then use RNG::operator _Tp() .

See also

RNG, randu(), randn()

-------------------------------------------------------------------------------------------------------------------------------
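One detail worth knowing about RNG::uniform(a, b): the upper bound is exclusive, i.e. values are drawn from [a, b), so the demo's uniform(0, 255) can never actually produce 255. For the full 8-bit range:

RNG& rng = theRNG();
int b = rng.uniform(0, 256);   // [0, 255] inclusive, since the bound is exclusive
int g = rng.uniform(0, 256);
int r = rng.uniform(0, 256);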


image

This is the result of the original demo:

(image: final.jpeg)

image


--------------------------------------------------------------------------------------------------------------------------------




References

1. Image Segmentation with Distance Transform and Watershed Algorithm

2. 利用C# 拉UI並傳參數給OpenCV C++ DLL (building a C# UI that passes parameters to an OpenCV C++ DLL)

3. OpenCV 2.4.12 Image Filtering

4. cv::Mat_< _Tp > Class Template Reference

5. 標準C++的類型轉換: static_cast、dynamic_cast (type conversions in standard C++: static_cast and dynamic_cast)
