I have recently been studying the article "Edge Based Template Matching"; the notes below summarize what I learned.
The original author provides a C++ implementation (OpenCV 2.0 and Visual Studio 2008) together with the underlying math, which makes the method easy to follow.
This feature-based approach is much more robust than the traditional gray-value-based approach: it can still locate the object accurately even when it is only partially visible in the search image. The drawback is the heavy computational cost, so it is not well suited to matching large templates against large images.
I have already ported the code to a C# Emgu CV version, and I am considering a GPU-accelerated version next to make it practical for real applications.

The three figures below briefly illustrate the scenario addressed by this example:

The first figure (bottom-left) is the template image.

The second figure is the source image (also called the search image); it contains one instance of the template, partially overlapped by other objects.

The third figure shows that the template can still be found when the background brightness changes.

Gray value based approach

the Normalized Cross Correlation (NCC) algorithm

image
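The NCC formula image is not reproduced here; for reference, the textbook definition of the normalized cross-correlation between a template T and a search image S at offset (u, v) is (the original article may use a slightly different variant):

$$ \mathrm{NCC}(u,v) = \frac{\sum_{x,y}\bigl(T(x,y)-\bar{T}\bigr)\bigl(S(x+u,y+v)-\bar{S}_{u,v}\bigr)}{\sqrt{\sum_{x,y}\bigl(T(x,y)-\bar{T}\bigr)^{2}}\;\sqrt{\sum_{x,y}\bigl(S(x+u,y+v)-\bar{S}_{u,v}\bigr)^{2}}} $$

where $\bar{T}$ is the mean gray value of the template and $\bar{S}_{u,v}$ is the mean of the image patch currently under the template. The normalization makes the score tolerant to linear brightness changes, but because every template pixel enters the sum, partial occlusion quickly destroys the score, which is exactly the weakness the feature-based approach addresses.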


Feature based approach

Step 1: Find the intensity gradient of the image

image
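The formula image is not reproduced here, but what Step 1 computes is the standard gradient magnitude and direction (this matches the code below):

$$ \mathrm{MagG}(i,j) = \sqrt{g_x(i,j)^{2} + g_y(i,j)^{2}}, \qquad \theta(i,j) = \arctan\!\left(\frac{g_y(i,j)}{g_x(i,j)}\right) $$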

gx: x-direction gradient of the whole grayscale image

gy: y-direction gradient of the whole grayscale image

MagG: gradient magnitude at a given pixel

direction: gradient direction at a given pixel (quantized to 0, 45, 90 or 135 degrees, four directions in total)

magMat[i][j]: gradient magnitude of every pixel

MaxGradient: maximum gradient magnitude over the whole image

orients[count]: records the quantized gradient direction of every visited pixel
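The code snippets in this post are fragments, so the buffers above are never declared explicitly. A minimal setup sketch, reconstructed from the pointer casts used later (the exact allocations in the original article may differ):

CvSize Ssize = cvGetSize(src);   // src: 8-bit, single-channel template image

CvMat *gx = cvCreateMat(Ssize.height, Ssize.width, CV_16SC1);       // X gradient (signed 16-bit, as cvSobel needs for 8-bit input)
CvMat *gy = cvCreateMat(Ssize.height, Ssize.width, CV_16SC1);       // Y gradient
CvMat *nmsEdges = cvCreateMat(Ssize.height, Ssize.width, CV_8UC1);  // edge map after non-maximum suppression

float **magMat = new float*[Ssize.height];           // per-pixel gradient magnitude
for( int r = 0; r < Ssize.height; r++ )
    magMat[r] = new float[Ssize.width];

int *orients = new int[Ssize.height * Ssize.width];  // quantized direction of every visited pixel
int count = 0;                                       // running index into orients
double MaxGradient = -1;                             // largest magnitude seen so far, for normalization

short *_sdx, *_sdy;
short fdx, fdy;
float MagG, direction;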

cvSobel( src, gx, 1, 0, 3 );  // gradient in X direction
cvSobel( src, gy, 0, 1, 3 );  // gradient in Y direction

for( i = 1; i < Ssize.height-1; i++ )
{
    _sdx = (short*)(gx->data.ptr + gx->step*i);
    _sdy = (short*)(gy->data.ptr + gy->step*i);

    for( j = 1; j < Ssize.width-1; j++ )
    {
        fdx = _sdx[j]; fdy = _sdy[j];   // read x, y derivatives

        // Magnitude = Sqrt(gx^2 + gy^2)
        MagG = sqrt((float)(fdx*fdx) + (float)(fdy*fdy));
        // Direction = invtan(Gy / Gx)
        direction = cvFastArctan((float)fdy, (float)fdx);
        magMat[i][j] = MagG;

        if( MagG > MaxGradient )
            MaxGradient = MagG;   // get maximum gradient value for normalizing

        // get closest angle from the {0, 45, 90, 135} set
        if( (direction > 0 && direction < 22.5) ||
            (direction > 157.5 && direction < 202.5) ||
            (direction > 337.5 && direction < 360) )
            direction = 0;
        else if( (direction > 22.5 && direction < 67.5) ||
                 (direction > 202.5 && direction < 247.5) )
            direction = 45;
        else if( (direction > 67.5 && direction < 112.5) ||
                 (direction > 247.5 && direction < 292.5) )
            direction = 90;
        else if( (direction > 112.5 && direction < 157.5) ||
                 (direction > 292.5 && direction < 337.5) )
            direction = 135;
        else
            direction = 0;

        orients[count] = (int)direction;
        count++;
    }
}

Gradient directions (quantized into four bins: 0, 45, 90 and 135 degrees)

image

Step 2: Apply non-maximum suppression

Scan the whole image and compare the gradient magnitude of the current pixel (magMat[i][j]) with that of its neighbors.

The quantized direction of the current pixel (orients[count]) decides which two neighbors are used for the comparison (leftPixel and rightPixel).

There are two possible outcomes:

1. If the current magnitude is smaller than either neighbor's magnitude, the pixel is suppressed: nmsEdges[i][j] = 0.

2. Otherwise, nmsEdges[i][j] = magMat[i][j] / MaxGradient * 255,   <----- normalized to the 0~255 gray-level range

for( i = 1; i < Ssize.height-1; i++ )
{
    for( j = 1; j < Ssize.width-1; j++ )
    {
        // pick the two neighbors lying along the quantized gradient direction
        switch( orients[count] )
        {
            case 0:
                leftPixel  = magMat[i][j-1];
                rightPixel = magMat[i][j+1];
                break;
            case 45:
                leftPixel  = magMat[i-1][j+1];
                rightPixel = magMat[i+1][j-1];
                break;
            case 90:
                leftPixel  = magMat[i-1][j];
                rightPixel = magMat[i+1][j];
                break;
            case 135:
                leftPixel  = magMat[i-1][j-1];
                rightPixel = magMat[i+1][j+1];
                break;
        }
        // compare current pixel's magnitude with the two neighbors
        if( (magMat[i][j] < leftPixel) || (magMat[i][j] < rightPixel) )
            (nmsEdges->data.ptr + nmsEdges->step*i)[j] = 0;
        else
            (nmsEdges->data.ptr + nmsEdges->step*i)[j] =
                (uchar)(magMat[i][j]/MaxGradient*255);
        count++;
    }
}

Step 3: Do hysteresis threshold

In pseudocode, each suppressed pixel is thresholded like this (the actual code follows right after; note that the buffer is named nmsEdges in the code):

if nmsEdges[i][j] < maxContrast   (the upper gray-level threshold)
{
    if nmsEdges[i][j] < minContrast   (the lower gray-level threshold)
    {
        nmsEdges[i][j] = 0;
        flag = 0;    // definitely not an edge
    }
    else
    {
        // weak edge: keep it only if it touches at least one strong neighbor
        if all 8 neighbors of nmsEdges[i][j] are below maxContrast
        {
            nmsEdges[i][j] = 0;
            flag = 0;
        }
    }
}

_sdx = (short*)(gx->data.ptr + gx->step*i);
_sdy = (short*)(gy->data.ptr + gy->step*i);
fdx = _sdx[j]; fdy = _sdy[j];

MagG = sqrt(fdx*fdx + fdy*fdy);              // Magnitude = Sqrt(gx^2 + gy^2)
DirG = cvFastArctan((float)fdy, (float)fdx); // Direction = arctan(Gy / Gx)

flag = 1;
if( ((double)((nmsEdges->data.ptr + nmsEdges->step*i))[j]) < maxContrast )
{
    if( ((double)((nmsEdges->data.ptr + nmsEdges->step*i))[j]) < minContrast )
    {
        (nmsEdges->data.ptr + nmsEdges->step*i)[j] = 0;
        flag = 0;  // remove from edge
    }
    else
    {
        // if none of the 8 neighboring pixels reaches maxContrast, remove from edge
        if( (((double)((nmsEdges->data.ptr + nmsEdges->step*(i-1)))[j-1]) < maxContrast) &&
            (((double)((nmsEdges->data.ptr + nmsEdges->step*(i-1)))[j])   < maxContrast) &&
            (((double)((nmsEdges->data.ptr + nmsEdges->step*(i-1)))[j+1]) < maxContrast) &&
            (((double)((nmsEdges->data.ptr + nmsEdges->step*i))[j-1])     < maxContrast) &&
            (((double)((nmsEdges->data.ptr + nmsEdges->step*i))[j+1])     < maxContrast) &&
            (((double)((nmsEdges->data.ptr + nmsEdges->step*(i+1)))[j-1]) < maxContrast) &&
            (((double)((nmsEdges->data.ptr + nmsEdges->step*(i+1)))[j])   < maxContrast) &&
            (((double)((nmsEdges->data.ptr + nmsEdges->step*(i+1)))[j+1]) < maxContrast) )
        {
            (nmsEdges->data.ptr + nmsEdges->step*i)[j] = 0;
            flag = 0;
        }
    }
}
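Taken together, Steps 1 to 3 (Sobel gradients, non-maximum suppression, hysteresis thresholding) amount to a Canny-style edge extraction. As a point of reference only, here is a sketch using the modern OpenCV C++ API that produces a comparable edge map; it is not the original author's code, and it assumes minContrast/maxContrast can stand in for the two Canny thresholds:

#include <opencv2/opencv.hpp>

// The gradients are still needed for the model points saved in Step 4; the binary
// mask that decides which pixels become model points can come from cv::Canny.
void extractEdgesAndGradients( const cv::Mat &templ8u, double minContrast, double maxContrast,
                               cv::Mat &gx, cv::Mat &gy, cv::Mat &edges )
{
    cv::Sobel( templ8u, gx, CV_16S, 1, 0, 3 );             // X derivative, same as cvSobel( src, gx, 1, 0, 3 )
    cv::Sobel( templ8u, gy, CV_16S, 0, 1, 3 );             // Y derivative
    cv::Canny( templ8u, edges, minContrast, maxContrast ); // non-maximum suppression + hysteresis in one call
}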

Step 4: Save the data set

At this point, the (X, Y) positions of the surviving edge points constitute the template model.

Note that the original author defines curX and curY the opposite way from the usual X-Y convention: curX stores the row coordinate and curY stores the column coordinate.

cordinates[noOfCordinates].x = curX;            <--- curX: row coordinate of the edge point

cordinates[noOfCordinates].y = curY;            <--- curY: column coordinate of the edge point

edgeDerivativeX[noOfCordinates] = gx[i, j];     <--- corresponding horizontal gradient gx

edgeDerivativeY[noOfCordinates] = gy[i, j];     <--- corresponding vertical gradient gy

edgeMagnitude[noOfCordinates] = 1/MagG;         <--- reciprocal of the corresponding gradient magnitude

// save selected edge information
curX = i; curY = j;
if( flag != 0 )
{
    if( fdx != 0 || fdy != 0 )
    {
        RSum = RSum + curX; CSum = CSum + curY;  // row sum and column sum for center of gravity

        cordinates[noOfCordinates].x = curX;
        cordinates[noOfCordinates].y = curY;
        edgeDerivativeX[noOfCordinates] = fdx;
        edgeDerivativeY[noOfCordinates] = fdy;

        // handle divide by zero
        if( MagG != 0 )
            edgeMagnitude[noOfCordinates] = 1/MagG;  // reciprocal of gradient magnitude
        else
            edgeMagnitude[noOfCordinates] = 0;

        noOfCordinates++;
    }
}

Finally, shift the model points so that their center of gravity lies at the origin, i.e. replace every original coordinate (x, y) with (x - cx, y - cy):

centerOfGravity.x = RSum / noOfCordinates;  // center of gravity (row)
centerOfGravity.y = CSum / noOfCordinates;  // center of gravity (column)

// change coordinates to reflect center of gravity
for( int m = 0; m < noOfCordinates; m++ )
{
    int temp;
    temp = cordinates[m].x;
    cordinates[m].x = temp - centerOfGravity.x;
    temp = cordinates[m].y;
    cordinates[m].y = temp - centerOfGravity.y;
}
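Written out, the loop above computes the center of gravity and re-centers every model point on it (with n = noOfCordinates):

$$ c_x = \frac{1}{n}\sum_{m=1}^{n} x_m, \qquad c_y = \frac{1}{n}\sum_{m=1}^{n} y_m, \qquad (x_m, y_m) \leftarrow (x_m - c_x,\; y_m - c_y) $$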

Find the edge based template model

image: gradient vector A of the template image

image: gradient vector B of the search image

Similarity = Dot(A, B) / (|A| |B|), which lies between 0 (no match at all) and 1 (a perfect match)

image
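The score-formula image is not reproduced here. Reconstructed from the code below (edgeMagnitude[m] stores 1/|T_m| and matGradMag stores 1/|S|), the score at search position (u, v) is the average cosine similarity between each template gradient and the search-image gradient at the corresponding displaced position:

$$ \mathrm{Score}(u,v) = \frac{1}{n}\sum_{m=1}^{n} \frac{T_{x,m}\,S_x(u+x_m, v+y_m) + T_{y,m}\,S_y(u+x_m, v+y_m)}{\sqrt{T_{x,m}^{2}+T_{y,m}^{2}}\;\sqrt{S_x(u+x_m, v+y_m)^{2}+S_y(u+x_m, v+y_m)^{2}}} $$

where n is the number of model points and (x_m, y_m) are the model coordinates relative to the center of gravity. Because every term is normalized by both gradient magnitudes, the score is unaffected by uniform illumination changes, and occluded model points simply contribute nothing rather than ruining the whole score.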

cvSobel( src, Sdx, 1, 0, 3 );  // find X derivatives of the search image
cvSobel( src, Sdy, 0, 1, 3 );  // find Y derivatives of the search image

// stopping criteria for the model search
double normMinScore = minScore / noOfCordinates;  // precompute minimum score per point
double normGreediness = ((1 - greediness * minScore)/(1 - greediness)) / noOfCordinates;  // precompute greediness term

for( i = 0; i < Ssize.height; i++ )
{
    _Sdx = (short*)(Sdx->data.ptr + Sdx->step*(i));
    _Sdy = (short*)(Sdy->data.ptr + Sdy->step*(i));

    for( j = 0; j < Ssize.width; j++ )
    {
        iSx = _Sdx[j];  // X derivative of source image
        iSy = _Sdy[j];  // Y derivative of source image

        gradMag = sqrt((iSx*iSx) + (iSy*iSy));  // Magnitude = Sqrt(dx^2 + dy^2)

        if( gradMag != 0 )  // handle divide by zero
            matGradMag[i][j] = 1/gradMag;  // 1/Sqrt(dx^2 + dy^2)
        else
            matGradMag[i][j] = 0;
    }
}
for( i = 0; i < Ssize.height; i++ )
{
    for( j = 0; j < Ssize.width; j++ )
    {
        partialSum = 0;  // initialize partial sum measure
        for( m = 0; m < noOfCordinates; m++ )
        {
            curX = i + cordinates[m].x;  // template X (row) coordinate
            curY = j + cordinates[m].y;  // template Y (column) coordinate
            iTx = edgeDerivativeX[m];    // template X derivative
            iTy = edgeDerivativeY[m];    // template Y derivative

            if( curX < 0 || curY < 0 || curX > Ssize.height-1 || curY > Ssize.width-1 )
                continue;

            _Sdx = (short*)(Sdx->data.ptr + Sdx->step*(curX));
            _Sdy = (short*)(Sdy->data.ptr + Sdy->step*(curX));

            iSx = _Sdx[curY];  // corresponding X derivative from source image
            iSy = _Sdy[curY];  // corresponding Y derivative from source image

            if( (iSx != 0 || iSy != 0) && (iTx != 0 || iTy != 0) )
            {
                // partialSum += ((Source X derivative * Template X derivative) +
                //                (Source Y derivative * Template Y derivative)) /
                //               (edge magnitude of template * edge magnitude of source)
                partialSum = partialSum + ((iSx*iTx) + (iSy*iTy)) * (edgeMagnitude[m] * matGradMag[curX][curY]);
            }

            sumOfCoords = m + 1;
            partialScore = partialSum / sumOfCoords;
            // check termination criterion:
            // if the partial score is less than the score needed to still reach
            // the required minScore at this position, stop searching this coordinate
            if( partialScore < (MIN((minScore - 1) + normGreediness*sumOfCoords, normMinScore*sumOfCoords)) )
                break;
        }
        if( partialScore > resultScore )
        {
            resultScore = partialScore;   // match score
            resultPoint->x = i;           // result coordinate X (row)
            resultPoint->y = j;           // result coordinate Y (column)
        }
    }
}
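A numeric example makes the termination test easier to read. With illustrative parameter values (assumed here, not taken from the original article) minScore = 0.7, greediness = 0.8 and noOfCordinates = 100 model points:

normMinScore   = 0.7 / 100 = 0.007
normGreediness = ((1 - 0.8*0.7) / (1 - 0.8)) / 100 = 0.022

After sumOfCoords = 50 model points the inner loop breaks when

partialScore < MIN( (0.7 - 1) + 0.022*50, 0.007*50 ) = MIN( 0.8, 0.35 ) = 0.35

that is, if the average per-point similarity over the first 50 model points is already below 0.35, this (i, j) position can no longer reach the required score and the remaining model points are skipped. Increasing greediness raises this threshold, so non-matching positions are abandoned earlier, at the cost of possibly discarding weak matches.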



X derivative

image

Y derivative

image

Magnitude

image

Non-maximum suppression

image

Template with contour

image

Search result

image

==========================================

X derivative

image

Y derivative

image

Magnitude

image

Non-maximum suppression

image

Template with contour

image

Search result

image




