TODO: calculate depths for the surface reconstruction.
Reference:
P. D. Kovesi. MATLAB and Octave Functions for Computer Vision and Image Processing.
School of Computer Science & Software Engineering,
The University of Western Australia. Available from:
<http://www.csse.uwa.edu.au/~pk/research/matlabfns/>.
/* We want to solve Mz = v in a least squares sense: the normal
equations are M^T M z = M^T v. We denote M^T M as A and
M^T v as b, so A z = b. */
CMatrixSparse<double> A(M.mTm());
assert(A.isSymmetric());  // M^T M is symmetric by construction
CVector<double> r = A*z; /* r is the "residual error" for the initial
                            guess z.  NOTE(review): r is not used below;
                            the true initial residual of A z = b would be
                            b - A*z — confirm against the full function. */
CVector<double> b(v*M);  /* row-vector product v*M == M^T v, the
                            right-hand side of the normal equations */
// solve the equation A z = b (conjugate gradient, max 300 iterations)
solveQuadratic<double>(A,b,z,300,CGEPSILON);
// copy the depths back from the vector z into the image depths
copyDepths(z,zind,depths);
// Conjugate-gradient solver for A x = b, A sparse, symmetric,
// positive-definite.  Equivalently minimizes the quadratic form
// f(x) = .5*x'*A*x - b'*x  (the sign is -b'*x: grad f = A*x - b,
// so the minimizer of f satisfies A*x = b).  Based on the tutorial
// by Jonathan Shewchuk, "An Introduction to the Conjugate Gradient
// Method Without the Agonizing Pain".
//   A:       sparse SPD system matrix
//   b:       right-hand side
//   x:       in/out — initial guess on entry, solution on return
//   i_max:   iteration cap
//   epsilon: convergence tolerance
template <class T>
double solveQuadratic(const CMatrixSparse<T> & A, const CVector<T> & b,
CVector<T> & x,int i_max, double epsilon)
{
////////////////////////////////////////////////////////
// PsmView::drawNeedles(): use normal information to
// draw a needle map — presumably one short segment per
// sampled pixel indicating the surface normal (TODO
// confirm against the rest of the method, not visible here).
//
void
PsmView::drawNeedles() const
{
// sample spacing between needles, taken from the UI control
const int vectorStep = (int)theUI->needleSpacing->value();
// needle length: 90% of the spacing, so adjacent needles don't touch
const int vectorSize = (int)(vectorStep * .9);
///////////////////////////////////////////////////////////////////////////
// PsmApp::computeShadingNormals: given newly loaded or computed
// depths, compute the "shading normals" for this depth buffer, so
// that surface renderings reflect the normals of an integrable surface.
//
// Precondition: `depths` is non-null.  Masked-out pixels (when `mask`
// is set) get the straight-up normal (0,0,1).  Always returns true
// (kept for caller compatibility).
//
bool
PsmApp::computeShadingNormals()
{
    assert(depths);
    // check recomputed normals buffer, reallocate if necessary,
    // and recompute
    if (shadingNormals && (shadingNormals->Shape() != depths->Shape()))
    {
        delete shadingNormals;
        shadingNormals = 0;
    }
    int width = depths->Shape().width;
    int height = depths->Shape().height;
    if (!shadingNormals)
        shadingNormals = new NormalImage(width,height,1);
    for (int y = 0; y<height; y++)
        for (int x = 0; x<width; x++)
        {
            Vec3f &n = shadingNormals->Pixel(x,y,0);
            if (mask && !mask->Pixel(x,y,0))
                n = Vec3f(0,0,1);
            else
            {
                // Finite differences of the depth map.  Use a forward
                // difference in the interior; at the last column/row fall
                // back to a backward difference so we never read
                // Pixel(width,y,0) or Pixel(x,height,0), which is out of
                // bounds.  (The previous code read x+1 / y+1
                // unconditionally.)  For a 1-pixel-wide dimension the
                // difference degenerates to 0, which still yields a
                // well-defined normal.
                int xa = x, xb = x + 1;
                if (xb >= width)  { xb = x; xa = (x > 0) ? x - 1 : x; }
                int ya = y, yb = y + 1;
                if (yb >= height) { yb = y; ya = (y > 0) ? y - 1 : y; }
                Vec3f dx = Vec3f(1,0,depths->Pixel(xb,y,0) - depths->Pixel(xa,y,0));
                Vec3f dy = Vec3f(0,1,depths->Pixel(x,yb,0) - depths->Pixel(x,ya,0));
                n = dx.cross(dy).normalized();
            }
        }
    return true;
}
// Cross product (right-hand rule): returns (*this) x v, a vector
// orthogonal to both operands.
Vec3 cross (const Vec3 &v) const
{
    return Vec3 (v.z * y - v.y * z,
                 v.x * z - v.z * x,
                 v.y * x - v.x * y);
}
// OpenCV (C API) line-drawing primitive — declaration as pasted from
// the OpenCV reference; parameter descriptions follow below.
void cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color,
int thickness=1, int line_type=8, int shift=0 );
img
The image.
pt1
First point of the line segment.
pt2
Second point of the line segment.
color
Line color.
thickness
Line thickness.
line_type
Type of the line:
8 (or omitted) - 8-connected line.
4 - 4-connected line.
CV_AA - antialiased line.
shift
Number of fractional bits in the point coordinates.
The function cvLine draws the line segment between the pt1 and pt2 points in the image. The line is clipped by the image or ROI rectangle. For non-antialiased lines with integer coordinates, the 8-connected or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounded endings. Antialiased lines are drawn using Gaussian filtering. To specify the line color, the user may use the macro CV_RGB( r, g, b ).
Albedo: http://en.wikipedia.org/wiki/Albedo
In general, the albedo depends on the direction and directional distribution of incoming radiation. Exceptions are Lambertian surfaces, which scatter radiation diffusely according to Lambert's cosine law, so their albedo does not depend on the distribution of the incoming radiation.
typedef struct _IplImage
{
int nSize; /* sizeof(IplImage) */
int ID; /* version (=0)*/
int nChannels; /* Most of OpenCV functions support 1,2,3 or 4 channels */
int alphaChannel; /* Ignored by OpenCV */
int depth; /* Pixel depth in bits: IPL_DEPTH_8U, IPL_DEPTH_8S, IPL_DEPTH_16S,
IPL_DEPTH_32S, IPL_DEPTH_32F and IPL_DEPTH_64F are supported. */
char colorModel[4]; /* Ignored by OpenCV */
char channelSeq[4]; /* ditto */
int dataOrder; /* 0 - interleaved color channels, 1 - separate color channels.
cvCreateImage can only create interleaved images */
int origin; /* 0 - top-left origin,
1 - bottom-left origin (Windows bitmaps style). */
int align; /* Alignment of image rows (4 or 8).
OpenCV ignores it and uses widthStep instead. */
int width; /* Image width in pixels. */
int height; /* Image height in pixels. */
struct _IplROI *roi; /* Image ROI. If NULL, the whole image is selected. */
struct _IplImage *maskROI; /* Must be NULL. */
void *imageId; /* " " */
struct _IplTileInfo *tileInfo; /* " " */
int imageSize; /* Image data size in bytes
(==image->height*image->widthStep
in case of interleaved data)*/
char *imageData; /* Pointer to aligned image data. */
int widthStep; /* Size of aligned image row in bytes. */
int BorderMode[4]; /* Ignored by OpenCV. */
int BorderConst[4]; /* Ditto. */
char *imageDataOrigin; /* Pointer to very origin of image data
(not necessarily aligned) -
needed for correct deallocation */
}
IplImage;