The function cvRetrieveFrame() returns the pointer to the image grabbed with the GrabFrame function. The returned image should not be released or modified by the user. In the event of an error, the return value may be NULL.
Canny edge detection
Canny edge detection in OpenCV image processing functions
Implements the Canny algorithm for edge detection.
Parameters:
image – Single-channel input image
edges – Single-channel image to store the edges found by the function
threshold1 – The first threshold
threshold2 – The second threshold
aperture_size – Aperture parameter for the Sobel operator (see cvSobel())
The function cvCanny() finds the edges on the input image image and marks them in the output image edges using the Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking, the largest value is used to find the initial segments of strong edges.
IplImage *iplOriginalColor; // image to be captured
IplImage *iplOriginalGrey; // grey-scale image of "iplOriginalColor"
IplImage *iplEdge; // image detected by Canny edge algorithm
IplImage *iplImg; // resulting image to show tracking process
IplImage *iplEdgeClone; // working copy of "iplEdge" used for debug drawing (see commented cvCircle calls below)
// set the process noise
// covariance of Gaussian noise to control
// (D is the state dimension; the state components are x, y and radius —
// see the cvmSet(state, ...) initialization further below)
CvMat* transition_noise = cvCreateMat(D, D, CV_64FC1);
// assume the transition noise
// zero the whole matrix first, then fill in the diagonal entries
for (int row = 0; row < D; row++)
{
for (int col = 0; col < D; col++)
{
cvmSet(transition_noise, row, col, 0.0);
}
}
// diagonal terms: per-component noise for x, y and radius respectively
cvmSet(transition_noise, 0, 0, 3.0);
cvmSet(transition_noise, 1, 1, 3.0);
cvmSet(transition_noise, 2, 2, 0.3);
// set the measurement noise
/*
// covariance of Gaussian noise to measurement
CvMat* measurement_noise = cvCreateMat(D, D, CV_64FC1);
// initialize the measurement noise
for (int row = 0; row < D; row++)
{
for (int col = 0; col < D; col++)
{
cvmSet(measurement_noise, row, col, 0.0);
}
}
cvmSet(measurement_noise, 0, 0, 5.0);
cvmSet(measurement_noise, 1, 1, 5.0);
cvmSet(measurement_noise, 2, 2, 5.0);
*/
// the full covariance matrix above was replaced by a single scalar sigma
double measurement_noise = 2.0; // standard deviation of Gaussian noise to measurement
CvMat* state = cvCreateMat(D, 1, CV_64FC1); // state of the system to be estimated
// CvMat* measurement = cvCreateMat(2, 1, CV_64FC1); // measurement of states
// declare particles (N of them, each a D x 1 column vector)
CvMat* pb [N]; // estimated particles
CvMat* pp [N]; // predicted particles
CvMat* pu [N]; // temporary variables to update a particle
CvMat* v[N]; // estimated velocity of each particle
CvMat* vu[N]; // temporary variable to update the velocity
double w[N]; // weight of each particle
// allocate every per-particle vector up front
for (int n = 0; n < N; n++)
{
pb[n] = cvCreateMat(D, 1, CV_64FC1);
pp[n] = cvCreateMat(D, 1, CV_64FC1);
pu[n] = cvCreateMat(D, 1, CV_64FC1);
v[n] = cvCreateMat(D, 1, CV_64FC1);
vu[n] = cvCreateMat(D, 1, CV_64FC1);
}
// initialize the state and particles
// NOTE(review): this excerpt is not brace-balanced — the "for n" particle
// loop opened here is interleaved with the per-frame while loop below and
// only closes at "} // for n particle"; code is kept verbatim.
for (int n = 0; n < N; n++)
{
cvmSet(state, 0, 0, 258.0); // center-x
cvmSet(state, 1, 0, 406.0); // center-y
cvmSet(state, 2, 0, 38.0); // radius
// initialize the image window
cvZero(iplImg);
cvNamedWindow(titleResult);
cout << "start filtering... " << endl << endl;
// Canny parameters: low/high hysteresis thresholds and Sobel aperture
float aperture = 3, thresLow = 50, thresHigh = 110;
// float aperture = 3, thresLow = 80, thresHigh = 110;
// for each frame
int frameNo = 0;
while(frameNo < frame_count && cvGrabFrame(capture)) {
// retrieve color frame from the movie "capture"
iplOriginalColor = cvRetrieveFrame(capture);
// convert color pixel values of "iplOriginalColor" to grey scales of "iplOriginalGrey"
cvCvtColor(iplOriginalColor, iplOriginalGrey, CV_RGB2GRAY);
// extract edges with Canny detector from "iplOriginalGrey" to save the results in the image "iplEdge"
cvCanny(iplOriginalGrey, iplEdge, thresLow, thresHigh, aperture);
int count = 0; // number of measurements
double dist_sum = 0;
for (int i = 0; i < 8; i++) // for 8 directions
{
// default (penalty) distance used when no edge pixel is found along this direction
double dist = scope[i] * 1.5;
for ( int range = 0; range < scope[i]; range++ )
{
int flag = 0;
for (int turn = -1; turn <= 1; turn += 2) // reverse the searching direction
{
search[i].x = direction[i].x + turn * range * d[i].x;
search[i].y = direction[i].y + turn * range * d[i].y;
// detect measurements
// CvScalar s = cvGet2D(iplEdge, cvRound(search[i].y), cvRound(search[i].x));
unsigned char s = CV_IMAGE_ELEM(iplEdge, unsigned char, cvRound(search[i].y), cvRound(search[i].x));
// if ( s.val[0] > 200 && s.val[1] > 200 && s.val[2] > 200 ) // bgr color
if (s > 250) // bgr color
{ // when the pixel value is white, that means a measurement is detected
flag = 1;
count++;
// cvCircle(iplEdgeClone, cvPoint(cvRound(search[i].x), cvRound(search[i].y)), 3, CV_RGB(200, 0, 255));
// cvShowImage("3D Particle filter", iplEdgeClone);
// cvWaitKey(1);
/* // get measurement
cvmSet(measurement, 0, 0, search[i].x);
cvmSet(measurement, 1, 0, search[i].y);
double dist = distance(measurement, pp[n]);
*/ // evaluate the difference between predictions of the particle and measurements
dist = distanceEuclidean(search[i], direction[i]);
break; // break for "turn"
} // end if
} // for turn
if ( flag == 1 )
{ break; } // break for "range"
} // for range
dist_sum += dist; // for all searching directions of one particle
} // for i direction
double dist_avg; // average distance of measurements from the one particle "n"
// cout << "count = " << count << endl;
dist_avg = dist_sum / 8;
// cout << "dist_avg = " << dist_avg << endl;
// estimate likelihood with "dist_avg"
like[n] = likelihood(dist_avg, measurement_noise);
// cout << "likelihood of particle-#" << n << " = " << like[n] << endl;
like_sum += like[n];
} // for n particle
// cout << "sum of likelihoods of N particles = " << like_sum << endl;
// estimate states
double state_x = 0.0;
double state_y = 0.0;
double state_r = 0.0;
// estimate the state with predicted particles
// (weighted mean of the predicted particles; weights are normalized below)
for (int n = 0; n < N; n++) // for "N" particles
{
w[n] = like[n] / like_sum; // update normalized weights of particles
// cout << "w" << n << "= " << w[n] << " ";
state_x += w[n] * cvmGet(pp[n], 0, 0); // center-x of the object
state_y += w[n] * cvmGet(pp[n], 1, 0); // center-y of the object
state_r += w[n] * cvmGet(pp[n], 2, 0); // radius of the object
}
// a radius cannot be negative; clamp it at zero
if (state_r < 0) { state_r = 0; }
cvmSet(state, 0, 0, state_x);
cvmSet(state, 1, 0, state_y);
cvmSet(state, 2, 0, state_r);
// define integrated portions of each particles; 0 < a[0] < a[1] < a[2] = 1
// (a[] is the cumulative weight distribution used for resampling)
a[0] = w[0];
for (int n = 1; n < N; n++)
{
a[n] = a[n - 1] + w[n];
// cout << "a" << n << "= " << a[n] << " ";
}
// cout << "a" << N << "= " << a[N] << " " << endl;
// resampling: draw N new particles from the cumulative distribution a[]
for (int n = 0; n < N; n++)
{
// select a particle from the distribution
int pselected;
for (int k = 0; k < N; k++)
{
if ( uniform_random() < a[k] )
{
pselected = k;
break;
}
}
// cout << "p " << n << " => " << pselected << " ";
// retain the selection
// NOTE(review): cvSub operates on the whole vector, so calling it inside
// the per-row loop repeats the same vu[n] update D times — confirm whether
// it was meant to sit outside the loop.
for (int row = 0; row < D; row++)
{
cvmSet(pu[n], row, 0, cvmGet(pp[pselected],row,0));
cvSub(pp[pselected], pb[pselected], vu[n]); // pp - pb -> vu
}
}
// updated each particle and its velocity
for (int n = 0; n < N; n++)
{
for (int row = 0; row < D; row++)
{
cvmSet(pb[n], row, 0, cvmGet(pu[n],row,0));
cvmSet(v[n], row , 0, cvmGet(vu[n],row,0));
}
}
cout << endl << endl ;
// count the number of detections in measurement process
// Samples how many measurement points are produced for one time step from
// the fixed discrete distribution encoded in mtype[]: P(1) = 0.5,
// P(2) = 0.3, P(3) = 0.2 (type 0, "no detection", has probability 0).
// Returns the sampled count, used by the caller to index measurement[0..2].
int count_detections (void)
{
    // set cases of measurement results
    // mtype[] is the cumulative distribution: 0.0, 0.5, 0.8, 1.0
    double mtype[4];
    mtype [0] = 0.0;
    mtype [1] = 0.5;
    mtype [2] = mtype[1] + 0.3;
    mtype [3] = mtype[2] + 0.2;
    // cout << "check measurement type3 = " << mtype[3] << endl; // just to check
    // select a measurement case by inverse-CDF sampling
    double mrn = uniform_random();
    // BUGFIX: the fallback was previously 1, so the last case
    // (mrn >= 0.8, declared probability 0.2 via mtype[3]) silently
    // collapsed into type 1; it must fall through to type 3.
    int type = 3;
    for ( int k = 0; k < 3; k++ )
    {
        if ( mrn < mtype[k] )
        {
            type = k;
            break;
        }
    }
    return type;
}
// Euclidean distance between a measurement vector and a predicted state,
// comparing only the first two components (the x/y position).
double distance(CvMat* measurement, CvMat* prediction)
{
    double sum_sq = 0.0;
    for (int axis = 0; axis < 2; axis++)
    {
        const double delta = cvmGet(measurement, axis, 0) - cvmGet(prediction, axis, 0);
        sum_sq += delta * delta;
    }
    return sqrt(sum_sq);
}
// likelihood based on multivariate normal distribution (ref. 15p eqn(2.4))
// NOTE(review): the function body is missing from this excerpt — only the
// signature survives before the commented-out struct below.
double likelihood(CvMat *mean, CvMat *covariance, CvMat *sample) {
/*
struct particle
{
double weight; // weight of a particle
CvMat* loc_p = cvCreateMat(2, 1, CV_64FC1); // previously estimated position of a particle
CvMat* loc = cvCreateMat(2, 1, CV_64FC1); // currently estimated position of a particle
CvMat* velocity = cvCreateMat(2, 1, CV_64FC1); // estimated velocity of a particle
cvSub(loc, loc_p, velocity); // loc - loc_p -> velocity
};
*/
// 2-D particle filter demo: estimates a moving point from noisy measurements
int main (int argc, char * const argv[]) {
srand(time(NULL)); // seed the RNG used by uniform_random()/gaussian_random()
int width = 400; // width of image window
int height = 400; // height of image window
CvMat* state = cvCreateMat(2, 1, CV_64FC1);
// declare particles
/* particle pb[N]; // N estimated particles
particle pp[N]; // N predicted particles
particle pu[N]; // temporary variables for updating particles
*/
CvMat* pb [N]; // estimated particles
CvMat* pp [N]; // predicted particles
CvMat* pu [N]; // temporary variables to update a particle
CvMat* v[N]; // estimated velocity of each particle
CvMat* vu[N]; // temporary variable to update the velocity
double w[N]; // weight of each particle
for (int n = 0; n < N; n++)
{
pb[n] = cvCreateMat(2, 1, CV_64FC1);
pp[n] = cvCreateMat(2, 1, CV_64FC1);
pu[n] = cvCreateMat(2, 1, CV_64FC1);
v[n] = cvCreateMat(2, 1, CV_64FC1);
vu[n] = cvCreateMat(2, 1, CV_64FC1);
}
// initialize particles and the state
for (int n = 0; n < N; n++)
{
w[n] = (double) 1 / (double) N; // equally weighted
for (int row=0; row < 2; row++)
{
cvmSet(pb[n], row, 0, 0.0);
cvmSet(v[n], row, 0, 15 * uniform_random());
cvmSet(state, row, 0, 0.0);
}
}
// set the system
CvMat* groundtruth = cvCreateMat(2, 1, CV_64FC1); // groundtruth of states
CvMat* measurement [3]; // measurement of states
for (int k = 0; k < 3; k++) // 3 types of measurement
{
measurement[k] = cvCreateMat(2, 1, CV_64FC1);
}
// evaluate measurements
// NOTE(review): the excerpt jumps here into per-particle code — "count",
// "n", "like" and "measurement_noise" are defined in parts of the file not
// shown at this point; code kept verbatim.
double range = (double) width; // range to search measurements for each particle
// cout << "range of distances = " << range << endl;
int mselected;
// pick the measurement closest to this particle's prediction
for (int index = 0; index < count; index++)
{
double d = distance(measurement[index], pp[n]);
if ( d < range )
{
range = d;
mselected = index; // selected measurement
}
}
/// cout << "selected measurement # = " << mselected << endl;
like[n] = likelihood(measurement[mselected], measurement_noise, pp[n]);
/// cout << "likelihood of #" << n << " = " << like[n] << endl;
Lessons:
1. transition noise와 measurement noise, (그것들의 covariances) 그리고 각 입자의 초기 위치와 상태를 알맞게 설정하는 것이 관건임을 배웠다. 그것을 Tuning이라 부른다는 것도.
1-1. 코드에서 가정한 system에서는 특히 입자들의 초기 속도를 어떻게 주느냐에 따라 tracking의 성공 여부가 좌우된다.
2. 실제 상태는 등가속도 운동을 하는 비선형 시스템이나, 여기에서는 프레임 간의 운동을 등속으로 가정하여 선형 시스템으로 근사한 모델을 적용한 것이다.
2-1. 그러므로 여기에 Kalman filter를 적용하여 결과를 비교해 볼 수 있겠다. 이 경우, 3차원 Kalman gain을 계산해야 한다.
2-2. 분홍색 부분을 고쳐 비선형 모델로 만든 후 Particle filtering을 하면 결과가 더 좋지 않을까? Tuning도 더 쉬워지지 않을까?
3. 코드 좀 정리하자. 너무 지저분하다. ㅡㅡ;
4. 아니, 근데 영 헤매다가 갑자기 따라잡는 건 뭐지??? (아래 결과)
// count the number of detections in measurement process
// Samples how many measurement points are produced for one time step from
// the fixed discrete distribution encoded in mtype[]: P(1) = 0.5,
// P(2) = 0.3, P(3) = 0.2 (type 0, "no detection", has probability 0).
// Returns the sampled count, used by the caller to index measurement[0..2].
int count_detections (void)
{
    // set cases of measurement results
    // mtype[] is the cumulative distribution: 0.0, 0.5, 0.8, 1.0
    double mtype[4];
    mtype [0] = 0.0;
    mtype [1] = 0.5;
    mtype [2] = mtype[1] + 0.3;
    mtype [3] = mtype[2] + 0.2;
    // cout << "check measurement type3 = " << mtype[3] << endl; // just to check
    // select a measurement case by inverse-CDF sampling
    double mrn = uniform_random();
    // BUGFIX: the fallback was previously 1, so the last case
    // (mrn >= 0.8, declared probability 0.2 via mtype[3]) silently
    // collapsed into type 1; it must fall through to type 3.
    int type = 3;
    for ( int k = 0; k < 3; k++ )
    {
        if ( mrn < mtype[k] )
        {
            type = k;
            break;
        }
    }
    return type;
}
// get measurements
// Simulates up to three measured points around the true position (x, y):
// two Gaussian-perturbed detections (noise scaled by c1 and c2) plus one
// uniformly distributed clutter point; count_detections() decides how many
// of them are reported.
// NOTE(review): the closing brace of this function is missing from the
// excerpt — the text cuts off inside the print loop.
int get_measurement (CvMat* measurement[], CvMat* measurement_noise, double x, double y)
{
// set measurement types
double c1 = 1.0, c2 = 4.0;
// measured point 1
cvmSet(measurement[0], 0, 0, x + (c1 * cvmGet(measurement_noise,0,0) * gaussian_random())); // x-value
cvmSet(measurement[0], 1, 0, y + (c1 * cvmGet(measurement_noise,1,1) * gaussian_random())); // y-value
// measured point 2
cvmSet(measurement[1], 0, 0, x + (c2 * cvmGet(measurement_noise,0,0) * gaussian_random())); // x-value
cvmSet(measurement[1], 1, 0, y + (c2 * cvmGet(measurement_noise,1,1) * gaussian_random())); // y-value
// measured point 3 // clutter noise
cvmSet(measurement[2], 0, 0, width*uniform_random()); // x-value
cvmSet(measurement[2], 1, 0, height*uniform_random()); // y-value
// count the number of measured points
int number = count_detections(); // number of detections
cout << "# of measured points = " << number << endl;
// get measurement
for (int index = 0; index < number; index++)
{
cout << "measurement #" << index << " : "
<< cvmGet(measurement[index],0,0) << " " << cvmGet(measurement[index],1,0) << endl;
// set the system
// NOTE(review): a new setup sequence begins here mid-excerpt (state,
// groundtruth, particles, noise covariances, display window); kept verbatim.
CvMat* state = cvCreateMat(2, 1, CV_64FC1); // state of the system to be estimated
CvMat* groundtruth = cvCreateMat(2, 1, CV_64FC1); // groundtruth of states
CvMat* measurement [3]; // measurement of states
for (int k = 0; k < 3; k++) // 3 types of measurement
{
measurement[k] = cvCreateMat(2, 1, CV_64FC1);
}
// declare particles
CvMat* pb [N]; // estimated particles
CvMat* pp [N]; // predicted particles
CvMat* pu [N]; // temporary variables to update a particle
CvMat* v[N]; // estimated velocity of each particle
CvMat* vu[N]; // temporary variable to update the velocity
double w[N]; // weight of each particle
for (int n = 0; n < N; n++)
{
pb[n] = cvCreateMat(2, 1, CV_64FC1);
pp[n] = cvCreateMat(2, 1, CV_64FC1);
pu[n] = cvCreateMat(2, 1, CV_64FC1);
v[n] = cvCreateMat(2, 1, CV_64FC1);
vu[n] = cvCreateMat(2, 1, CV_64FC1);
}
// initialize the state and particles
for (int n = 0; n < N; n++)
{
w[n] = (double) 1 / (double) N; // equally weighted
for (int row=0; row < 2; row++)
{
cvmSet(state, row, 0, 0.0);
cvmSet(pb[n], row, 0, 0.0);
cvmSet(v[n], row, 0, 15 * uniform_random());
}
}
// set the process noise
// covariance of Gaussian noise to control (2x2 diagonal)
CvMat* transition_noise = cvCreateMat(2, 2, CV_64FC1);
cvmSet(transition_noise, 0, 0, 3); //set transition_noise(0,0) to 3.0
cvmSet(transition_noise, 0, 1, 0.0);
cvmSet(transition_noise, 1, 0, 0.0);
cvmSet(transition_noise, 1, 1, 6);
// set the measurement noise
// covariance of Gaussian noise to measurement (2x2 diagonal)
CvMat* measurement_noise = cvCreateMat(2, 2, CV_64FC1);
cvmSet(measurement_noise, 0, 0, 2); //set measurement_noise(0,0) to 2.0
cvmSet(measurement_noise, 0, 1, 0.0);
cvmSet(measurement_noise, 1, 0, 0.0);
cvmSet(measurement_noise, 1, 1, 2);
// initialize the image window
cvZero(iplImg);
cvNamedWindow("ParticleFilter-3d", 0);
// main tracking loop over discrete time steps
// NOTE(review): the loop opened here does not close within this excerpt.
for (int t = 0; t < step; t++) // for "step" steps
{
// cvZero(iplImg);
cout << "step " << t << endl;
// get the groundtruth
// groundtruth path: gx advances linearly, gy follows a downward parabola
double gx = 10 * t;
double gy = (-1.0 / width ) * (gx - width) * (gx - width) + height;
get_groundtruth(groundtruth, gx, gy);
// get measurements
int count = get_measurement(measurement, measurement_noise, gx, gy);
double like[N]; // likelihood between measurement and prediction
double like_sum = 0; // sum of likelihoods
for (int n = 0; n < N; n++) // for "N" particles
{
// predict
// constant-velocity model plus per-axis Gaussian transition noise
double prediction;
for (int row = 0; row < 2; row++)
{
prediction = cvmGet(pb[n],row,0) + cvmGet(v[n],row,0) + cvmGet(transition_noise,row,row) * gaussian_random();
cvmSet(pp[n], row, 0, prediction);
}
// cvLine(iplImg, cvPoint(cvRound(cvmGet(pp[n],0,0)), cvRound(cvmGet(pp[n],1,0))), cvPoint(cvRound(cvmGet(pb[n],0,0)), cvRound(cvmGet(pb[n],1,0))), CV_RGB(100,100,0), 1);
// cvCircle(iplImg, cvPoint(cvRound(cvmGet(pp[n],0,0)), cvRound(cvmGet(pp[n],1,0))), 1, CV_RGB(255, 255, 0));
// evaluate measurements
double range = (double) width; // range to search measurements for each particle
// cout << "range of distances = " << range << endl;
int mselected;
// pick the measurement closest to this particle's prediction
for (int index = 0; index < count; index++)
{
double d = distance(measurement[index], pp[n]);
if ( d < range )
{
range = d;
mselected = index; // selected measurement
}
}
// cout << "selected measurement # = " << mselected << endl;
like[n] = likelihood(measurement[mselected], measurement_noise, pp[n]);
// cout << "likelihood of #" << n << " = " << like[n] << endl;
like_sum += like[n];
}
// cout << "sum of likelihoods = " << like_sum << endl;
// estimate states
double state_x = 0.0;
double state_y = 0.0;
// estimate the state with predicted particles
// (weighted mean using the normalized weights computed below)
for (int n = 0; n < N; n++) // for "N" particles
{
w[n] = like[n] / like_sum; // update normalized weights of particles
// cout << "w" << n << "= " << w[n] << " ";
state_x += w[n] * cvmGet(pp[n], 0, 0); // x-value
state_y += w[n] * cvmGet(pp[n], 1, 0); // y-value
}
cvmSet(state, 0, 0, state_x);
cvmSet(state, 1, 0, state_y);
// define integrated portions of each particles; 0 < a[0] < a[1] < a[2] = 1
// (a[] is the cumulative weight distribution used for resampling)
a[0] = w[0];
for (int n = 1; n < N; n++)
{
a[n] = a[n - 1] + w[n];
// cout << "a" << n << "= " << a[n] << " ";
}
// cout << "a" << N << "= " << a[N] << " " << endl;
for (int n = 0; n < N; n++)
{
// select a particle from the distribution (inverse-CDF resampling)
int pselected;
for (int k = 0; k < N; k++)
{
if ( uniform_random() < a[k] )
{
pselected = k;
break;
}
}
// cout << "p " << n << " => " << pselected << " ";
This paper appears in: Robotics and Automation, 2003. Proceedings. ICRA '03. IEEE International Conference on
Publication Date: 14-19 Sept. 2003
Volume: 2
On page(s): 1985 - 1991 vol.2
Number of Pages: 3 vol.lii+4450
ISSN: 1050-4729
ISBN: 0-7803-7736-2
INSPEC Accession Number:7877180
Digital Object Identifier: 10.1109/ROBOT.2003.1241885
Current Version Published: 2003-11-10
the problem of building a map of an unknown environment from a sequence of noisy landmark measurements obtained from a moving robot + a robot localization problem => SLAM
autonomous robots operating in environments where precise maps and positioning are not available
Extended Kalman Filter (EKF)
: used for incrementally estimating the joint posterior distribution over robot pose and landmark positions
limitations of EKF
1) Quadratic complexity (: sensor updates require time quadratic in the total number of landmarks in the map)
=> limiting the number of landmarks to only a few hundred (whereas natural environment models frequently contain millions of features
2) Data association / correspondence (: mapping of observations to landmarks)
=> associating a small number of observations with incorrect landmarks can cause the filter to diverge
FastSLAM decomposes the SLAM problem into a robot localization problem, and a collection of landmark estimation problems that are conditioned on the robot pose estimate.
FastSLAM factors the SLAM posterior into a localization problem and K independent landmark estimation problems conditioned on the robot pose estimate.
> a modified particle filter to estimate the posterior over robot paths
> each particle possessing K independent Kalman filters that estimate the landmark locations conditioned on the particle's path
=> an instance of the Rao-Blackwellized particle filter
Representing particles as binary trees of Kalman Filters
-> Incorporating observations into FastSLAM in O(MlogK) time (M, # of particles; K, # of landmarks)
Each particle represents a different robot pose hypothesis.
=> Data association can be considered separately for every particle.
=> 1) The noise of robot motion does not affect the accuracy of data association.
2) Incorrectly associated particles will receive lower probabilities and will be removed in future resampling steps.
On real-world data with ambiguous data association
Adding extra odometric noise
( Odometry is the use of data from the movement of actuators to estimate change in position over time. )
Estimating an accurate map without any odometry in the environment in which the Kalman Filter inevitably diverges.
How to incorporate negative information resulting in a measurable increase in the accuracy of the resulting map
ii) motion noise: robot pose uncertainty after incorporating a control
=> 1) adding a large amount of error to the robot's pose
2) causing a filter to diverge
conditional independence
: The problem of determining the landmark locations could be decoupled into K independent estimation problems, one for each landmark.
FastSLAM estimates the factored SLAM posterior using a modified particle filter, with K independent Kalman Filters for each particle to estimate the landmark positions conditioned on the hypothesized robot paths. The resulting algorithm is an instance of the Rao-Blackwellized particle filter.
This paper appears in: Signal Processing, IEEE Transactions on [see also Acoustics, Speech, and Signal Processing, IEEE Transactions on]
Publication Date: Feb. 2002
Volume: 50 , Issue: 2
On page(s): 174 - 188
ISSN: 1053-587X
CODEN: ITPRED
INSPEC Accession Number:7173038
Digital Object Identifier: 10.1109/78.978374
Current Version Published: 2002-08-07
Doucet, A., Freitas, N. d., Murphy, K. P., and Russell, S. J. 2000. Rao-Blackwellised Particle Filtering for Dynamic Bayesian Networks. In Proceedings of the 16th Conference on Uncertainty in Artificial intelligence (June 30 - July 03, 2000). C. Boutilier and M. Goldszmidt, Eds. Morgan Kaufmann Publishers, San Francisco, CA, 176-183.
Rudolph van der Merwe, Nando de Freitas, Arnaud Doucet, Eric Wan
The Unscented Particle Filter
In Advances in Neural Information Processing Systems 13 (Nov 2001)
This paper appears in: Computer Vision and Pattern Recognition, 2006 IEEE Computer Society Conference on
Publication Date: 17-22 June 2006
Volume: 1, On page(s): 469- 476
ISSN: 1063-6919
ISBN: 0-7695-2597-0
Digital Object Identifier: 10.1109/CVPR.2006.263
Current Version Published: 2006-07-05
monocular SLAM
particle filter + top-down search => real-time, large number of landmarks
the first to apply this FastSLAM-type particle filter to single-camera SLAM
1. Introduction
SLAM = Simultaneous Localization and Mapping
: process of causally estimating both egomotion and structure in an online system
SLAM using visual data in computer vision
SFM (= structure from motion): reconstructing scene geometry
+ causal or recursive estimation techniques
perspective-projection cameras
filtering methods to allow indirect observation models
Kalman filtering framework
Extended Kalman filter = EKF (-> to linearize the observation and dynamics models of the system)
causal estimation with recursive algorithms (cp. estimation depending only on observations up to the current time)
=> online operation (cp. SFM on global nonlinear optimization)
Davison's SLAM with a single camera
> EKF estimation framework
> top-down Bayesian estimation approach searching for landmarks in image regions constrained by estimate > uncertainty (instead of performing extensive bottom-up image processing and feature matching)
> Bayesian partial-initialization scheme for incorporating new landmarks
- cannot scale to large environment
EKF = the Extended Kalman filter
- N*N covariance matrix for N landmarks
- updated with N*N computation cost
> SLAM system using a single camera as the only sensor
> frame-rate operation with many landmarks
> FastSLAM-style particle filter (the first use of such an approach in a monocular SLAM setting)
> top-down active search
> an efficient algorithm for discovering the depth of new landmarks that avoids linearization errors
> a novel method for using partially initialized landmarks to help constrain camera pose
FastSLAM
: based on the Rao-Blackwellized Particle Filter
2. Background
2.1 Scalable SLAM
> submap
bounded complexity -> bounded computation and space requirements
Montemerlo & Thrun
If the entire camera motion is known then the estimates of the positions of different landmarks become independent of each other.