'''
Implementation of the Condensation algorithm, originally conceived by Michael Isard.
http://www.cs.ubc.ca/~nando/smc/index.htm
'''
__docformat__ = "restructuredtext en"

from numpy.random import normal, random
from numpy import zeros
from math import pi, exp, sqrt

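# Overview: each Condensation iteration (1) picks base samples from the old
# sample set with probability proportional to their weights, (2) predicts new
# positions by sampling from the process model, and (3) reweights each
# prediction by the observation density. The Model class supplies the
# model-dependent pieces; the Condensation class drives the loop.
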
class Model(object):
    """
    Example Model Specification
    """
    def __init__(self):
        self.gdata = GlobalData((0, .2), (-.1, .4, .075), (-.1, .4, .075, .03), .03, 1000, 100)
        self.data = IterationData()
        self.out = zeros((self.gdata.niterations, 3), dtype=float)

    def setupPriorConditions(self):
        for n in xrange(self.gdata.nsamples):
            self.data.oldpos[n] = self.gdata.PriorModel[0] + self.gdata.PriorModel[1]*normal()
            # cumul_prob_array[n] holds the cumulative weight *before* sample n.
            self.data.cumul_prob_array[n] = float(n)
            self.data.sample_weights[n] = 1.0
        # With unit weights the cumulative total over all samples is nsamples.
        self.data.largest_cumulative_prob = float(self.gdata.nsamples)
        self.data.meas[0] = 0.0

    def iterate(self, previous, process):
        '''
        The process model for a first-order auto-regressive process is:

        x_{t+1} - mean = (x_t - mean)*scaling + sigma*w_t

        where w_t is unit iid Gaussian noise.

        :Parameters:
            - `previous`: the previous value of the process
            - `process`: process model parameter tuple (mean, scaling, sigma)
        '''
        return process[0] + (previous - process[0])*process[1] + process[2]*normal()

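    # For reference: with |scaling| < 1 this AR(1) process is stationary with
    # mean `mean` and standard deviation sigma/sqrt(1 - scaling**2); for the
    # ProcessModel (-.1, .4, .075) used above that is 0.075/sqrt(1 - 0.16),
    # roughly 0.082.
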
    def predictSamplePosition(self, new_sample, old_sample):
        '''
        This routine samples from the distribution

        p(x_t | x_{t-1} = oldpos[old_sample])

        and stores the result in newpos[new_sample]. This is
        straightforward for the simple first-order auto-regressive process
        model used here, but any model could be substituted.
        '''
        self.data.newpos[new_sample] = self.iterate(self.data.oldpos[old_sample], self.gdata.ProcessModel)

    def evaluateObservationDensity(self, new_sample):
        '''
        This routine evaluates the observation density

        p(z_t | x_t = newpos[new_sample])

        The observation model in this implementation is a simple mixture of
        Gaussians, where each simulated object is observed as a 1d position
        and measurement noise is represented as Gaussian. For a
        visual-tracking application, this routine would instead evaluate the
        likelihood that the object is present in the image at the position
        encoded by newpos[new_sample].
        '''
        return evaluate_gaussian(self.data.newpos[new_sample] - self.data.meas[1], self.gdata.ObservationModel)

    def obtainObservations(self):
        '''
        In a real implementation, this routine would actually make
        measurements and store them in the data.meas structure. This
        simulation consists of an object moving around obeying a
        first-order auto-regressive process, and being observed with its
        true position corrupted by Gaussian measurement noise.
        Accordingly, this routine calculates the new simulated true and
        measured position of the object.
        '''
        self.data.meas[0] = self.iterate(self.data.meas[0], self.gdata.SceneModel)
        self.data.meas[1] = self.data.meas[0] + self.gdata.SceneModel[3]*normal()
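    # Note that SceneModel's fourth parameter (.03), the simulated measurement
    # noise, matches the ObservationModel sigma (.03) in the example above, so
    # the filter weights the samples under the same noise level that corrupts
    # the measurements.
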
    def display(self, iteration):
        '''
        Record (measured position, true position, weighted-mean estimate)
        for this iteration in self.out.
        '''
        aggregate = float(sum(self.data.newpos*self.data.sample_weights)) / self.data.largest_cumulative_prob
        self.out[iteration, :] = (self.data.meas[1], self.data.meas[0], aggregate)


class GlobalData(object):
    def __init__(self, prior, process, scene, observ, nsam, nit):
        '''
        Class to hold global data for the simulation.

        :Parameters:
            - `prior`: parameter tuple (mean, sigma) of the prior distribution for the first step.
            - `process`: parameters specifying the process model (mean, scaling, sigma).
            - `scene`: parameters of the simulation model that generates the tracked process (mean, scaling, process sigma, measurement sigma).
            - `observ`: sigma of the observation model.
            - `nsam`: number of samples.
            - `nit`: number of iterations of the model.
        '''
        self.PriorModel = prior
        self.ProcessModel = process
        self.SceneModel = scene
        self.ObservationModel = observ
        self.nsamples = nsam
        self.niterations = nit

class IterationData(object):
    '''
    Per-iteration state of the filter; the sample arrays are allocated by
    Condensation.__init__.
    '''
    def __init__(self):
        self.newpos = 0.
        self.oldpos = 0.
        self.sample_weights = None
        self.cumul_prob_array = None
        self.largest_cumulative_prob = 1.
        self.meas = [0., 0.]

class Condensation(object):
    def __init__(self, model):
        self.globaldata = model.gdata
        self.iterdata = model.data
        self.model = model
        self.iterdata.newpos = zeros((self.globaldata.nsamples, 1), dtype=float)
        self.iterdata.oldpos = zeros((self.globaldata.nsamples, 1), dtype=float)
        self.iterdata.sample_weights = zeros((self.globaldata.nsamples, 1), dtype=float)
        self.iterdata.cumul_prob_array = zeros((self.globaldata.nsamples, 1), dtype=float)

        self.model.setupPriorConditions()

    def pickBaseSample(self):
        '''
        This is binary search using cumulative probabilities to pick a base
        sample. The use of this routine makes Condensation O(N log N) where N
        is the number of samples. It is probably better to pick base
        samples deterministically, since then the algorithm is O(N) and
        probably marginally more efficient, but this routine is kept here
        for conceptual simplicity and because it maps better to the
        published literature.
        '''
        choice = random()*self.iterdata.largest_cumulative_prob
        low = 0
        high = self.globaldata.nsamples

        while high > (low+1):
            middle = (high+low)//2
            if choice > self.iterdata.cumul_prob_array[middle]:
                low = middle
            else:
                high = middle

        return low

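    # The docstring above notes that deterministic selection would be O(N).
    # A minimal sketch of one such scheme, systematic resampling; this method
    # is an illustrative addition and is not called anywhere in this module:
    def pickBaseSamplesSystematic(self):
        '''Return nsamples base indices using a single random offset.'''
        n = self.globaldata.nsamples
        step = self.iterdata.largest_cumulative_prob / n
        offset = random()*step
        indices = []
        cumul = 0.0
        i = -1
        for k in xrange(n):
            target = offset + k*step
            # Advance until the running weight total passes the target
            # (the i + 1 < n guard protects against float rounding).
            while cumul <= target and i + 1 < n:
                i += 1
                cumul += float(self.iterdata.sample_weights[i])
            indices.append(i)
        return indices
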
    def predictNewBases(self):
        '''
        This method computes all of the new (unweighted) sample
        positions. For each sample, first a base is chosen, then the new
        sample position is computed by sampling from the prediction density
        p(x_t | x_{t-1} = base). predictSamplePosition is obviously
        model-dependent and is found in Model, but it can be
        replaced by any process model required.
        '''
        for n in xrange(self.globaldata.nsamples):
            base = self.pickBaseSample()
            self.model.predictSamplePosition(n, base)

    def calculateBaseWeights(self):
        '''
        Once all the unweighted sample positions have been computed using
        predictNewBases, this routine computes the weights by evaluating
        the observation density at each of the positions. Cumulative
        probabilities are also computed at the same time, to permit an
        efficient implementation of pickBaseSample using binary
        search. evaluateObservationDensity is obviously model-dependent
        and is found in the Model class, but it can be replaced by any
        observation model required.
        '''
        cumul_total = 0.0
        for n in xrange(self.globaldata.nsamples):
            self.iterdata.sample_weights[n] = self.model.evaluateObservationDensity(n)
            self.iterdata.cumul_prob_array[n] = cumul_total
            cumul_total += float(self.iterdata.sample_weights[n])
        self.iterdata.largest_cumulative_prob = cumul_total

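    # A common diagnostic for particle filters (not part of the original
    # algorithm) is the effective sample size, (sum w)^2 / sum(w^2); values
    # far below nsamples indicate weight degeneracy. A minimal sketch:
    def effectiveSampleSize(self):
        '''Effective number of samples implied by the current weights.'''
        w = self.iterdata.sample_weights
        total = float(sum(w))
        return total*total / float(sum(w*w))
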
    def shiftSamples(self, iteration):
        '''
        Go and output the estimate for this iteration (which is a
        model-dependent routine found in Model) and then swap
        over the arrays ready for the next iteration.
        '''
        self.model.display(iteration)
        temp = self.iterdata.newpos
        self.iterdata.newpos = self.iterdata.oldpos
        self.iterdata.oldpos = temp

    def runFilter(self):
        '''Predict, observe, reweight and shift for each iteration.'''
        for i in xrange(self.globaldata.niterations):
            self.predictNewBases()
            self.model.obtainObservations()
            self.calculateBaseWeights()
            self.shiftSamples(i)

def evaluate_gaussian(val, sigma):
    '''Density of a zero-mean Gaussian with standard deviation sigma at val.'''
    return 1.0/(sqrt(2.0*pi)*sigma) * exp(-0.5*(val*val/(sigma*sigma)))

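# Sanity check: evaluate_gaussian(0.0, 1.0) is 1/sqrt(2*pi), about 0.3989,
# the peak density of the standard normal.
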
if __name__ == "__main__":
    M = Model()
    C = Condensation(M)
    C.runFilter()
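    # Columns of M.out are (measured, true, estimated) position per
    # iteration; e.g. to plot the tracking run (assuming matplotlib/pylab
    # is available):
    #   import pylab as P
    #   P.plot(M.out)
    #   P.legend(('measured', 'true', 'estimated'))
    #   P.show()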