1 """
2 Various Monte Carlo equilibrium sampling algorithms, which simulate only one Markov chain.
3
4 Here is how to sample from a PDF using the L{HMCSampler} class. In the following
5 snippet we draw 5000 samples from a 1D normal distribution and plot them:
6
7
8 >>> import numpy
9 >>> from csb.io.plots import Chart
10 >>> from csb.statistics.pdf import Normal
11 >>> from csb.statistics.samplers import State
12 >>> from csb.statistics.samplers.mc.singlechain import HMCSampler
13
14 >>> initial_state = State(numpy.array([1.]))
15 >>> grad = lambda q, t: q
16 >>> timestep = 1.5
17 >>> nsteps = 30
18 >>> nsamples = 5000
19
20 >>> sampler = HMCSampler(Normal(), initial_state, grad, timestep, nsteps)
21
22 >>> states = []
23 >>> for i in range(nsamples):
24 sampler.sample()
25 states.append(sampler.state)
26
27 >>> print('acceptance rate:', sampler.acceptance_rate)
28 0.8
29
30 >>> states = [state.position[0]for state in states]
31 >>> chart = Chart()
32 >>> chart.plot.hist([numpy.random.normal(size=5000), states], bins=20, normed=True)
33 >>> chart.plot.legend(['numpy.random.normal', 'HMC'])
34 >>> chart.show()
35
36
37 First, several things which are being needed are imported.
38 As every sampler in this module implements a Markov Chain, an initial state has to be
39 chosen. In the following lines, several parameters are set:
40 - the gradient of the negative log-probability of the PDF under consideration
41 - the integration timestep
42 - the number of integration steps to be performed in each iteration, that is, the HMC
43 trajectory length
44 - the number of samples to be drawn
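
For the standard normal distribution sampled in this example, the pseudo-energy is
M{E(q) = -log p(q) = q^2 / 2} up to an additive constant, so the gradient of the
negative log-probability is simply M{q}; this is why the gradient can be written as
C{grad = lambda q, t: q} above (the second argument is not used in this example).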

The empty list C{states} is initialized; it will serve to store the samples drawn.
In the loop, C{sampler.sample()} is called repeatedly. After each call, the current state
of the Markov chain is available as C{sampler.state}, and this state is appended to the
sample storage list.

Then the acceptance rate is printed, the numeric values are extracted from the
L{State} objects in C{states}, and a histogram is created and finally plotted.
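
If the observed acceptance rate is lower than desired, the integration timestep can be
decreased between calls to C{sampler.sample()} via the C{timestep} property defined
below, for instance:

>>> sampler.timestep = 0.75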
53 """

import numpy
import csb.numeric

from csb.statistics.samplers import State
from csb.statistics.samplers.mc import AbstractSingleChainMC
from csb.statistics.samplers.mc import SimpleProposalCommunicator
from csb.statistics.samplers.mc.propagators import MDPropagator
from csb.numeric.integrators import FastLeapFrog
from csb.numeric import InvertibleMatrix
66 """
67 Hamilton Monte Carlo (HMC, also called Hybrid Monte Carlo by the inventors,
68 Duane, Kennedy, Pendleton, Duncan 1987).
69
70 @param pdf: Probability density function to be sampled from
71 @type pdf: L{csb.statistics.pdf.AbstractDensity}
72
73 @param state: Inital state
74 @type state: L{State}
75
76 @param gradient: Gradient of the negative log-probability
77 @type gradient: L{AbstractGradient}
78
79 @param timestep: Timestep used for integration
80 @type timestep: float
81
82 @param nsteps: Number of integration steps to be performed in
83 each iteration
84 @type nsteps: int
85
86 @param mass_matrix: Mass matrix
87 @type mass_matrix: n-dimensional L{InvertibleMatrix} with n being the dimension
88 of the configuration space, that is, the dimension of
89 the position / momentum vectors
90
91 @param integrator: Subclass of L{AbstractIntegrator} to be used for
92 integrating Hamiltionian equations of motion
93 @type integrator: L{AbstractIntegrator}
94
95 @param temperature: Pseudo-temperature of the Boltzmann ensemble
96 M{p(x) = 1/N * exp(-1/T * E(x))} with the
97 pseudo-energy defined as M{E(x) = -log(p(x))}
98 where M{p(x)} is the PDF under consideration
99 @type temperature: float
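
    A construction sketch, reusing the 1D normal example from the module docstring and
    passing an explicit mass matrix; this assumes that an L{InvertibleMatrix} can be
    built directly from a numpy array:

        >>> from csb.numeric import InvertibleMatrix
        >>> mass_matrix = InvertibleMatrix(numpy.array([[1.]]))
        >>> sampler = HMCSampler(Normal(), State(numpy.array([1.])), lambda q, t: q,
        ...                      timestep=1.5, nsteps=30, mass_matrix=mass_matrix)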
100 """

    def __init__(self, pdf, state, gradient, timestep, nsteps,
                 mass_matrix=None, integrator=FastLeapFrog, temperature=1.):

    @property
    def timestep(self):
        return self._timestep

    @timestep.setter
    def timestep(self, value):
        self._timestep = float(value)
        # Keep an already constructed propagator in sync with the new timestep
        if "_propagator" in dir(self):
            self._propagator.timestep = self._timestep

    @property
    def nsteps(self):
        return self._nsteps

    @nsteps.setter
    def nsteps(self, value):
        self._nsteps = int(value)

    @property
    def mass_matrix(self):
        return self._mass_matrix

    @mass_matrix.setter
    def mass_matrix(self, value):
        self._mass_matrix = value
        # Keep an already constructed propagator in sync with the new mass matrix
        if "_propagator" in dir(self):
            self._propagator.mass_matrix = self._mass_matrix



class RWMCSampler(AbstractSingleChainMC):
    """
    Random Walk Metropolis Monte Carlo implementation
    (Metropolis, Rosenbluth, Rosenbluth, Teller, Teller 1953; Hastings, 1970).

    @param pdf: Probability density function to be sampled from
    @type pdf: L{csb.statistics.pdf.AbstractDensity}

    @param state: Initial state
    @type state: L{State}

    @param stepsize: Serves to set the step size in
                     proposal_density, e.g. for automatic acceptance
                     rate adaptation
    @type stepsize: float

    @param proposal_density: The proposal density as a function f(x, s)
                             of the current state x and the stepsize s.
                             By default, the proposal density is uniform,
                             centered around x, and has width s.
    @type proposal_density: callable

    @param temperature: Pseudo-temperature of the Boltzmann ensemble
                        M{p(x) = 1/N * exp(-1/T * E(x))} with the
                        pseudo-energy defined as M{E(x) = -log(p(x))},
                        where M{p(x)} is the PDF under consideration
    @type temperature: float
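
    A minimal usage sketch, reusing the 1D normal example from the module docstring and
    relying on the default uniform proposal density:

        >>> sampler = RWMCSampler(Normal(), State(numpy.array([1.])), stepsize=1.)
        >>> samples = []
        >>> for i in range(5000):
        ...     sampler.sample()
        ...     samples.append(sampler.state)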
206 """

    def __init__(self, pdf, state, stepsize=1., proposal_density=None, temperature=1.):

    @property
    def stepsize(self):
        return self._stepsize

    @stepsize.setter
    def stepsize(self, value):
        self._stepsize = float(value)