# Source code for DABI_UQToolbox.DABIUncertaintyQuantification

# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 15:15:21 2012

@author: dabi
"""

import numpy as np

from numpy import linalg as la

from scipy import stats

from mpi4py import MPI

import itertools

import os.path

from time import clock

import sys

import collections

from prettytable import PrettyTable

from DABISpectralToolbox import DABISpectral1D
from DABISpectralToolbox import DABISpectralND
from DABISpectralToolbox import DABISparseGrids
from DABISpectralToolbox import orthpol

from sobol_lib import i4_sobol_generate

def Experiments(f,samples,params,paramUpdate,action):
    """ Compute the Experiments f on the samples.

    The implementation uses MPI for parallel computations: rank 0 splits
    ``samples`` across all ranks, every rank runs its own share
    sequentially, and the partial results are gathered back on rank 0.

    INPUT:
    - f : experiment function handle. Signature: f( params )
    - samples : nd-array with the set of samples grouped by the first dimension
    - params: set of parameters to be passed to the experiment
    - paramUpdate: function handle injecting one sample into the parameter
      set. Signature: params = paramUpdate(params, sample)
    - action: post processing action folding each new solution into the
      accumulated list. Signature: sols = action(sols, sol)

    OUTPUT: On rank 0, the list of computed values ordered by the first
    dimension of ``samples``. All other ranks return None.
    """
    def iterF(f,samples,params,paramUpdate,action):
        # Run the experiment sequentially on this rank's share of samples.
        sols = []
        # LOG_FILE_BASE = "MonteCarlo"
        # i = 0
        # while ( os.path.isfile("%s-Run%d-Proc%d.log" % (LOG_FILE_BASE,i,myrank)) ):
        #     i = i + 1
        # LOG_FILE = "%s-Run%d-Proc%d.log" % (LOG_FILE_BASE,i,myrank)
        for i in xrange(0,samples.shape[0]):
            # Inject the i-th sample into the parameter set, then solve.
            params = paramUpdate(params,samples[i])
            sol = f(params)
            print "Proc %d run %d/%d" % (myrank,i+1,len(samples))
            # log = open(LOG_FILE,'a')
            # log.write("Proc %d run %d/%d\n" % (myrank,i+1,len(samples)))
            # log.close()
            #sols.append( sol )
            sols = action(sols,sol)
        return sols
    comm = MPI.COMM_WORLD
    nprocs = comm.Get_size()
    myrank = comm.Get_rank()
    if myrank == 0:
        # Split the input array
        splittedSamples = np.array_split(samples,nprocs)
        startTime = clock()
    else:
        splittedSamples = None
    # Rank 0 scatters one chunk per rank; every rank (including 0) works.
    samplesPart = comm.scatter(splittedSamples,root=0)
    splittedSolutions = iterF(f,samplesPart,params,paramUpdate,action)
    solutionsList = comm.gather(splittedSolutions)
    if myrank == 0:
        # Reassemble post processing data
        # To be fixed for MPI!!!!!! Use the proper action..
        # Flatten the per-rank result lists back into one ordered list;
        # np.array_split preserves the original sample ordering.
        solutions = [inner for outer in solutionsList for inner in outer]
        stopTime = clock()
        print "Elapsed Time: %f s" % (stopTime-startTime)
        return solutions
def MonteCarlo(dist,N,experiment,params,paramUpdate,postProc):
    """ Run Monte Carlo Simulations.

    Draws N random samples from ``dist`` and evaluates ``experiment`` on
    each of them through :func:`Experiments` (MPI-parallel).

    INPUT:
    - dist: scipy.stats-like distribution exposing ``rvs``
    - N: number of Monte Carlo samples
    - experiment, params, paramUpdate, postProc: forwarded to Experiments

    OUTPUT: tuple ``(samples, results)``
    """
    drawn = dist.rvs(size=N)
    results = Experiments(experiment, drawn, params, paramUpdate, postProc)
    return (drawn, results)
def QuasiMonteCarlo(dists,N,experiment,params,paramUpdate,postProc,skip=None):
    """ Run Quasi Monte Carlo Simulations.

    Draws N low-discrepancy samples from a Sobol sequence, maps them to the
    target distributions with the inverse-CDF method, and evaluates
    ``experiment`` on each through :func:`Experiments` (MPI-parallel).

    INPUT:
    - dists: list of scipy.stats-like distributions (one per dimension)
    - N: number of samples
    - experiment, params, paramUpdate, postProc: forwarded to Experiments
    - skip: [optional] number of initial Sobol points to skip. When None a
      random skip in [2^ceil(log2(dim+1)), 2^(ceil(log2(dim+1))+1)) is drawn.

    OUTPUT: tuple ``(samples, results, skip)`` — ``skip`` is returned so the
    run can be reproduced or continued.
    """
    # Generate uniformly distributed samples using Sobol sequence
    if skip is None:  # was `== None` — identity test is the correct idiom
        dim = len(dists)
        skip = int( np.random.uniform(2**np.ceil(np.log2(dim+1)),
                                      2**(np.ceil(np.log2(dim+1))+1)) )
    unifSamples = i4_sobol_generate(len(dists),N,skip)
    # i4_sobol_generate returns (dim, N); transpose so samples are (N, dim).
    samples = np.zeros(unifSamples.T.shape)
    for i in range(0,len(dists)):
        # Inverse CDF mapping from uniform [0,1] to the target distribution.
        samples[:,i] = dists[i].ppf(unifSamples[i,:])
    return (samples,Experiments(experiment, samples, params, paramUpdate, postProc), skip)
[docs]def gPC_MultiIndex(dist,poly,distPoly,N,NI=[]): """ gPC_MultiIndex(dist,poly,distPoly,N) Generates the Multi Index basis Vandermonde matrix using the list of distributions of the parameters space approximated by the selected polynomials. INPUT: dist: list of n distributions representing the n-Dimensional random space poly: list of n polynomials (class DABISpectral1D) used for approximating each of the directions in the random space distPoly: list of n standard distributions associated to the polynomial selected N: maximum polynomial order in the basis NI: (list, int) maximum polynomial order for additional interpolation basis OUTPUT: dictionary with the following attributes 'x': tensor product of collocation points in the standard distributions 'w': tensor product of the weights for cubature/projection rules (sum to 1 or prod(gamma0) respectively) 'vals': tensor product of collocation points 'V': Pascal's simplex of the basis functions """ if (len(dist) != len(distPoly) or len(distPoly) != len(poly)) : print "The number of elements in dist, distPoly and poly is not consistent" return else: DIM = len(dist) pp = DABISpectralND.PolyND(poly) (x, w) = pp.GaussQuadrature(np.tile(N,DIM),normed=False) # Compute relative samples using inverse CDF method vals = np.zeros(np.asarray(x).shape) for i in range(0,DIM): vals[:,i] = dist[i].ppf(distPoly[i].cdf(x[:,i])) # Compute Generalized Vandermonde if required xs = [] for i in range(0,DIM): x1D, w = poly[i].GaussQuadrature(N) xs.append(x1D) V = pp.GradVandermondePascalSimplex(xs, N, np.zeros(DIM), None) VI = [] for j in range(0,len(NI)): xs = [] for i in range(0,DIM): x1D, w = poly[i].GaussQuadrature(NI[j]) xs.append(x1D) VI.append( pp.GradVandermondePascalSimplex(xs, NI[j], np.zeros(DIM), None) ) # Create Dictionary dictionary = {'x': x, 'w': w, 'vals': vals, 'V': V, 'listVI': VI} return dictionary
[docs]def PCM_Tensor(dist,poly,distPoly,Ns,allocBasis=True): """ PCM_Tensor(dist,poly,distPoly,N) Generates the full tensor grid using the list of distributions of the parameters space approximated by the selected polynomials. INPUT: dist: list of n distributions representing the n-Dimensional random space poly: list of n polynomials (class DABISpectral1D) used for approximating each of the directions in the random space distPoly: list of n standard distributions associated to the polynomial selected Ns: list of n integers for the order of polynomial expansion in each direction allocBasis: boolean. True if you want the tensor basis. OUTPUT: dictionary with the following attributes 'x': tensor product of collocation points in the standard distributions 'w': tensor product of the weights for cubature/projection rules (sum to 1 or prod(gamma0) respectively) 'vals': tensor product of collocation points 'V': tensor basis functions """ if (len(dist) != len(distPoly) or len(distPoly) != len(poly) or len(poly) != len(Ns)) : print "The number of elements in dist, distPoly and poly is not consistent" return else: DIM = len(dist) pp = DABISpectralND.PolyND(poly) if allocBasis: (x, w) = pp.GaussQuadrature(Ns,normed=False) else: (x, w) = pp.GaussQuadrature(Ns,normed=True) # Compute relative samples using inverse CDF method vals = np.zeros(np.asarray(x).shape) for i in range(0,DIM): vals[:,i] = dist[i].ppf(distPoly[i].cdf(x[:,i])) # Compute Generalized Vandermonde if required if allocBasis: xs = [] for i in range(0,DIM): x1D, w1D = poly[i].GaussQuadrature(Ns[i]) xs.append(x1D) V = pp.GradVandermonde(xs, Ns, np.zeros(DIM), None) # Create Dictionary if allocBasis: dictionary = {'x': x, 'w': w, 'vals': vals, 'V': V} else: dictionary = {'x': x, 'w': w, 'vals': vals} return dictionary
def UQ_PCM_Tensor(solve,params,solvePostProcessing,dists,paramUpdate,polys,distsPoly,Ns,method):
    """
    DESCRIPTION: UQ_PCM_Tensor(solve,params,solvePostProcessing,dists,paramUpdate,polys,distsPoly,N,method)
    Performs Uncertainty Quantification analysis of the system described by
    rhs using the Probabilistic Collocation Method by Tensor Product of the
    random space.

    INPUT:
    - solve: function handle to the experiment to be quantified.
      Signature: sol = solve(sample,params)
    - params: parameters to be passed to the ODE system (these contain the
      uncertain parameter as well)
    - solvePostProcessing: post processing function called after all the
      simulation have been run. Signature: sols = solvePostProcessing(sols)
    - dists: list of n scipy.stats distribution functions for each uncertain
      parameter
    - paramUpdate: function handle for updating the set of uncertain
      parameters. Signature: params = paramUpdate(params,sample)
    - polys: list of n DABISpectral1D polynomials using for approximating the
      distribution of the random parameters
    - distsPoly: list of n standard distributions associated to the selected
      approximating polynomials
    - Ns: list of n integers for the order of polynomial expansion in each
      random direction
    - method: 'projection' use projection rule in order to compute
      statistics, 'cubature' use cubature rule in order to compute statistics

    OUTPUT: dictionary with the following attributes (rank 0 only; other
    MPI ranks return None)
    - 'Samples': list of collocation points (samples) used for UQ
    - 'Y': solutions at collocation points
    - 'YHat': Galerkin coefficients (available only if method == 'projection')
    - 'meanSol': estimated mean
    - 'varSol': estimated variance
    """
    comm = MPI.COMM_WORLD
    nprocs = comm.Get_size()
    myrank = comm.Get_rank()
    if (len(dists) != len(distsPoly) or len(distsPoly) != len(polys) or len(polys) != len(Ns)) :
        print "The number of elements in dist, distPoly and poly is not consistent"
        return
    else:
        DIM = len(dists)
    if (method != 'projection') and (method != 'cubature'):
        print "The selected method '%s' does not exist." % (method)
        return
    ###################################################################################
    #                                PRE - PROCESSING                                 #
    ###################################################################################
    # Projection needs the tensor basis V and un-normalized weights;
    # cubature only needs normalized weights (see PCM_Tensor).
    if method == 'projection':
        dictionary = PCM_Tensor(dists,polys,distsPoly,Ns,allocBasis=True)
        V = dictionary['V']
    if method == 'cubature':
        dictionary = PCM_Tensor(dists,polys,distsPoly,Ns,allocBasis=False)
    w = dictionary['w']
    vals = dictionary['vals']
    Ncoll = len(vals)
    ###################################################################################
    #                                   SIMULATION                                    #
    ###################################################################################
    def action(sols,sol):
        # Accumulate each solution as it is produced (used by Experiments).
        sols.append(sol)
        return sols
    solutions = Experiments(solve,vals,params,paramUpdate,action)
    if myrank == 0:
        solutions = solvePostProcessing(solutions)
    ###################################################################################
    #                                 POST-PROCESSING                                 #
    ###################################################################################
    if myrank == 0:
        # Reshape the solutions so that they are [Ncoll x NDOF], where NDOF is the total DOF
        # Example: for a time dep. ode NDOF = DOF x len(t)
        solsShape = solutions[0].shape
        NDOF = np.prod(solsShape)
        solsRS = np.zeros( (Ncoll, NDOF) )
        for i in range(0,Ncoll):
            # For each solution
            solsRS[i,:] = np.reshape(solutions[i],np.prod(solsShape))
        # Post processing for the computation of the PCM coefficients
        # using discrete projection
        def PCM_PostProcProjection(sols):
            # Compute gPC post processing on sols
            # with the following order: DOF, samples, sols
            solsHat = np.zeros(sols.shape)
            # Weight each row of V by its quadrature weight.
            # NOTE(review): assumes w has shape (Ncoll, 1) so the tile
            # broadcasts across basis columns — confirm against
            # PolyND.GaussQuadrature's return shape.
            A = V * np.tile(w,(1,V.shape[1]))
            for i in range(0,NDOF):
                # For each NDOF
                solsHat[:,i] = np.dot(A.T,sols[:,i])
            return solsHat
        if method == 'projection':
            solsHatRS = PCM_PostProcProjection(solsRS)
            # Compute mean: gamma is the product of the 0-th normalization
            # constants of the 1D polynomials.
            gamma = polys[0].Gamma(0)
            for i in range(1,DIM):
                gamma = gamma * polys[i].Gamma(0)
            meanSol = solsHatRS[0,:] / np.sqrt(gamma)
            # Compute variance as the sum of squared non-mean coefficients
            # (Parseval-type identity for the orthogonal expansion).
            varSol = np.zeros(NDOF)
            for i in range(0,NDOF):
                varSol[i] = sum(solsHatRS[1:,i]**2.)
        if method == 'cubature':
            # Mean and variance directly from the quadrature rule:
            # E[u] = sum_i w_i u_i, Var[u] = E[u^2] - E[u]^2.
            meanSol = np.zeros(NDOF)
            varSol = np.zeros(NDOF)
            for i in range(0,NDOF):
                meanSol[i] = np.dot(solsRS[:,i],w)
                varSol[i] = np.dot(solsRS[:,i]**2.,w) - meanSol[i]**2.
        # Reshape the solutions and the statistics to the original shapes
        sols = np.zeros((Ncoll,)+solsShape)
        for i in range(0,Ncoll):
            sols[i,:] = np.reshape(solsRS[i,:],solsShape)
        meanSol = np.reshape(meanSol,solsShape)
        varSol = np.reshape(varSol,solsShape)
        if method == 'projection':
            solsHat = np.zeros((Ncoll,)+solsShape)
            for i in range(0,Ncoll):
                solsHat[i,:] = np.reshape(solsHatRS[i,:],solsShape)
        ###################################################################################
        #                                     OUTPUT                                      #
        ###################################################################################
        # Prepare a dictionary with necessary data
        if method == 'projection':
            dictionary = {'Samples': vals, 'Y': sols, 'YHat': solsHat, 'meanSol': meanSol, 'varSol': varSol}
        if method == 'cubature':
            dictionary = {'Samples': vals, 'Y': sols, 'meanSol': meanSol, 'varSol': varSol}
        return dictionary
[docs]def UQ_SparseGrid(solve,params,solvePostProcessing,dists,paramUpdate,method,k,sym=1): """ DESCRIPTION: UQ_PCM_Tensor(solve,params,solvePostProcessing,dists,paramUpdate,polys,distsPoly,N,method) Performs Uncertainty Quantification analysis of the system described by rhs using the Probabilistic Collocation Method by Tensor Product of the random space. INPUT: - solve: function handle to the experiment to be quantified. Signature: sol = solve(sample,params) - params: parameters to be passed to the ODE system (these contain the uncertain parameter as well) - solvePostProcessing: post processing function called after all the simulation have been run. Signature: sols = solvePostProcessing(sols) - dists: list of n scipy.stats distribution functions for each uncertain parameter - paramUpdate: function handle for updating the set of uncertain parameters. Signature: params = paramUpdate(params,sample) - method: Sparse Grid method selected. See description below. - k: accuracy of the Sparse Grid rule - sym: [optional, default = 1] symmetry parameter for Sparse Grid package OUTPUT: dictionary with the following attributes - 'Samples': list of collocation points (samples) used for UQ - 'Y': solutions at collocation points - 'meanSol': estimated mean - 'varSol': estimated variance Description: The methods available for sparse grid are: * KPU = Nested rule for unweighted integral over [0,1] * KPN = Nested rule for integral with Gaussian weight * GQU = Gaussian quadrature for unweighted integral over [0,1] (Gauss-Legendre) * GQN = Gaussian quadrature for integral with Gaussian weight (Gauss-Hermite) * func = any function provided by the user that accept level l and returns nodes n and weights w for univariate quadrature rule with polynomial exactness 2l-1 as [n w] = feval(func,level) """ comm = MPI.COMM_WORLD nprocs = comm.Get_size() myrank = comm.Get_rank() DIM = len(dists) if (method != DABISparseGrids.KPU) and (method != DABISparseGrids.KPN) and (method != 
DABISparseGrids.GQU) and (method != DABISparseGrids.GQN) and (not callable(method)): print "The selected method '%s' does not exist." % (method) return ################################################################################### # PRE - PROCESSING # ################################################################################### sg = DABISparseGrids.SparseGrid(method, DIM, k, sym); (n,w) = sg.sparseGrid(); vals = n.copy(); if (method == DABISparseGrids.KPN) or (method == DABISparseGrids.GQN): ndist = stats.norm(0.,1.); for i in range(0,DIM): vals[:,i] = dists[i].ppf(ndist.cdf(n[:,i])) elif (method == DABISparseGrids.KPU) or (method == DABISparseGrids.GQU): udist = stats.uniform(0.,1.); for i in range(0,DIM): vals[:,i] = dists[i].ppf(udist.cdf(n[:,i])) Ncoll = vals.shape[0] ################################################################################### # SIMULATION # ################################################################################### def action(sols,sol): sols.append(sol) return sols solutions = Experiments(solve,vals,params,paramUpdate,action) if myrank == 0: solutions = solvePostProcessing(solutions) ################################################################################### # POST-PROCESSING # ################################################################################### if myrank == 0: # Reshape the solutions so that they are [Ncoll x NDOF], where NDOF is the total DOF # Example: for a time dep. ode NDOF = DOF x len(t) solsShape = solutions[0].shape NDOF = np.prod(solsShape) solsRS = np.zeros( (Ncoll, NDOF) ) for i in range(0,Ncoll): # For each solution solsRS[i,:] = np.reshape(solutions[i],np.prod(solsShape)) if method == 'cubature': meanSol = np.zeros(NDOF) varSol = np.zeros(NDOF) for i in range(0,NDOF): meanSol[i] = np.dot(solsRS[:,i],w) varSol[i] = np.dot(solsRS[:,i]**2.,w) - meanSol[i]**2. 
# Reshape the solutions and the statistics to the original shapes sols = np.zeros((Ncoll,)+solsShape) for i in range(0,Ncoll): sols[i,:] = np.reshape(solsRS[i,:],solsShape) meanSol = np.reshape(meanSol,solsShape) varSol = np.reshape(varSol,solsShape) ################################################################################### # OUTPUT # ################################################################################### # Prepare a dictionary with necessary data dictionary = {'Samples': vals, 'Y': sols, 'meanSol': meanSol, 'varSol': varSol} return dictionary
[docs]class MEPCM_Element: ########### # PARAMETERS CONSTRUCTED BY THE USER OR REFINEMENT PROCEDURE DIM = 0; dists = []; # Stores the distributions along the D dimensions distsCond = []; # Stores the distribution scalings along the D dimensions el = []; # Stores the element coordinates along the D dimensions Ns = []; # Stores the order along the D dimensions finite = False; # True = the element is finite in all the dimensions, False = the element is infinite in at least one dimension ########### # DIRTY PARAMETER dirty = True; # True = tensor grid needs to be generated, False = nothing needs to be done ########### # PARAMETERS CONSTRUCED BY THE TENSOR GRID GENERATOR # 1D Parameters poly1D = []; # Stores the polynomial parameters along the D dimensions x1D = []; # Stores the collocation points needed by the element (in the scaled element reference frame) vals1D = []; # Stores the collocation points needed by the element w1D = []; # Stores the weights corresponding to the collocation points pr1D = []; # Stores the probability of the element Pr(I_B_k = 1) dim1D = []; # Stores the 1D element size V1D = []; # Stores the Vandermonde matrix for the element finite1D = []; # True = the element is finite the dimension, False = the element is infinite the dimension # ND Parameters xEl = []; # Stores the collocation points needed by the element (in the scaled element reference frame) valsEl = []; # Stores the collocation points needed by the element wEl = []; # Stores the weights corresponding to the collocation points prEl = 0.0; # Stores the probability of the element Pr(I_B_k = 1) dimEl = 0.0; # Stores the N-D hyper-volume of the element VEl = []; # Stores the Vandermonde matrix for the element orderEl = []; # Stores the tuples containing the order combination for each basis function def __init__(self, el, dists, distsCond, Ns): if (len(el) != len(dists) or len(dists) != len(Ns) or len(Ns) != len(distsCond)) : print "The number of elements in rs, Ns, ks and norms is not 
consistent" return self.DIM = len(el); self.dists = dists; self.distsCond = distsCond; self.el = el; self.Ns = Ns; self.poly1D = []; # Stores the polynomial parameters along the D dimensions self.x1D = []; # Stores the collocation points needed by the element (in the scaled element reference frame) self.vals1D = []; # Stores the collocation points needed by the element self.w1D = []; # Stores the weights corresponding to the collocation points self.pr1D = []; # Stores the probability of the element Pr(I_B_k = 1) self.dim1D = []; # Stores the 1D element size self.V1D = []; # Stores the Vandermonde matrix for the element self.finite1D = []; # True = the element is finite the dimension, False = the element is infinite the dimension # ND Parameters self.xEl = []; # Stores the collocation points needed by the element (in the scaled element reference frame) self.valsEl = []; # Stores the collocation points needed by the element self.wEl = []; # Stores the weights corresponding to the collocation points self.prEl = 0.0; # Stores the probability of the element Pr(I_B_k = 1) self.dimEl = 0.0; # Stores the N-D hyper-volume of the element self.VEl = []; # Stores the Vandermonde matrix for the element self.orderEl = []; # Stores the tuples containing the order combination for each basis function self.prEl = 1.0; self.dimEl = 1.0; for eld,distd in zip(self.el,self.dists): # Compute the elements probability pr = distd.cdf(eld[1])-distd.cdf(eld[0]); d = eld[1]-eld[0]; self.pr1D.append(pr); self.dim1D.append(d); self.prEl *= pr; self.dimEl *= d; # Check if finite element self.finite = True; for eld in self.el: self.finite1D.append(not any(abs(eld) == np.inf)); self.finite = self.finite and not any(abs(eld) == np.inf); self.dirty = True; def __getstate__(self): return dict(DIM = self.DIM, # dists = self.dists, # distsCond = self.distsCond, el = self.el, Ns = self.Ns, finite = self.finite, dirty = self.dirty, poly1D = self.poly1D, x1D = self.x1D, vals1D = self.vals1D, w1D = 
self.w1D, pr1D = self.pr1D, dim1D = self.dim1D, V1D = self.V1D, finite1D = self.finite1D, xEl = self.xEl, valsEl = self.valsEl, wEl = self.wEl, prEl = self.prEl, dimEl = self.dimEl, VEl = self.VEl, orderEl = self.orderEl); # def __reduce__(self): # return (MEPCM_Element, ([],[],[],[]), self.__getstate__()); def __setstate__(self,state,dists=None,distsCond=None): ########### # PARAMETERS CONSTRUCTED BY THE USER OR REFINEMENT PROCEDURE self.DIM = state['DIM']; # self.dists = state['dists']; # Stores the distributions along the D dimensions # self.distsCond = state['distsCond']; # Stores the distribution scalings along the D dimensions self.el = state['el']; # Stores the element coordinates along the D dimensions self.Ns = state['Ns']; # Stores the order along the D dimensions self.dists = dists; self.distsCond = distsCond; self.finite = state['finite']; # True = the element is finite in all the dimensions, False = the element is infinite in at least one dimension ########### # DIRTY PARAMETER self.dirty = state['dirty']; # True = tensor grid needs to be generated, False = nothing needs to be done ########### # PARAMETERS CONSTRUCED BY THE TENSOR GRID GENERATOR # 1D Parameters self.poly1D = state['poly1D']; # Stores the polynomial parameters along the D dimensions self.x1D = state['x1D']; # Stores the collocation points needed by the element (in the scaled element reference frame) self.vals1D = state['vals1D']; # Stores the collocation points needed by the element self.w1D = state['w1D']; # Stores the weights corresponding to the collocation points self.pr1D = state['pr1D']; # Stores the probability of the element Pr(I_B_k = 1) self.dim1D = state['dim1D']; # Stores the 1D element size self.V1D = state['V1D']; # Stores the Vandermonde matrix for the element self.finite1D = state['finite1D']; # True = the element is finite the dimension, False = the element is infinite the dimension # ND Parameters self.xEl = state['xEl']; # Stores the collocation points needed by the 
element (in the scaled element reference frame) self.valsEl = state['valsEl']; # Stores the collocation points needed by the element self.wEl = state['wEl']; # Stores the weights corresponding to the collocation points self.prEl = state['prEl']; # Stores the probability of the element Pr(I_B_k = 1) self.dimEl = state['dimEl']; # Stores the N-D hyper-volume of the element self.VEl = state['VEl']; # Stores the Vandermonde matrix for the element self.orderEl = state['orderEl']; # Stores the tuples containing the order combination for each basis function def __clear1D(self): self.poly1D = []; # Stores the polynomial parameters along the D dimensions self.x1D = []; # Stores the collocation points needed by the element (in the scaled element reference frame) self.vals1D = []; # Stores the collocation points needed by the element self.w1D = []; # Stores the weights corresponding to the collocation points self.V1D = []; # Stores the Vandermonde matrix for the element def __clearTensor(self): self.xEl = []; # Stores the collocation points needed by the element (in the scaled element reference frame) self.valsEl = []; # Stores the collocation points needed by the element self.wEl = []; # Stores the weights corresponding to the collocation points self.VEl = []; # Stores the Vandermonde matrix for the element self.orderEl = []; # Stores the tuples containing the order combination for each basis function def getstate(self): return self.__getstate__(); def setstate(self,state,dists=None,distsCond=None): self.__setstate__(state,dists,distsCond); def __InitBasis1D(self, allocBasis=True): """ Basis1D([allocBasis=True]) Generates the 1D basis using the list of distributions of the parameters space approximated by properly construced orthogonal polynomials. INPUT: ``allocBasis``: boolean. True if you want the tensor basis. 
OUTPUT: """ self.__clear1D(); ############# # Generate 1D basis for each dimension for d in range(0,self.DIM): dist = self.dists[d]; N = self.Ns[d]; eld = self.el[d]; distCond = self.distsCond[d]; # Scale the element using user provided function elSC = distCond(eld, 1); # Prepare the measure function if (abs(eld[0]) == np.inf) or (abs(eld[1]) == np.inf): # No mapping to [-1,1]. TAILS ARE ASSUMED TO GET RAPIDLY TO 0 def wf_cap(y,i): return dist.pdf(distCond(y,-1))/self.prEl; else: def wf_cap(y,i): centEl = (eld[1] + eld[0])/2.; dimEl = (eld[1]-eld[0]); return dist.pdf(y * dimEl/2. + centEl)/self.prEl * dimEl * 0.5**self.DIM; # Generate basis and Gauss points ncapm = 500; mc = 1; mp = 0; xp = np.array([]); yp = np.array([]); eps = 1e-13; iq = 0; # 0: Use qgp idelta = 2; irout = 0; # 1: Use sti internally, 0: Use lancz if (abs(elSC[0]) == np.inf) or (abs(elSC[1]) == np.inf): # Do not apply scaling (we assume tails decay!!) x = elSC; if (abs(elSC[0]) == np.inf): finl = False; else: finl = True; if (abs(elSC[1]) == np.inf): finr = False; else: finr = True; else: x = np.linspace(-1.,1.,mc+1); centEl = (elSC[1] + elSC[0])/2.; dimEl = (elSC[1]-elSC[0])/2.; finr = True; finl = True; endl = x[:-1]; endr = x[1:]; (alphaCap, betaCap, ncapCap, kountCap, ierrCap, ieCap) = orthpol.dmcdis(N, ncapm, mc, mp, xp, yp, wf_cap, eps, iq, idelta, irout, finl, finr, endl, endr); self.poly1D.append({'alpha' : alphaCap, 'beta' : betaCap}); (xg,wg,ierr) = orthpol.dgauss(N,alphaCap,betaCap,eps); wg /= np.sum(wg); if finl and finr: x = xg * dimEl + centEl; self.x1D.append(x); else: x = xg; self.x1D.append(x); self.w1D.append(wg); self.vals1D.append(distCond(x,-1)); if allocBasis: VCap = self.Vandermonde1D(self.vals1D[-1],d); self.V1D.append(VCap); def __InitTensor(self, allocBasis=True): """ Tensor([allocBasis=True]) Generates the full tensor grid using the list of distributions of the parameters space approximated by properly construced orthogonal polynomials. INPUT: ``allocBasis``: boolean. 
True if you want the tensor basis. OUTPUT: """ self.__clearTensor(); ############# # Generate ND basis xs = []; vals = []; ords = []; wKron = np.array([1]); if allocBasis: VKron = np.array([1]); for d in range(0,self.DIM): ords.append(range(0,self.Ns[d])) xs.append(self.x1D[d]); vals.append(self.vals1D[d]); wKron = np.kron(wKron, self.w1D[d]); if allocBasis: VKron = np.kron(VKron, self.V1D[d]); self.xEl = np.asarray(list(itertools.product(*xs))) self.valsEl = np.asarray(list(itertools.product(*vals))) self.orderEl = list(itertools.product(*ords)) self.wEl = wKron; if allocBasis: self.VEl = VKron; def Vandermonde1D(self,x,d): if not self.finite1D[d]: # Map to the Scaled values xSC = self.distsCond[d](x,1); if self.finite1D[d]: # Scale to [-1,1] if dimension is finite centEl = (self.el[d][1] + self.el[d][0])/2. dimEl = (self.el[d][1] - self.el[d][0])/2. xSC = (x - centEl)/dimEl return DABISpectral1D.VandermondeAB(xSC,self.Ns[d]-1,self.poly1D[d]['alpha'],self.poly1D[d]['beta']);
[docs] def TensorVandermonde(self,x,usekron=True): """ Takes a 2d-array (preferibly (N,DIM)) or list (of array of points for each dimension) If kron == True : take kron product of 1D vandermonde matrices If kron == False : take column multiplication of 1D matrices """ if type(x) == list: x = np.array(x); if (x.shape[0] == self.DIM) and (x.shape[1] != self.DIM): x = x.T; if (x.shape[1] != self.DIM) : print "The number of elements in x is not consistent with the dimension of the element" return if (x.shape[0] == 0): print "The number of points in x is 0." return if usekron: VKron = np.array([1.]); for d in range(0,self.DIM): VKron = np.kron(VKron, self.Vandermonde1D(x[:,d],d)); else: VKron = np.ones((x.shape[0],1)) for d in range(0,self.DIM): VKronNew = np.zeros((VKron.shape[0],VKron.shape[1] * (self.Ns[d]))) V = self.Vandermonde1D(x[:,d],d); for col in range(0,VKron.shape[1]): VKronNew[:,col*(self.Ns[d]):(col+1)*(self.Ns[d])] = np.tile(VKron[:,col],(self.Ns[d],1)).T * V VKron = VKronNew return VKron;
def GammaPower(self,power): maxBasis = self.VEl.shape[1]; dims = np.ones(power) * maxBasis; EIdxs = list(itertools.product(*[range(0,maxBasis) for i in range(0,power)])); E = np.zeros(dims); # Compute the number of gauss points needed in order to get exact approximation Ngauss = []; for N in self.Ns: Ngauss.append( int(np.ceil( power*N/2. )) ); # Compute recursion coefficients for the number of gammas required poly1D = []; x1D = []; w1D = []; vals1D = []; V1D = []; for d in range(0,self.DIM): dist = self.dists[d]; N = Ngauss[d]; eld = self.el[d]; distCond = self.distsCond[d]; # Scale the element using user provided function elSC = distCond(eld, 1); # Prepare the measure function if (abs(eld[0]) == np.inf) or (abs(eld[1]) == np.inf): # No mapping to [-1,1]. TAILS ARE ASSUMED TO GET RAPIDLY TO 0 def wf_cap(y,i): return dist.pdf(distCond(y,-1))/self.prEl; else: def wf_cap(y,i): centEl = (eld[1] + eld[0])/2.; dimEl = (eld[1]-eld[0]); return dist.pdf(y * dimEl/2. + centEl)/self.prEl * dimEl * 0.5**self.DIM; # Generate basis and Gauss points ncapm = 500; mc = 1; mp = 0; xp = np.array([]); yp = np.array([]); eps = 1e-13; iq = 0; # 0: Use qgp idelta = 2; irout = 0; # 1: Use sti internally, 0: Use lancz if (abs(elSC[0]) == np.inf) or (abs(elSC[1]) == np.inf): # Do not apply scaling (we assume tails decay!!) 
x = elSC; if (abs(elSC[0]) == np.inf): finl = False; else: finl = True; if (abs(elSC[1]) == np.inf): finr = False; else: finr = True; else: x = np.linspace(-1.,1.,mc+1); centEl = (elSC[1] + elSC[0])/2.; dimEl = (elSC[1]-elSC[0])/2.; finr = True; finl = True; endl = x[:-1]; endr = x[1:]; (alphaCap, betaCap, ncapCap, kountCap, ierrCap, ieCap) = orthpol.dmcdis(N, ncapm, mc, mp, xp, yp, wf_cap, eps, iq, idelta, irout, finl, finr, endl, endr); poly1D.append({'alpha' : alphaCap, 'beta' : betaCap}); (xg,wg,ierr) = orthpol.dgauss(N,alphaCap,betaCap,eps); wg /= np.sum(wg); if finl and finr: x = xg * dimEl + centEl; x1D.append(x); else: x = xg; x1D.append(x); w1D.append(wg); vals1D.append(distCond(x,-1)); VCap = self.Vandermonde1D(vals1D[-1],d); V1D.append(VCap); # Compute tensor product of the Vandermonde matrices and weights wKron = np.array([1]); VKron = np.array([1]); for d in range(0,self.DIM): wKron = np.kron(wKron, w1D[d]); VKron = np.kron(VKron, V1D[d]); # Compute recursion coeff for idx in EIdxs: prod = VKron[:,idx[0]]; for i in range(1,power): prod = prod * VKron[:,idx[i]]; E[idx] = np.dot(prod,wKron); return E;
[docs] def isInElement(self,x): """ Takes a 2d-array (preferibly (N,DIM)) or list (of array of points for each dimension) """ if type(x) == list: x = np.array(x); if (x.shape[0] == self.DIM) and (x.shape[1] != self.DIM): x = x.T; if (x.shape[1] != self.DIM) : print "The number of elements in x is not consistent with the dimension of the element" return # Check belonging belonging = np.ones(x.shape[0],dtype=bool); # Init all true for d in range(0,self.DIM): elBelonging = np.all([(x[:,d] >= self.el[d][0]),(x[:,d] < self.el[d][1])],axis=0) belonging = np.all([belonging, elBelonging], axis=0) return belonging
def refresh(self,allocBasis=True):
    """
    Rebuild the element's cached 1D bases and their tensorized quantities.

    INPUT:
        - ``allocBasis`` : (bool) forwarded to the private init routines;
          presumably controls whether basis storage is (re)allocated --
          TODO confirm against __InitBasis1D/__InitTensor (not in view).
    """
    self.__InitBasis1D(allocBasis);
    self.__InitTensor(allocBasis);
    # Mark the cached quantities as up to date.
    self.dirty = False;
class MEPCM_Forest:
    """
    Collection ("forest") of multi-element PCM elements covering the random
    space.

    The forest is built as the tensor product of per-dimension lists of 1D
    element intervals (see ``split``) and can be adaptively refined element
    by element (see ``refine``).

    The class-level attributes are only defaults: ``__init__`` and
    ``__setstate__`` always rebind them on the instance.
    """
    DIM = 0;            # dimension of the random space
    elements = [];      # list of MEPCM_Element objects
    dists1D = [];       # per-dimension lists of 1D distributions
    distsCond1D = [];   # per-dimension lists of conditioning/scaling functions

    def __init__(self, el1D=None, dists1D=None, distsCond1D=None, Ns1D=None):
        """
        INPUT:
            - ``el1D`` : per-dimension list of 1D element intervals
            - ``dists1D`` : per-dimension list of 1D distributions
            - ``distsCond1D`` : per-dimension list of conditioning functions
            - ``Ns1D`` : per-dimension list of polynomial orders

        With no arguments an empty forest is created (used by ``setstate``
        to rebuild a forest from a serialized state).
        """
        # FIX: identity comparison with None ("== None" invokes __eq__ and
        # misbehaves for array-like arguments).
        if el1D is None:
            self.elements = [];
            self.DIM = 0;
            return
        if (len(el1D) != len(dists1D) or len(dists1D) != len(Ns1D) or len(Ns1D) != len(distsCond1D)) :
            print("The number of elements in el1D, dists1D, distsCond1D is not consistent")
            return
        self.DIM = len(el1D);
        self.dists1D = dists1D;
        self.distsCond1D = distsCond1D;
        self.elements = self.split(el1D, Ns1D);

    # def __reduce__(self):
    #     return (MEPCM_Forest, (None,None,None,None), self.__getstate__());

    def __getstate__(self):
        """Serialize the forest. Distributions are deliberately excluded
        (they are not picklable in general) and must be re-supplied to
        ``__setstate__``."""
        return dict(DIM = self.DIM,
                    elements = [e.getstate() for e in self.elements])
        # dists1D = self.dists1D,
        # distsCond1D = self.distsCond1D)

    def __setstate__(self,state,dists=None,distsCond=None):
        """Rebuild the forest from ``state``; ``dists``/``distsCond`` carry
        the (non-serialized) distribution objects."""
        self.DIM = state['DIM'];
        self.elements = []
        self.dists1D = dists;
        self.distsCond1D = distsCond;
        for e in state['elements']:
            el = MEPCM_Element([],[],[],[]);
            el.setstate(e,dists,distsCond);
            self.elements.append(el);

    def getstate(self):
        """Public alias of ``__getstate__``."""
        return self.__getstate__();

    def setstate(self,state,dists=None,distsCond=None):
        """Public alias of ``__setstate__``."""
        self.__setstate__(state,dists,distsCond);

    def split(self, el1D, Ns1D):
        """
        Build the tensor-product forest: one MEPCM_Element per combination
        of 1D intervals, with the matching per-dimension orders.

        OUTPUT: list of refreshed MEPCM_Element objects.
        """
        perDimIdx = [];
        for d in range(0,self.DIM):
            perDimIdx.append(range(0,len(el1D[d])));
        elsIdxList = list(itertools.product(*perDimIdx));
        root = [];
        for e in range(0,len(elsIdxList)):
            elsIdx = elsIdxList[e];
            el = []; # Coordinates for the element in each dimension
            Ns = []; # Polynomial order in each dimension
            for d in range(0,self.DIM):
                el.append( el1D[d][elsIdx[d]] );
                Ns.append( Ns1D[d][elsIdx[d]] );
            newEl = MEPCM_Element(el,self.dists1D,self.distsCond1D,Ns);
            newEl.refresh();
            root.append( newEl );
        return root;

    def refine(self, toRefIndices, toRefDims, toRefOrders):
        """
        Refine selected elements of the forest in place.

        INPUT:
            - ``toRefIndices`` : indices of the elements to refine
            - ``toRefDims`` : per element, boolean flags of the dimensions
              to be split in half
            - ``toRefOrders`` : per element, polynomial order to assign to
              the children in each dimension

        OUTPUT:
            - ascending list of indices of the newly created elements.

        Elements are processed from the highest index down so that the
        pending indices stay valid while the element list is modified.
        Infinite elements cannot be refined (error message, early return).
        """
        refinedIndices = [];
        for toRefIdx, toRefDim, toRefOrder in zip(reversed(toRefIndices),reversed(toRefDims),reversed(toRefOrders)):
            el = self.elements[toRefIdx].el;
            Ns = self.elements[toRefIdx].Ns;
            for d in range(0,self.DIM):
                if toRefDim[d]:
                    eld = el[d];
                    if (abs(eld[0]) == np.inf) or (abs(eld[1]) == np.inf):
                        print("Error: you are trying to refine an infinite element")
                        return
                    # Bisect the interval in this dimension.
                    dimEld = eld[1]-eld[0];
                    el[d] = [np.array([eld[0],eld[0]+dimEld/2.]),np.array([eld[0]+dimEld/2.,eld[1]])];
                    Ns[d] = [toRefOrder[d],toRefOrder[d]];
                else:
                    el[d] = [el[d]];
                    Ns[d] = [toRefOrder[d]];
            # Tensor-product children of the refined element.
            newEls = self.split(el,Ns);
            self.elements.pop(toRefIdx);
            for newEl, i in zip(reversed(newEls), range(0,len(newEls))):
                self.elements.insert(toRefIdx,newEl);
                if i > 0:
                    # Every extra insertion shifts previously recorded
                    # (larger) indices by one.
                    for j in range(0,len(refinedIndices)):
                        refinedIndices[j] = refinedIndices[j] + 1
                refinedIndices.append(toRefIdx);
        return list(reversed(refinedIndices));
def UQ_MEPCM_Tensor(domain,solve,params,solvePostProcessing,paramUpdate,refOrder,minRefOrd=2,maxRefOrd=2,iMax=10,gamma=0.5,theta1=1e-2, theta2=.75):
    """
    DESCRIPTION:
        Performs Uncertainty Quantification analysis of the system described
        by ``solve`` using the Multi-Element Probabilistic Collocation Method
        by Tensor Product of the random space, with adaptive element/order
        refinement. MPI-parallel: simulations are distributed through
        ``Experiments``; the refinement decisions are taken on rank 0 and
        broadcast to the other ranks.

    INPUT:
        ``domain``: (MEPCM_Forest object) initial domain on which to evaluate the functions
        ``solve``: function handle to the experiment to be quantified.
            Called through ``Experiments`` with signature sol = solve(params)
        ``params``: parameters to be passed to the system (these contain the uncertain parameters as well)
        ``solvePostProcessing``: post processing function.
            NOTE(review): accepted but never referenced in this implementation.
        ``paramUpdate``: function handle for updating the set of uncertain parameters.
            Signature: params = paramUpdate(params,sample)
        ``refOrder``: increment applied to the polynomial order of refined dimensions
        ``minRefOrd``/``maxRefOrd``: bounds on the order assigned to refined elements
        ``iMax``: maximum number of refinement iterations
        ``gamma``: parameter for adaptivity refinement criteria
        ``theta1``: parameter for adaptivity refinement criteria
        ``theta2``: parameter for the dimensional refinement criteria

    OUTPUT: dictionary with the following attributes
        ``domain``: MEPCM_Forest object
        ``solsEl``: (list #<multi-dim El>) solutions at collocation points per each element
        ``meanSol``: estimated mean
        ``varSol``: estimated variance
        ``uHatEl``: (list #<multi-dim El>) expansion coefficients per element
        ``fEval``: total number of solve evaluations
        NOTE(review): the dictionary is built on rank 0; other ranks
        presumably return None -- confirm against callers.
    """
    comm = MPI.COMM_WORLD
    nprocs = comm.Get_size()
    myrank = comm.Get_rank()

    ###################################################################################
    #                                 PRE - PROCESSING                                #
    ###################################################################################
    # Accumulation action passed to Experiments(): collect every solution in a list.
    def action(sols,sol):
        sols.append(sol)
        return sols

    iLoop = 0;
    stopCrit = False;       # becomes True when no element requires refinement
    toRefIndices = [];
    refinedIdx = range(0,len(domain.elements));  # elements whose collocation points still need simulations
    solsEl = [];            # per-element arrays of solutions at the collocation points
    totComputations = 0;    # running count of calls to `solve`
    while not stopCrit and (iLoop<iMax):
        iLoop = iLoop+1
        if myrank == 0:
            print "Total number of new elements: %d" % len(refinedIdx);
            table = PrettyTable(['#el','PrEl','refined']);
            for eli in range(0,len(domain.elements)):
                table.add_row([eli, domain.elements[eli].prEl, (eli in refinedIdx)])
            print table
        ###################################################################################
        #                                 LOOP SIMULATION                                 #
        ###################################################################################
        # Merge the collocation points of all new elements into a single array,
        # remembering each element's chunk size so solutions can be split back.
        vals = np.zeros((0,domain.DIM))
        splitSizes = [];
        for eli in refinedIdx:
            vals = np.vstack((vals,domain.elements[eli].valsEl));
            splitSizes.append(domain.elements[eli].valsEl.shape[0]);
        splitPos = np.cumsum(splitSizes)
        totComputations += vals.shape[0];
        # Run the experiments (MPI-parallel; results gathered on rank 0).
        sols = Experiments(solve, vals, params, paramUpdate, action);
        if myrank == 0:
            # Split solutions by element
            solsElSplit = np.split(sols,splitPos[:-1],axis=0);
            # Insert new sols in solsEl at the position of the originating element
            for eli,i in zip(refinedIdx,range(0,len(refinedIdx))):
                solsEl.insert(eli,solsElSplit[i]);
        solsEl = comm.bcast(solsEl, root=0);
        ###################################################################################
        #                               LOOP POST-PROCESSING                              #
        ###################################################################################
        if myrank == 0:
            # COMPUTE COEFFICIENTS of expansion (collocation values -> modal
            # coefficients, per element, via the element Vandermonde matrix).
            nEls = len(domain.elements);
            uHat = [];      # expansion coefficients per element
            sigmaEl = [];   # local (element) variance estimate
            etaEl = [];     # local decay indicator (high-order variance fraction)
            for i in range(0,nEls):
                uh = la.solve(domain.elements[i].VEl,solsEl[i])
                uHat.append( uh )
                if len(solsEl[i]) > 1:
                    Np = min(domain.elements[i].Ns)-1
                    orderList = np.array([sum(o) for o in domain.elements[i].orderEl])
                    # gammas[k]: quadrature approximation of <Phi_k, Phi_k>
                    gammas = np.dot(domain.elements[i].wEl,domain.elements[i].VEl*domain.elements[i].VEl)
                    sigma2bar = np.dot(uh[1:]**2., gammas[1:] )
                    sigmaEl.append( sigma2bar )
                    if sigma2bar != 0.:
                        # Fraction of the local variance carried by the
                        # highest total-order modes.
                        eta = np.dot(uh[orderList >= Np]**2., gammas[orderList >= Np]) / sigma2bar
                        etaEl.append( eta );
                    else:
                        etaEl.append(0.);
                else:
                    # Single collocation point: no variance information.
                    sigmaEl.append(0.);
                    etaEl.append(0.);
            # Check stopping criteria
            toRefIndices = [];
            toRefDims = [];
            toRefOrders = []; # Order change in each dimension
            stopCrit = True;
            if (iLoop < iMax):
                for eli in reversed(range(0,len(uHat))):
                    # Refinement criterion: eta^gamma weighted by the
                    # element probability against threshold theta1.
                    if (etaEl[eli]**gamma * domain.elements[eli].prEl >= theta1):
                        toRefIndices.append(eli);
                        # Detect the dimension to be refined
                        dimSens = np.zeros(domain.DIM);
                        orderEl = np.array(domain.elements[eli].orderEl)
                        orderList = np.array([sum(o) for o in domain.elements[eli].orderEl])
                        for d in range(0,domain.DIM):
                            Np = domain.elements[eli].Ns[d]-1
                            # Select the pure highest-order mode in dimension d.
                            NpIdx = (orderEl[:,d] == Np) * (orderList == Np);
                            gammaNp = np.dot(domain.elements[eli].wEl,domain.elements[eli].VEl[:,NpIdx]*domain.elements[eli].VEl[:,NpIdx])
                            dimSens[d] = (uHat[eli][NpIdx][0] * gammaNp[0])/ (etaEl[eli] * sigmaEl[eli]);
                        maxSens = np.amax(np.abs(dimSens));
                        refDims = np.zeros(domain.DIM,dtype=bool);
                        refOrders = np.zeros(domain.DIM,dtype=int);
                        for d in range(0,domain.DIM):
                            if domain.elements[eli].finite1D[d] and (np.abs(dimSens[d]) >= theta2 * maxSens):
                                # Sensitive (and finite) dimension: split it.
                                refDims[d] = True;
                                refOrders[d] = max(minRefOrd,min(domain.elements[eli].Ns[d]+refOrder,maxRefOrd));
                            else:
                                refDims[d] = False;
                                # Check eta condition for lower coefficients:
                                # decrease the order until the sensitivity
                                # becomes relevant (or the floor is reached).
                                Np = domain.elements[eli].Ns[d]-1; # Remember Ns-1 is the highest order
                                sens = 0.0;
                                while (Np > minRefOrd) and (np.abs(sens) < theta2 * maxSens):
                                    Np -= 1;
                                    NpIdx = (orderEl[:,d] == Np) * (orderList == Np);
                                    gammaNp = np.dot(domain.elements[eli].wEl,domain.elements[eli].VEl[:,NpIdx]*domain.elements[eli].VEl[:,NpIdx])
                                    sens = (uHat[eli][NpIdx][0] * gammaNp[0])/ (etaEl[eli] * sigmaEl[eli]);
                                refOrders[d] = Np+1;
                        toRefDims.append(refDims);
                        toRefOrders.append(refOrders);
                        if (iLoop < iMax):
                            # Discard solutions of refined elements; they are
                            # recomputed for the children at the next pass.
                            solsEl.pop(eli);
                        stopCrit = False;
                # Lists were filled in descending element order: restore
                # ascending order.
                toRefIndices = list(reversed(toRefIndices));
                toRefDims = list(reversed(toRefDims));
                toRefOrders = list(reversed(toRefOrders));
            table = PrettyTable(['#el','PrEl', 'eta', 'etaPr','toRefine']);
            for eli in range(0,len(domain.elements)):
                table.add_row([eli, domain.elements[eli].prEl, etaEl[eli], etaEl[eli]**gamma * domain.elements[eli].prEl, (eli in toRefIndices)])
            print table
            if not stopCrit and (iLoop<iMax):
                # Split elements (exclude tails)
                refinedIdx = domain.refine(toRefIndices,toRefDims,toRefOrders);
            domainState = domain.getstate()
        else:
            # Other processes
            domainState = [];
        # Synchronize refinement decision and refined domain across ranks.
        stopCrit = comm.bcast(stopCrit, root=0);
        domainState = comm.bcast(domainState, root=0);
        refinedIdx = comm.bcast(refinedIdx, root=0);
        domain.setstate(domainState,domain.dists1D,domain.distsCond1D)
    ## END LOOPS ##
    if myrank == 0:
        print "Total number of experiments: %d" % totComputations
        # COMPUTE MEAN: E[u] = sum_el uHat_0(el) * P(el)
        # (assumes the 0-th basis function is the constant mode -- TODO confirm)
        uHat0 = np.array([uHat[i][0] for i in range(0,len(uHat))]);
        prEls = np.array([domain.elements[i].prEl for i in range(0,len(domain.elements))]);
        meanSol = np.dot(uHat0, np.asarray(prEls));
        # COMPUTE VARIANCE: law of total variance over the elements.
        varSol = np.dot( np.array(sigmaEl) + (uHat0 - meanSol)**2. , prEls)
        ###################################################################################
        #                                     OUTPUT                                      #
        ###################################################################################
        # Prepare a dictionary with necessary data
        dictionary = {'domain': domain,
                      'solsEl': solsEl,
                      'meanSol': meanSol,
                      'varSol': varSol,
                      'uHatEl': uHat,
                      'fEval': totComputations}
        return dictionary