Package PyDSTool :: Module utils
[hide private]
[frames] | [no frames]

Source Code for Module PyDSTool.utils

  1  """
 
  2      User utilities.
 
  3  """ 
  4  
 
  5  from errors import * 
  6  from common import * 
  7  import Redirector as redirc 
  8  from parseUtils import joinStrs 
  9  
 
 10  from numpy import Inf, NaN, isfinite, less, greater, sometrue, alltrue, \
 
 11       searchsorted, take, argsort, array, swapaxes, asarray, zeros, transpose, \
 
 12       float64, int32, argmin, ndarray, concatenate 
 13  from numpy.linalg import norm 
 14  from scipy.optimize import minpack, zeros 
 15  try: 
 16      newton_meth = minpack.newton 
 17  except AttributeError: 
 18      # newer version of scipy
 
 19      newton_meth = zeros.newton 
 20  import time, sys, os, platform 
 21  import copy 
 22  
 
 23  
 
 24  # --------------------------------------------------------------------
 
 25  
 
 26  # EXPORTS
 
 27  
 
# Public classes defined in this module (none at present).
_classes = []

# Public utility functions exported via __all__.
_functions = ['intersect', 'remain', 'union', 'cartesianProduct',
              'makeDataDict', 'makeImplicitFunc', 'orderEventData',
              'saveObjects', 'loadObjects', 'info', 'compareList',
              'findClosestArray', 'findClosestPointIndex', 'find',
              'makeMfileFunction', 'make_RHS_wrap', 'make_Jac_wrap',
              'progressBar', 'distutil_destination']

# Exported name-list constants (valid solver-method names for
# makeImplicitFunc).
_mappings = ['_implicitSolveMethods', '_1DimplicitSolveMethods']

__all__ = _classes + _functions + _mappings


# Module-wide redirector objects used to capture/suppress stdout and
# stderr chatter from the numerical solvers (see makeImplicitFunc).
rout = redirc.Redirector(redirc.STDOUT)
rerr = redirc.Redirector(redirc.STDERR)
 44  
 
 45  ## ------------------------------------------------------------------
 
 46  
 
 47  ## Utility functions
 
 48  
 
def makeMfileFunction(name, argname, defs):
    """Write out a MATLAB function file <name>.m in the current directory.

    name    -- function name (also the key in defs holding the final,
               returned expression)
    argname -- name of the single MATLAB function argument
    defs    -- dictionary of left-hand side -> right-hand side definitions

    All entries except defs[name] are written as intermediate assignments;
    defs[name] is written last as the function's return value.
    """
    # 'with' guarantees the handle is closed even if a write fails
    # (the original leaked the handle on error); .items() also works on
    # both Python 2 and 3, unlike the original iteritems().
    with open(name + ".m", 'w') as mfile:
        mfile.write("function %s = %s(%s)\n" % (name, name, argname))
        for k, v in defs.items():
            if k != name:
                mfile.write("%s = %s;\n" % (k, v))
        # now the final definition of tau_recip or inf
        mfile.write("%s = %s;\n" % (name, defs[name]))
        mfile.write("return\n")
61 62
def info(x, specName="Contents", offset=1, recurseDepth=1,
         recurseDepthLimit=2, _repeatFirstTime=False):
    """Pretty printer for showing argument lists and dictionary
    specifications.

    x                 -- object to display: dicts are recursed into,
                         other objects fall back to __dict__ or repr
    specName          -- heading printed for this object
    offset            -- current indentation width (internal)
    recurseDepth      -- current recursion level (internal)
    recurseDepthLimit -- how deep to descend into objects' __dict__
    _repeatFirstTime  -- internal flag set when re-entering at depth 1
                         via an object's __dict__

    Prints to stdout; returns nothing.
    """

    if recurseDepth == 1:
        if not _repeatFirstTime:
            # first time through
            print "Information for " + specName + "\n"
        else:
            print specName + ":",
    if x.__class__ is type:
        # bare classes are not introspected
        return
    # presence of 'iteritems' is the (Python 2) duck-type test for a dict
    if hasattr(x, 'iteritems'):
        # NOTE(review): sortedDictKeys and object2str are assumed to come
        # from 'from common import *' -- confirm
        x_keys = sortedDictKeys(x)
        if len(x_keys) == 0:
            print "< empty >"
        elif recurseDepth != 1:
            print ""
        for k in x_keys:
            v = x[k]
            kstr = object2str(k)
            basestr = " "*(offset-1) + kstr
            if hasattr(v, 'iteritems'):
                # nested dict: recurse with extra indentation
                info(v, basestr, offset+4, recurseDepth+1,
                     recurseDepthLimit)
            else:
                # split the value's repr on ", " so long entries can be
                # wrapped at roughly 78 columns
                vStrList = object2str(v).split(', ')
                if len(vStrList)==0:
                    vStrList = ['< no information >']
                elif len(vStrList)==1 and vStrList[0] == '':
                    vStrList = ['< empty >']
                outStrList = [basestr+": "]
                for i in range(len(vStrList)):
                    if len(vStrList[i] + outStrList[-1]) < 78:
                        # fits on current line; restore the ", " separator
                        # (except before the first fragment)
                        outStrList[-1] += ", "*(i>0) + vStrList[i]
                    else:
                        if i>0:
                            if i != len(vStrList):
                                # add trailing comma to previous line
                                outStrList[-1] += ","
                            # start on new line, aligned past the key
                            outStrList.append(" "*(len(kstr)+3) + vStrList[i])
                        else:
                            # too long for line and string has no commas
                            # could work harder here, but for now, just include
                            # the long line
                            outStrList[-1] += vStrList[i]
                if recurseDepth==1 and len(outStrList)>1:
                    # print an extra space between topmost level entries
                    # provided those entries occupy more than one line.
                    print "\n"
                for s in outStrList:
                    print s
    elif hasattr(x, '__dict__') and recurseDepth <= recurseDepthLimit:
        # generic object: display its attribute dictionary instead
        info(x.__dict__, specName, offset, recurseDepth,
             recurseDepthLimit, True)
    else:
        xstr = repr(x)
        if xstr == '':
            xstr = '< no information >'
        print xstr
125 126 127 _implicitSolveMethods = ['newton', 'bisect', 'steffe', 'fsolve'] 128 _1DimplicitSolveMethods = ['newton', 'bisect', 'steffe'] 129 130
def makeImplicitFunc(f, x0, fprime=None, extrafargs=(), xtolval=1e-8,
                     maxnumiter=100, solmethod='newton', standalone=True):
    """Builds an implicit function representation of an N-dimensional curve
    specified by (N-1) equations. Thus argument f is a function of 1 variable.
    In the case of the 'fsolve' method, f may have dimension up to N-1.

    Available solution methods are: newton, bisect, steffensen, fsolve.
    All methods utilize SciPy's Minpack wrappers to Fortran codes.

    Steffenson uses Aitken's Delta-squared convergence acceleration.
    fsolve uses Minpack's hybrd and hybrj algorithms.

    Standalone option (True by default) returns regular function. If False,
    an additional argument is added, so as to be compatible as a method
    definition.

    Raises TypeError if a solver function cannot be built from the given
    arguments, ValueError for an unrecognized solmethod name.
    """

    # Validate x0 against the solver's expectations: bisect needs a
    # bracketing pair [lo, hi], fsolve accepts a scalar or sequence, and
    # the remaining scalar methods need a single number.
    # NOTE(review): _seq_types and _num_types are assumed to come from
    # 'from common import *' -- confirm
    if solmethod == 'bisect':
        assert isinstance(x0, _seq_types), \
               "Invalid type '"+str(type(x0))+"' for x0 = "+str(x0)
        assert len(x0) == 2
    elif solmethod == 'fsolve':
        assert isinstance(x0, (_seq_types, _num_types)), \
               "Invalid type '"+str(type(x0))+"' for x0 = "+str(x0)
    else:
        assert isinstance(x0, _num_types), \
               "Invalid type '"+str(type(x0))+"' for x0 = "+str(x0)

    # define the functions that could be used
    # scipy signatures use y instead of t, but this naming is consistent
    # with that in the Generator module
    try:
        if standalone:
            # one-argument closures: t is the free parameter of the curve.
            # rout.start()/stop() silence solver stdout chatter per call.
            def newton_fn(t):
                rout.start()
                res = float(newton_meth(f, x0, args=(t,)+extrafargs, tol=xtolval,
                                        maxiter=maxnumiter, fprime=fprime))
                rout.stop()
                return res

            def bisect_fn(t):
                rout.start()
                res = minpack.bisection(f, x0[0], x0[1], args=(t,)+extrafargs,
                                        xtol=xtolval, maxiter=maxnumiter)
                rout.stop()
                return res

            def steffe_fn(t):
                rout.start()
                res = minpack.fixed_point(f, x0, args=(t,)+extrafargs,
                                          xtol=xtolval, maxiter=maxnumiter)
                rout.stop()
                return res

            def fsolve_fn(t):
                rout.start()
                res = minpack.fsolve(f, x0, args=(t,)+extrafargs,
                                     xtol=xtolval, maxfev=maxnumiter,
                                     fprime=fprime)
                rout.stop()
                return res
        else:
            # two-argument closures: the extra leading 's' slot lets these
            # be assigned as (unbound) method definitions, with s playing
            # the role of self.
            def newton_fn(s, t):
                rout.start()
                res = float(newton_meth(f, x0, args=(t,)+extrafargs, tol=xtolval,
                                        maxiter=maxnumiter, fprime=fprime))
                rout.stop()
                return res

            def bisect_fn(s, t):
                rout.start()
                res = minpack.bisection(f, x0[0], x0[1], args=(t,)+extrafargs,
                                        xtol=xtolval, maxiter=maxnumiter)
                rout.stop()
                return res

            def steffe_fn(s, t):
                rout.start()
                res = minpack.fixed_point(f, x0, args=(t,)+extrafargs,
                                          xtol=xtolval, maxiter=maxnumiter)
                rout.stop()
                return res

            def fsolve_fn(s, t):
                rout.start()
                res = minpack.fsolve(f, x0, args=(t,)+extrafargs,
                                     xtol=xtolval, maxfev=maxnumiter,
                                     fprime=fprime)
                rout.stop()
                return res

    except TypeError, e:
        # (Python 2 except syntax) -- most likely a malformed x0
        if solmethod == 'bisect':
            infostr = " (did you specify a pair for x0?)"
        else:
            infostr = ""
        raise TypeError("Could not create function" +infostr + ": "+str(e))

    # select and return the closure matching the requested method
    if solmethod == 'newton':
        return newton_fn
    elif solmethod == 'bisect':
        if fprime is not None:
            print "Warning: fprime argument unused for bisection method"
        return bisect_fn
    elif solmethod == 'steffe':
        if fprime is not None:
            print "Warning: fprime argument unused for aitken method"
        return steffe_fn
    elif solmethod == 'fsolve':
        return fsolve_fn
    else:
        raise ValueError("Unrecognized type of implicit function solver")
def findClosestPointIndex(pt, target, tol=Inf, in_order=True):
    """
    Find index of the closest N-dimensional Point in the target N by M array
    or Pointset. Uses norm of order given by the Point
    or Pointset, unless they are inconsistent, in which case an exception is
    raised, or unless they are both arrays, in which case 2-norm is assumed.

    With the in_order boolean option (default True), the function will
    attempt to determine the local "direction" of the values and return an
    insertion index that will preserve this ordering. This option is
    incompatible with the tol option (see below).

    If the optional tolerance, tol, is given, then an index is returned only
    if the closest distance is within the tolerance. Otherwise, a ValueError
    is raised. This option is incompatible with the in_order option.
    """
    # _normord is a Pointset/Point attribute; plain arrays lack it, so
    # default to the 2-norm
    try:
        normord = pt._normord
    except AttributeError:
        normord = 2
    try:
        if target._normord != normord:
            raise ValueError("Incompatible order of norm defined for inputs")
    except AttributeError:
        pass

    # distance from pt to every row of target; index of nearest row
    dists = [norm(pt-x, normord) for x in target]
    index = argmin(dists)

    if in_order:
        # choose a small neighbourhood [index-lo_off, index+hi_off) around
        # the nearest point, and the offset within it at which pt would be
        # inserted
        if index > 0:
            lo_off = 1
            # insertion offset index
            ins_off = 1
            if index < len(target):
                hi_off = 1
            else:
                hi_off = 0
        else:
            lo_off = 0
            hi_off = 2
            # insertion offset index
            ins_off = 0

        pta = array([pt])  # extra [] to get compatible shape for concat
        dim_range = range(target.shape[1])
        # neighborhood
        nhood = target[index-lo_off:index+hi_off]
        # NOTE(review): ismonotonic is assumed to come from
        # 'from common import *' -- confirm
        if all(ismonotonic(nhood[:,d]) for d in dim_range):
            # try inserting at index, otherwise at index+1
            new_nhood = concatenate((nhood[:ins_off], pta, nhood[ins_off:]))
            if not all(ismonotonic(new_nhood[:,d]) for d in dim_range):
                ins_off += 1
                index += 1
                new_nhood = concatenate((nhood[:ins_off], pta, nhood[ins_off:]))
                if not all(ismonotonic(new_nhood[:,d]) for d in dim_range):
                    raise ValueError("Cannot add point in order, try deactivating the in_order option")

    if in_order:
        return index
    else:
        if dists[index] < tol:
            return index
        else:
            raise ValueError("No index found within distance tolerance")
334 335
def findClosestArray(input_array, target_array, tol):
    """
    Find the set of elements in (1D) input_array that are closest to
    elements in target_array. Record the indices of the elements in
    target_array that are within tolerance, tol, of their closest
    match. Also record the indices of the elements in target_array
    that are outside tolerance, tol, of their match.

    For example, given an array of observations with irregular
    observation times along with an array of times of interest, this
    routine can be used to find those observations that are closest to
    the times of interest that are within a given time tolerance.

    NOTE: input_array must be sorted! The array, target_array, does not have to be sorted.

    Inputs:
      input_array: a sorted float64 array
      target_array: a float64 array
      tol: a tolerance

    Returns:
      closest_indices: the array of indices of elements in input_array that are closest to elements in target_array

    Author: Gerry Wiener, 2004
    Version 1.0

    (The accept/reject index lists of the original algorithm are not
    returned in this version.)
    """
    input_array_len = len(input_array)
    # insertion positions that would keep input_array sorted
    closest_indices = searchsorted(input_array, target_array)
    # best distance found so far for each target element
    curr_tol = [tol] * len(target_array)

    est_tol = 0.0
    # range() instead of the original Python-2-only xrange(); identical
    # behavior here and also valid on Python 3
    for i in range(len(target_array)):
        # adjusts closest_indices[i] down by one when the left neighbour
        # is the better approximation
        best_off = 0

        if closest_indices[i] >= input_array_len:
            # target >= every element of input_array: clamp to last element
            closest_indices[i] = input_array_len - 1
            est_tol = target_array[i] - input_array[closest_indices[i]]
            if est_tol < curr_tol[i]:
                curr_tol[i] = est_tol
        elif target_array[i] == input_array[closest_indices[i]]:
            # exact match: target_array[i] is in input_array
            est_tol = 0.0
            curr_tol[i] = 0.0
        elif closest_indices[i] == 0:
            # target <= every element of input_array
            est_tol = input_array[0] - target_array[i]
            if est_tol < curr_tol[i]:
                curr_tol[i] = est_tol
        else:
            # target lies strictly between input_array[ci-1] and
            # input_array[ci], with ci > 0
            top_tol = input_array[closest_indices[i]] - target_array[i]
            bot_tol = target_array[i] - input_array[closest_indices[i]-1]
            if bot_tol <= top_tol:
                est_tol = bot_tol
                best_off = -1  # this is the only place where best_off != 0
            else:
                est_tol = top_tol

            if est_tol < curr_tol[i]:
                curr_tol[i] = est_tol

        if est_tol <= tol:
            # within tolerance: apply the neighbour adjustment
            closest_indices[i] += best_off

    return closest_indices
416 417
def find(x, v, next_largest=1, indices=None):
    """Returns the index into the 1D array x corresponding to the
    element of x that is either equal to v or the nearest to
    v. x is assumed to contain unique elements.

    if v is outside the range of values in x then the index of the
    smallest or largest element of x is returned.

    If next_largest == 1 then the nearest element taken is the next
    largest, otherwise if next_largest == 0 then the next smallest
    is taken.

    The optional argument indices speeds up multiple calls to this
    function if you have pre-calculated indices=argsort(x).
    """
    assert next_largest in [0, 1], "next_largest must be 0 or 1"
    if indices is None:
        indices = argsort(x)
    # view of x in ascending order
    xs = take(x, indices, axis=0)
    equal_mask = (xs == v).tolist()
    if 1 in equal_mask:
        # exact hit: translate sorted position back to original index
        pos = equal_mask.index(1)
    else:
        # no exact match: locate the first element on the chosen side of v
        if next_largest:
            side_mask = (xs < v).tolist()
        else:
            side_mask = (xs > v).tolist()
        try:
            raw = side_mask.index(1 - next_largest) + next_largest - 1
            pos = min(max(0, raw), len(side_mask) - 1)
        except ValueError:
            # v beyond the relevant end of the sorted values
            pos = next_largest - 1
    return indices[pos]
450 451
def orderEventData(edict, evnames=None, nonames=False, bytime=False):
    """Time-order event data dictionary items.

    Returns time-ordered list of (eventname, time) tuples.

    If 'evnames' argument included, this restricts output to only the named
    events.
    The 'nonames' flag (default False) forces routine to return only the event
    times, with no associated event names.
    The 'bytime' flag (default False) only works with nonames=False and returns
    the list in (time, eventname) order.
    """

    if evnames is None:
        evnames = edict.keys()
    else:
        assert remain(evnames, edict.keys()) == [], "Invalid event names passed"
    if nonames:
        # flatten all selected events' times and sort chronologically
        times = []
        for name, tlist in edict.items():
            if name in evnames:
                times.extend(tlist)
        times.sort()
        return times
    # build (time, name) pairs so that sorting orders by time first
    pairs = []
    for name, tlist in edict.items():
        if name in evnames:
            pairs.extend([(t, name) for t in tlist])
    pairs.sort()
    if bytime:
        return pairs
    # swap back so event names come first in each tuple
    return [(name, t) for (t, name) in pairs]
489 490 ## ------------------------------------------------------------ 491 ## Generator wrapping utilities 492
def make_RHS_wrap(gen, xdict_base, x0_names, use_gen_params=False, overflow_penalty=1e4):
    """Return function wrapping Generator argument gen's RHS function,
    but restricting input and output dimensions to those specified by
    x0_names. All other variable values will be given by those in xdict_base.
    In case of overflow or ValueError during a call to the wrapped function,
    an overflow penalty will be used for the returned values (default 1e4).

    if use_gen_params flag is set (default False)
    then:
      Return function has signature Rhs_wrap(x,t)
      and takes an array or list of x state variable values and scalar t,
      returning an array type of length len(x). The Generator's current param
      values (at call time) will be used.
    else:
      Return function has signature Rhs_wrap(x,t,pdict)
      and takes an array or list of x state variable values, scalar t, and a
      dictionary of parameters for the Generator, returning an array type of
      length len(x).

    NB: xdict_base will be copied as it will be updated in the wrapped
    function."""
    ix_lookup = invertMap(gen.funcspec.vars)
    x0_names.sort()  # ensures sorted (in place)
    sel_ixs = [ix_lookup[name] for name in x0_names]
    n_dim = len(x0_names)
    state = xdict_base.copy()
    if use_gen_params:
        def Rhs_wrap(x, t):
            state.update(dict(zip(x0_names, x)))
            try:
                return take(gen.Rhs(t, state, gen.pars), sel_ixs)
            except (OverflowError, ValueError):
                return array([overflow_penalty]*n_dim)
    else:
        def Rhs_wrap(x, t, pdict):
            state.update(dict(zip(x0_names, x)))
            try:
                return take(gen.Rhs(t, state, pdict), sel_ixs)
            except (OverflowError, ValueError):
                return array([overflow_penalty]*n_dim)
    return Rhs_wrap
def make_Jac_wrap(gen, xdict_base, x0_names, use_gen_params=False, overflow_penalty=1e4):
    """Return function wrapping Generator argument gen's Jacobian function,
    but restricting input and output dimensions to those specified by
    x0_names. All other variable values will be given by those in xdict_base.
    In case of overflow or ValueError during a call to the wrapped function,
    an overflow penalty will be used for the returned values (default 1e4).

    if use_gen_params flag is set (default False)
    then:
      Return function Jac_wrap(x,t) takes an array or list of x variable
      values and scalar t, returning a 2D array type of size len(x) by len(x).
      The Generator's current param values (at call time) will be used.
    else:
      Return function Jac_wrap(x,t,pdict) takes an array or list of x variable
      values, scalar t, and a dictionary of parameters for the Generator,
      returning a 2D array type of size len(x) by len(x).

    NB: xdict_base will be copied as it will be updated in the wrapped
    function.

    Raises ValueError if gen has no Jacobian defined."""
    if not gen.haveJacobian():
        raise ValueError("Jacobian not defined")
    ix_lookup = invertMap(gen.funcspec.vars)
    x0_names.sort()  # ensures sorted (in place)
    sel_ixs = [ix_lookup[name] for name in x0_names]
    n_dim = len(x0_names)
    state = xdict_base.copy()
    if use_gen_params:
        def Jac_wrap(x, t):
            state.update(dict(zip(x0_names, x)))
            try:
                # restrict full Jacobian to the selected rows and columns
                full_jac = gen.Jacobian(t, state, gen.pars)
                return take(take(full_jac, sel_ixs, 0), sel_ixs, 1)
            except (OverflowError, ValueError):
                return array([overflow_penalty]*n_dim)
    else:
        def Jac_wrap(x, t, pdict):
            state.update(dict(zip(x0_names, x)))
            try:
                full_jac = gen.Jacobian(t, state, pdict)
                return take(take(full_jac, sel_ixs, 0), sel_ixs, 1)
            except (OverflowError, ValueError):
                return array([overflow_penalty]*n_dim)
    return Jac_wrap
def progressBar(i, total, width=50):
    """Print an increasing number of dashes up to given width, reflecting
    i / total fraction of progress. Prints and refreshes on one line.
    """
    frac = float(i) / total
    n_dashes = int(frac * width)
    # '[' followed by the dashes, padded with spaces out to full width
    bar = '['.ljust(n_dashes + 1, '-')
    sys.stdout.write('\r' + bar.ljust(width, ' ') + '] %.2f%%' % (frac * 100.))
    sys.stdout.flush()
595 596 597 ## ------------------------------------------------------------ 598
def saveObjects(objlist, filename, force=False):
    """Store PyDSTool objects to file. Argument should be a tuple or list,
    but if a singleton non-sequence object X is given then it will be
    saved as a list [ X ].

    Some PyDSTool objects will not save using this function, and will complain
    about attributes that do not have definitions in __main__.

    Raises ValueError if the target file exists and force is False.
    """

    # passing protocol = -1 to pickle means it uses highest available
    # protocol (e.g. binary format)
    if not force:
        if os.path.isfile(filename):
            raise ValueError("File '" + filename + "' already exists")
    # 'file' builtin: Python 2 only.
    # NOTE(review): pickle is assumed to come from 'from common import *'
    # -- confirm
    pklfile = file(filename, 'wb')
    # Win32 only: in call to pickle.dump ...
    # DO NOT use binary option (or HIGHESTPROTOCOL) because
    # IEE754 special values are not UNpickled correctly in Win32
    # (you'll see no exception raised). This is a known bug
    # and fixedpickle.py is a work-around. (June 2005)
    if os.name == 'nt':
        opt = None
    else:
        opt = 0
    if not isinstance(objlist, list):
        # wrap a singleton object so it is saved as [ X ]
        objlist=[objlist]
    for obj in objlist:
        try:
            pickle.dump(obj, pklfile, opt)
        except:
            # report which object failed before re-raising
            if hasattr(obj, 'name'):
                print "Failed to save '%s'"%obj.name
            else:
                print "Failed to save object '%s'"%str(obj)
            raise
    pklfile.close()
635 636 637
def loadObjects(filename, namelist=None):
    """Retrieve PyDSTool objects from file. Returns list of objects
    unless namelist option is given as a singleton string name.
    Also, if only one object X was stored, it will be returned as [X],
    and thus you will have to index the returned list with 0 to get X itself.

    Optional namelist argument selects objects to return by name,
    provided that the objects have name fields (otherwise they are ignored).
    If namelist is a single string name then a single object is returned.

    Raises ValueError if the file does not exist or namelist contains
    duplicate names; TypeError for an invalid namelist argument.
    """

    # Since names are not intended to be unique in PyDSTool, the while
    # loop always goes to the end of the file, and pulls out *all*
    # occurrences of the names.
    if not os.path.isfile(filename):
        raise ValueError("File '" + filename + "' not found")
    if namelist is None:
        namelist = []
    was_singleton_name = isinstance(namelist, str)
    if not isinstance(namelist, list):
        if was_singleton_name:
            # copy so the caller's string is not captured by reference
            namelist = [copy.copy(namelist)]
        else:
            raise TypeError("namelist must be list of strings or singleton string")
    # NOTE(review): isUniqueSeq and pickle are assumed to come from
    # 'from common import *' -- confirm
    if not isUniqueSeq(namelist):
        raise ValueError("Names must only appear once in namelist argument")
    # 'file' builtin: Python 2 only
    pklfile = file(filename, 'rb')
    if namelist == []:
        getall = True
    else:
        getall = False
    objlist = []
    notDone = True
    while notDone:
        try:
            if getall:
                objlist.append(pickle.load(pklfile))
            else:
                tempobj = pickle.load(pklfile)
                if hasattr(tempobj, 'name'):
                    if tempobj.name in namelist:
                        objlist.append(tempobj)
        except EOFError:
            # normal termination: end of the pickle stream reached
            notDone = False
        except:
            print "Error in un-pickling:"
            print "Was the object created with an old version of PyDSTool?"
            pklfile.close()
            raise
    pklfile.close()
    if objlist == []:
        if getall:
            print "No objects found in file"
        else:
            print "No named objects found in file"
    # NOTE(review): if nothing matched a singleton name, objlist[0] below
    # raises IndexError -- presumably intentional fail-fast; confirm
    if was_singleton_name:
        return objlist[0]
    else:
        return objlist
697 698
def intersect(a, b):
    """Find intersection of two lists, sequences, etc.
    Returns a list that includes repetitions if they occur in the inputs."""
    # A list comprehension (rather than filter) guarantees a list is
    # returned, as documented: Python 2 filter() returns a str/tuple for
    # str/tuple input, and Python 3 filter() returns an iterator, which
    # would break callers such as compareList that take len() of the result.
    return [e for e in a if e in b]
703
def union(a, b):
    """Find union of two lists, sequences, etc.
    Returns a list that includes repetitions if they occur in the input lists.
    """
    combined = list(a)
    combined.extend(b)
    return combined
709
def remain(a, b):
    """Find remainder of two lists, sequences, etc., after intersection.
    Returns a list that includes repetitions if they occur in the inputs."""
    # A list comprehension (rather than filter) guarantees a list is
    # returned, as documented: Python 2 filter() returns a str/tuple for
    # str/tuple input, and Python 3 filter() returns an iterator, which
    # would break callers comparing the result to a list (see orderEventData).
    return [e for e in a if e not in b]
714
def compareList(a, b):
    """Compare elements of lists, ignoring order (like sets)."""
    # equal only when both lists have the same length as their intersection
    n_common = len(intersect(a, b))
    return n_common == len(a) and len(a) == len(b)
718
def cartesianProduct(a, b):
    """Returns the cartesian product of the sequences."""
    # all (ai, bj) pairs, with a varying slowest (same order as the
    # nested-loop formulation)
    return [(ai, bj) for ai in a for bj in b]
725 726 # deprecated
def makeDataDict(fieldnames, fieldvalues):
    """Zip arrays of field names and values into a dictionary.
    For instance, to use in Generator initialization arguments.

    Deprecated as of v0.89."""
    # numpy arrays are converted row-by-row to plain lists first
    if isinstance(fieldvalues, ndarray):
        values = [row.tolist() for row in fieldvalues]
    else:
        values = fieldvalues
    return dict(zip(fieldnames, values))
736 737 738 # ------------------------ 739
def distutil_destination():
    """Internal utility that makes the goofy destination directory string so that PyDSTool
    can find where the distutils fortran/gcc compilers put things.

    If your temp directory turns out to be different to the one created here, contact us
    on sourceforge.net, but in the meantime you can override destdir with whatever directory
    name you find that is being used.
    """
    import scipy
    os_name = str.lower(platform.system())
    py_ver = platform.python_version_tuple()
    machine = platform.machine()
    # e.g. '2.7' -- major.minor of the running interpreter
    py_tag = py_ver[0] + '.' + py_ver[1]
    if os_name == 'linux':
        return 'src.' + os_name + '-' + machine + '-' + py_tag
    if os_name == 'darwin':
        mac_ver = platform.mac_ver()[0].split('.')
        # newer scipy on OS X uses a 'macosx-<ver>' naming scheme
        if int(scipy.__version__.split('.')[1]) > 5 and len(mac_ver) > 1 and mac_ver != ['']:
            return 'src.macosx-' + mac_ver[0] + '.' + mac_ver[1] + '-' + machine + '-' + py_tag
        return 'src.' + os_name + '-' + platform.release() + '-' + machine + '-' + py_tag
    if os_name == 'windows':
        return 'src.win32-' + py_tag
    # unknown platform: no destination subdirectory
    return ''
766