1 """Implementation of dominant scale analysis techniques for python.
2
3 Loosely based on Matlab tool DSSRT. This module is incomplete and is
4 still a work in progress! See my publications on this subject for further
5 details.
6
7 Robert Clewley, 2009
8
9 """
10 from __future__ import division
11
12 from PyDSTool import common
13 from PyDSTool import utils
14 from PyDSTool import HybridModel, NonHybridModel, Interval
15 from PyDSTool import parseUtils
16 from PyDSTool import Symbolic, Vode_ODEsystem
17 from PyDSTool.Trajectory import numeric_to_traj
18 from PyDSTool.Points import Point, Pointset, pointsToPointset
19 from PyDSTool.errors import *
20
21 import numpy as np
22 import scipy as sp
23 import copy
24 import cStringIO, operator


_classes = ['epoch', 'regime', 'dssrt_assistant', 'domscales',
            'Scorer', 'EpochSeqScorer', 'VarAlphabet']

_predicates = ['is_active', 'is_inactive', 'is_modulatory',
               'is_most_dominant', 'is_fast', 'is_slow', 'is_order1',
               'join_actives', 'leave_actives', 'become_most_dominant',
               'join_fast', 'join_slow', 'leave_fast', 'leave_slow']

_functions = ['find_epoch_period', 'transition_psi', 'transition_tau',
              'get_infs', 'get_taus', 'split_pts', 'define_psi_events',
              'normalized_psis', 'define_tau_events', 'find_regime_transition',
              'find_ep_ix', 'show_epochs', 'plot_psis', 'indent',
              'swdist', 'jaro', 'comp_seqs', 'editdist_edits',
              'MATCH', 'MISMATCH', 'APPROX', 'CLOSE', 'VCLOSE',
              'FAR', 'VFAR', 'GAP', 'get_symbol_sequence', 'tabulate_epoch_seqs']

__all__ = _classes + _functions + _predicates
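

# NB: the definition of the `predicate` base class used by the qualitative
# predicates below was lost from this copy. This minimal sketch simply stores
# the subject variable name and an empty record, which is all that the
# subclasses assume.
class predicate(object):
    def __init__(self, subject):
        self.subject = subject
        self.record = ()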


class join_actives(predicate):
    name = 'join_actives'

    def precondition(self, epochs):
        res = self.subject not in epochs[0].actives
        self.record = (self.name, self.subject, res)
        return res

    def __call__(self, epoch):
        return self.subject in epoch.actives


class leave_actives(predicate):
    name = 'leave_actives'

    def precondition(self, epochs):
        res = self.subject in epochs[0].actives
        self.record = (self.name, self.subject, res)
        return res

    def __call__(self, epoch):
        return self.subject not in epoch.actives


class become_most_dominant(predicate):
    name = 'become_most_dominant'

    def precondition(self, epochs):
        res = self.subject != epochs[0].actives[0]
        self.record = (self.name, self.subject, res)
        return res

    def __call__(self, epoch):
        return self.subject == epoch.actives[0]


class join_fast(predicate):
    name = 'join_fast'

    def precondition(self, epochs):
        res = self.subject not in epochs[0].fast
        self.record = (self.name, self.subject, res)
        return res

    def __call__(self, epoch):
        return self.subject in epoch.fast


class join_slow(predicate):
    name = 'join_slow'

    def precondition(self, epochs):
        res = self.subject not in epochs[0].slow
        self.record = (self.name, self.subject, res)
        return res

    def __call__(self, epoch):
        return self.subject in epoch.slow


class leave_fast(predicate):
    name = 'leave_fast'

    def precondition(self, epochs):
        res = self.subject in epochs[0].fast
        self.record = (self.name, self.subject, res)
        return res

    def __call__(self, epoch):
        return self.subject not in epoch.fast


class leave_slow(predicate):
    name = 'leave_slow'

    def precondition(self, epochs):
        res = self.subject in epochs[0].slow
        self.record = (self.name, self.subject, res)
        return res

    def __call__(self, epoch):
        return self.subject not in epoch.slow


class is_active(predicate):
    name = 'is_active'

    def __call__(self, epoch):
        return self.subject in epoch.actives


class is_inactive(predicate):
    name = 'is_inactive'

    def __call__(self, epoch):
        return self.subject in epoch.inactives


class is_modulatory(predicate):
    name = 'is_modulatory'

    def __call__(self, epoch):
        return self.subject in epoch.modulatory


class is_most_dominant(predicate):
    name = 'is_most_dominant'

    def __call__(self, epoch):
        return self.subject == epoch.actives[0]


class is_fast(predicate):
    name = 'is_fast'

    def __call__(self, epoch):
        return self.subject in epoch.fast


class is_slow(predicate):
    name = 'is_slow'

    def __call__(self, epoch):
        return self.subject in epoch.slow


class is_order1(predicate):
    name = 'is_order1'

    def __call__(self, epoch):
        return self.subject in epoch.order1
186 """Finds epoch during time t"""
187 for i, ep in enumerate(eps):
188 if t >= ep.t0:
189 try:
190 test = t < eps[i+1].t0
191 except IndexError:
192 test = t <= eps[i].t1
193 if test:
194 return i
195 else:
196 raise ValueError("t must be in the epochs' time intervals")
197
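
# For example (a sketch; eps can be any objects with t0/t1 attributes,
# such as common.args records):
#   eps = [common.args(t0=0., t1=1.), common.args(t0=1., t1=2.)]
#   find_ep_ix(eps, 1.5)   ->  1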


# A regime is a sequence of epochs satisfying the given criteria.
class regime(object):
    def __init__(self, epochs, global_criteria, transition_criteria):
        self.epochs = epochs
        # a regime inherits its actives and time span from its epochs
        self.actives = epochs[0].actives
        self.t0 = epochs[0].t0
        self.t1 = epochs[-1].t1
        self.global_criteria = global_criteria
        self.transition_criteria = transition_criteria


# NB: the argument names below are assumed; the original def line was lost
# from this copy. Each entry of criteria_list is a triple
# (epochs, global_criteria, transition_criteria).
def find_regime_transition(criteria_list, min_t):
    global_intervals = []
    transition_intervals = []
    record = {}
    for criterion_ix, (epochs, global_criteria, transition_criteria) in \
            enumerate(criteria_list):
        check_global = global_criteria is not None
        check_trans = transition_criteria is not None
        # candidate time intervals for global failure and for transition
        glt0 = np.Inf
        glt1 = np.Inf
        trt0 = np.Inf
        trt1 = np.Inf
        trans_found = False
        if not check_global and not check_trans:
            # nothing to check for this criterion
            continue
        if check_trans:
            if not transition_criteria.precondition(epochs):
                record[np.Inf] = (criterion_ix, None, 'transitional_precondition',
                                  (None, None), transition_criteria.record)
                return np.Inf, record
        for epix, ep in enumerate(epochs):
            if check_global and not global_criteria(ep) and ep.t1 >= min_t:
                if ep.t0 < min_t:
                    # global criterion already failed before min_t
                    record[np.Inf] = (criterion_ix, epix, 'global', (None, None),
                                      global_criteria.record)
                    return np.Inf, record
                else:
                    glt0 = min_t
                    glt1 = ep.t0
                    record[glt0] = (criterion_ix, epix, 'global', (glt0, glt1),
                                    global_criteria.record)
                break
            if check_trans:
                if transition_criteria(ep):
                    if not trans_found:
                        if ep.t0 < min_t:
                            if ep.t1 > min_t:
                                trans_found = True
                                trt0 = min_t
                                trt1 = ep.t1
                        else:
                            trt0 = ep.t0
                            trt1 = ep.t1
                            trans_found = True
                    else:
                        # extend the transition interval
                        trt0 = max([min([trt0, ep.t0]), min_t])
                        trt1 = max([ep.t1, min_t])
                elif trans_found:
                    # the transition interval has ended
                    break
        if trans_found:
            record[trt0] = (criterion_ix, epix, 'transitional', (trt0, trt1),
                            transition_criteria.record)
        if check_global and (np.isfinite(glt0) or np.isfinite(glt1)):
            global_intervals.append(Interval('glob', float, [glt0, glt1]))
        if check_trans and (np.isfinite(trt0) or np.isfinite(trt1)):
            transition_intervals.append(Interval('tran', float, [trt0, trt1]))
    # intersect all candidate intervals of each type
    tri0 = Interval('tran_result', float, [-np.Inf, np.Inf])
    for i in transition_intervals:
        try:
            tri0 = tri0.intersect(i)
        except:
            if tri0 is None:
                print "No intersection for transition intervals"
                return np.Inf, record
            else:
                raise
    if tri0 is None:
        print "No intersection for transition intervals"
        return np.Inf, record
    gli0 = Interval('glob_result', float, [-np.Inf, np.Inf])
    for i in global_intervals:
        try:
            gli0 = gli0.intersect(i)
        except:
            if gli0 is None:
                print "No intersection for global intervals"
                return np.Inf, record
            else:
                raise
    if gli0 is None:
        print "No intersection for global intervals"
        return np.Inf, record
    any_trans = len(transition_intervals) > 0
    any_global = len(global_intervals) > 0
    if any_trans:
        if any_global:
            if gli0[1] >= tri0[0]:
                return tri0[0], record
            else:
                return gli0[1], record
        else:
            return tri0[0], record
    else:
        if any_global:
            return gli0[1], record
        else:
            return np.Inf, record


class epoch(object):
    def __init__(self, focus_var, sigma, gamma, actives, modulatory, inactives,
                 order1, slow, fast, ever_slow, ever_fast, ranks, index0, index1,
                 influences, rel_taus, traj_pts, inv_psi_namemap, inv_tau_namemap,
                 relative_ratios=False, opts=None):
        self.focus_var = focus_var
        self.actives = actives
        self.modulatory = modulatory
        self.inactives = inactives
        self.inactives.sort()
        self.ranks = ranks
        self.order1 = order1
        self.slow = slow
        self.fast = fast
        self.ever_slow = ever_slow
        self.ever_fast = ever_fast
        self.traj_pts = traj_pts
        self.sigma = sigma
        self.gamma = gamma
        self.inv_psi_namemap = inv_psi_namemap
        self.inv_tau_namemap = inv_tau_namemap
        self.relative_ratios = relative_ratios
        self.length = len(influences)
        self.index0 = index0
        self.index1 = index1
        assert self.length == index1-index0+1
        self.opts = opts
        if traj_pts is not None:
            self.t0 = traj_pts.indepvararray[0]
            self.t1 = traj_pts.indepvararray[-1]
        else:
            self.t0 = None
            self.t1 = None
        self.influences = influences
        self.rel_taus = rel_taus
        self.index_interval = Interval('indices', int, [index0, index1])
        self.time_interval = Interval('time', float, [self.t0, self.t1])

    def __cmp__(self, other):
        if isinstance(other, epoch):
            if self.focus_var == other.focus_var and self.sigma == other.sigma:
                return self.actives == other.actives and \
                       self.modulatory == other.modulatory and \
                       self.order1 == other.order1 and \
                       self.fast == other.fast and \
                       self.slow == other.slow
        else:
            raise TypeError("Only compare epochs to other epochs")

    __eq__ = __cmp__

    def seq_str(self):
        # return active and modulatory variable names annotated with their
        # speed: '[F]' for fast, '[S]' for slow
        actives_w_speed = []
        for a in self.actives:
            if a in self.fast:
                actives_w_speed.append(a+'[F]')
            elif a in self.slow:
                actives_w_speed.append(a+'[S]')
            else:
                actives_w_speed.append(a)
        mod_w_speed = []
        for m in self.modulatory:
            if m in self.fast:
                mod_w_speed.append(m+'[F]')
            elif m in self.slow:
                mod_w_speed.append(m+'[S]')
            else:
                mod_w_speed.append(m)
        return actives_w_speed, mod_w_speed

    def _infostr(self, verboselevel=1):
        # (method name assumed; the original def line was lost from this copy)
        if self.opts.use_order:
            uo_str = "(ord.)"
        else:
            uo_str = "(~ord.)"
        str1 = "Epoch for %s at sigma %.5f %s: [%.5f, %.5f]" % \
               (self.focus_var, self.sigma, uo_str, self.t0, self.t1)
        if verboselevel == 0:
            return str1
        else:
            actives_w_speed, mod_w_speed = self.seq_str()
            str2 = " Actives: " + ",".join(actives_w_speed)
            str3 = " Modulatory: " + ",".join(mod_w_speed)
            return str1 + str2 + str3

    def info(self, verboselevel=1):
        print self._infostr(verboselevel)

    def __str__(self):
        return self._infostr(0)

    __repr__ = __str__


def _comp(a, b, eps, atol):
    # compare the ratio a/b to eps, within absolute tolerance atol
    try:
        rat = a/b
    except ZeroDivisionError:
        rat = np.Inf
    return np.allclose(rat, eps, atol=atol)


def transition_psi(epoch, pt, atol):
    """Assume that, generically, there will only be one transition at a time.
    """
    acts = pt[epoch.inv_psi_namemap(epoch.actives)]
    acts_plain = epoch.inv_psi_namemap.inverse()(acts.coordnames)
    if epoch.modulatory == []:
        mods = []
        mods_plain = []
    else:
        mods = pt[epoch.inv_psi_namemap(epoch.modulatory)]
        mods_plain = epoch.inv_psi_namemap.inverse()(mods.coordnames)
    relrat = epoch.relative_ratios
    leavers = []
    joiners = []
    if relrat:
        # compare successive ratios of the dominance-ordered actives
        if len(acts) > 1:
            ptixs = np.argsort(acts)[::-1]
            for i in range(len(ptixs)-1):
                if _comp(acts[ptixs[i]], acts[ptixs[i+1]], epoch.sigma, atol):
                    leavers.append(acts_plain[i+1])
        # a modulatory variable joins when it comes within sigma of the
        # weakest active
        min_act = min(acts)
        for i, m in enumerate(mods):
            if _comp(min_act, m, epoch.sigma, atol):
                joiners.append(mods_plain[i])
    else:
        max_ix = np.argmax(acts)
        min_ix = np.argmin(acts)
        # the weakest active leaves when it falls sigma below the strongest
        if _comp(acts[max_ix], acts[min_ix], epoch.sigma, atol):
            leavers.append(acts_plain[min_ix])
        for i, m in enumerate(mods):
            if _comp(acts[max_ix], m, epoch.sigma, atol):
                joiners.append(mods_plain[i])
    if len(leavers) + len(joiners) == 0:
        raise PyDSTool_ValueError("No transition found: Tolerance too small?")
    elif len(leavers) + len(joiners) > 1:
        raise PyDSTool_ValueError("Too many transitions found: Tolerance too large?")
    else:
        if len(leavers) == 1:
            return ('leave', leavers[0])
        else:
            return ('join', joiners[0])


def transition_tau(epoch, pt, atol):
    """Assume that, generically, there will only be one transition at a time.
    """
    if epoch.slow == []:
        slow = []
        slow_plain = []
    else:
        slow = pt[epoch.inv_tau_namemap(epoch.slow)]
        slow_plain = epoch.inv_tau_namemap.inverse()(slow.coordnames)
    if epoch.fast == []:
        fast = []
        fast_plain = []
    else:
        fast = pt[epoch.inv_tau_namemap(epoch.fast)]
        fast_plain = epoch.inv_tau_namemap.inverse()(fast.coordnames)
    if epoch.order1 == []:
        order1 = []
        order1_plain = []
    else:
        order1 = pt[epoch.inv_tau_namemap(epoch.order1)]
        order1_plain = epoch.inv_tau_namemap.inverse()(order1.coordnames)
    slow_leavers = []
    slow_joiners = []
    max_ix = np.argmax(slow)
    min_ix = np.argmin(slow)
    # a slow variable leaves when its relative time scale falls to gamma
    if _comp(slow[min_ix], 1, epoch.gamma, atol):
        slow_leavers.append(slow_plain[min_ix])
    # an order-1 variable joins the slow set when its ratio reaches gamma
    for i, m in enumerate(order1):
        if _comp(m, 1, epoch.gamma, atol):
            slow_joiners.append(order1_plain[i])
    max_ix = np.argmax(fast)
    min_ix = np.argmin(fast)
    fast_leavers = []
    fast_joiners = []
    if _comp(1, fast[min_ix], epoch.gamma, atol):
        fast_leavers.append(fast_plain[min_ix])
    for i, m in enumerate(order1):
        if _comp(1, m, epoch.gamma, atol):
            fast_joiners.append(order1_plain[i])
    len_slow_l = len(slow_leavers)
    len_slow_j = len(slow_joiners)
    len_fast_l = len(fast_leavers)
    len_fast_j = len(fast_joiners)
    if len_slow_l + len_slow_j + len_fast_l + len_fast_j == 0:
        raise PyDSTool_ValueError("No transition found: Tolerance too small?")
    elif len_slow_l + len_slow_j + len_fast_l + len_fast_j > 1:
        raise PyDSTool_ValueError("Too many transitions found: Tolerance too large?")
    else:
        if len_slow_l == 1:
            return ('slow_leave', slow_leavers[0])
        elif len_slow_j == 1:
            return ('slow_join', slow_joiners[0])
        elif len_fast_l == 1:
            return ('fast_leave', fast_leavers[0])
        else:
            return ('fast_join', fast_joiners[0])


def check_opts(opts):
    # fill in default values for missing options and validate the keys
    def_vals = {'use_order': True, 'speed_fussy': True}
    ok_keys = def_vals.keys()
    if opts is None:
        return common.args(**def_vals)
    else:
        for k in ok_keys:
            if k not in opts.keys():
                opts[k] = def_vals[k]
        rem_keys = common.remain(opts.keys(), ok_keys)
        if rem_keys != []:
            raise ValueError("Invalid options passed in opts argument: %s" % rem_keys)
        else:
            return opts
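
# For example (a sketch):
#   check_opts(None)                   ->  args(use_order=True, speed_fussy=True)
#   check_opts({'use_order': False})   ->  {'use_order': False, 'speed_fussy': True}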


class domscales(object):
    def __init__(self, focus_var, traj_pts, influence_pts,
                 influence_type, tau_refs, psi_refs, opts=None):
        self.focus_var = focus_var
        self.traj_pts = traj_pts
        self.tau_refs = tau_refs
        # map 'tau_<var>' auxiliary coordinate names to plain variable names
        base = 'tau_'
        avoid_prefix = len(base)
        tau_namedict = {}
        for c in traj_pts.coordnames:
            if c[:avoid_prefix] == base:
                tau_namedict[c] = c[avoid_prefix:]
        self.tau_namemap = parseUtils.symbolMapClass(tau_namedict)
        self.psi_refs = psi_refs
        # likewise map 'psi_<focus_var>_<input>' names to plain input names
        base = 'psi_'+focus_var+'_'
        avoid_prefix = len(base)
        psi_namedict = {}
        for c in influence_pts.coordnames:
            psi_namedict[c] = c[avoid_prefix:]
        self.psi_namemap = parseUtils.symbolMapClass(psi_namedict)
        influence_pts.mapNames(self.psi_namemap)
        self.influence_pts = influence_pts
        # rank the influences at each time point, largest first
        ranks = np.argsort(influence_pts.coordarray.T)[:,::-1]
        greatest_vals = np.array([p[ranks[i,0]] for i, p in \
                                  enumerate(influence_pts.coordarray.T)])
        self.ranks = ranks
        self.greatest_vals = greatest_vals
        # normalize each time point's influences by the largest value there
        self.normed_coordarray = influence_pts.coordarray/greatest_vals
        self.influence_type = influence_type
        self.coordnames = influence_pts.coordnames
        self.num_pts = len(influence_pts)
        self.opts = check_opts(opts)

    def calc_epochs(self, sigma, gamma, relative_ratios=False):
        """Calculate epochs without cycle checking"""
        assert sigma > 1, "sigma parameter must be > 1"
        assert gamma > 1, "gamma parameter must be > 1"
        epochs = []
        old_actives = []
        old_fast = []
        old_slow = []
        # pool of variables seen as modulatory during the current epoch
        all_modulatory = {}
        ep_start = 0
        complete_epoch = False
        for i in xrange(self.num_pts):
            # group this time point's influences into active / modulatory /
            # inactive sets using the spectral gaps at threshold sigma
            ignore_change = False
            ds_pt = self.get_domscales_point(i, relative_ratios)
            groups = split(ds_pt, sigma)
            actives = groups[0].coordnames
            if not self.opts.use_order:
                actives.sort()
            if len(groups) > 1:
                modulatory = groups[1].coordnames
                if not self.opts.use_order:
                    modulatory.sort()
            else:
                modulatory = []
            if self.opts.speed_fussy:
                # a change of the fast/slow sets also ends an epoch
                tau = get_taus(self.traj_pts[i], self.tau_refs)
                tau_ref = tau[self.focus_var]
                rel_tau = tau/tau_ref
                slow_test = rel_tau/gamma > 1
                fast_test = rel_tau*gamma < 1
                cs = tau.coordnames
                slow = common.intersect([cs[ci] for ci in range(len(cs)) \
                                         if slow_test[ci]], actives)
                fast = common.intersect([cs[ci] for ci in range(len(cs)) \
                                         if fast_test[ci]], actives)
                if i == 0:
                    if self.num_pts == 1:
                        complete_epoch = True
                        act_vars = actives
                        mod_vars = modulatory
                        i_stop = 1
                elif not (len(old_fast) == len(fast) and \
                          common.remain(old_fast, fast) == [] and \
                          len(old_slow) == len(slow) and \
                          common.remain(old_slow, slow) == []):
                    # the speed sets changed
                    if i - ep_start > 1:
                        complete_epoch = True
                    else:
                        # change is too recent to count yet
                        ignore_change = True
                    act_vars = old_actives
                    mod_vars = all_modulatory.keys()
                    i_stop = i
                if ignore_change:
                    old_fast = fast
                    old_slow = slow
                else:
                    old_fast = common.makeSeqUnique(utils.union(old_fast, fast))
                    old_slow = common.makeSeqUnique(utils.union(old_slow, slow))
            if old_actives == actives and i < self.num_pts-1:
                all_modulatory.update(dict(zip(modulatory, enumerate(modulatory))))
            else:
                if old_actives == [] and self.num_pts > 1:
                    # first point: just start collecting modulatory variables
                    all_modulatory.update(dict(zip(modulatory, enumerate(modulatory))))
                elif i - ep_start == 1 and i < self.num_pts-1:
                    # single-point epochs are ignored as transients
                    all_modulatory.update(dict(zip(modulatory, enumerate(modulatory))))
                    ignore_change = True
                else:
                    if self.num_pts == 1:
                        act_vars = actives
                        mod_vars = modulatory
                        i_stop = 1
                    else:
                        act_vars = old_actives
                        mod_vars = all_modulatory.keys()
                        i_stop = i
                    complete_epoch = True
            if ignore_change:
                old_actives = common.makeSeqUnique(utils.union(old_actives, actives))
            else:
                # actives changed or end of data reached
                old_actives = actives
            if complete_epoch:
                # close the current epoch and store it
                complete_epoch = False
                influences = self.influence_pts[ep_start:i_stop]
                avg_influence = {}
                for a in act_vars:
                    avg_influence[a] = np.mean(influences[a])
                # order actives by decreasing average influence over the epoch
                act_vars = common.sortedDictLists(avg_influence, reverse=True)[0]
                avg_influence = {}
                not_mod = []
                for m in mod_vars:
                    avg = np.mean(influences[m])
                    if avg > 0:
                        avg_influence[m] = avg
                    else:
                        # zero average influence: demote from modulatory
                        not_mod.append(m)
                mod_vars = common.remain(mod_vars, not_mod)
                # order modulatory by decreasing average influence
                mod_vars = common.sortedDictLists(avg_influence,
                                                  reverse=True)[0]
                inact_vars = common.remain(self.coordnames,
                                           act_vars+mod_vars)
                pts = self.traj_pts[ep_start:i_stop]
                order1, slow, fast, ever_slow, ever_fast, rel_taus = \
                    self.calc_fast_slow(get_taus(pts, self.tau_refs),
                                        gamma)
                rel_taus.mapNames(self.tau_namemap)
                epochs.append(epoch(self.focus_var, sigma, gamma,
                                    act_vars, mod_vars, inact_vars,
                                    order1, slow, fast, ever_slow, ever_fast,
                                    self.ranks[ep_start:i_stop,:],
                                    ep_start, i_stop-1,
                                    self.influence_pts[ep_start:i_stop], rel_taus,
                                    pts, relative_ratios=ds_pt.relative_ratios,
                                    inv_psi_namemap=self.psi_namemap.inverse(),
                                    inv_tau_namemap=self.tau_namemap.inverse(),
                                    opts=self.opts))
                ep_start = i
                all_modulatory = {}
        self.epochs = epochs

    def calc_fast_slow(self, taus, gamma):
        """Determines variables which are always fast and slow, and those
        which are ever fast and slow, over a given set of time scale points.
        """
        focus_taus = taus[self.focus_var]
        rel_taus = taus/focus_taus
        cs = taus.coordnames
        ixs = xrange(len(cs))
        n = xrange(len(taus))
        # a variable is slow/fast at a point if its time scale relative to
        # the focus variable exceeds gamma / falls below 1/gamma
        slow_test = rel_taus/gamma > 1
        fast_test = rel_taus*gamma < 1
        slow_array = [[i for i in ixs if slow_test[i,j]] for j in n]
        fast_array = [[i for i in ixs if fast_test[i,j]] for j in n]
        slow_ixs = ever_slow_ixs = slow_array[0]
        for row in slow_array[1:]:
            slow_ixs = utils.intersect(slow_ixs, row)
            ever_slow_ixs = np.unique(utils.union(ever_slow_ixs, row))
        slow_vars = [cs[i] for i in slow_ixs]
        ever_slow_vars = [cs[i] for i in ever_slow_ixs]
        fast_ixs = ever_fast_ixs = fast_array[0]
        for row in fast_array[1:]:
            fast_ixs = utils.intersect(fast_ixs, row)
            ever_fast_ixs = np.unique(utils.union(ever_fast_ixs, row))
        fast_vars = [cs[i] for i in fast_ixs]
        ever_fast_vars = [cs[i] for i in ever_fast_ixs]
        order1_vars = utils.remain(cs, slow_vars+fast_vars)
        return order1_vars, slow_vars, fast_vars, \
               list(ever_slow_vars), list(ever_fast_vars), rel_taus

    # NB: the names of the next three methods are assumed; the original def
    # lines were lost from this copy.
    def epoch_of_index(self, ix):
        """Locates index ix in the epochs, returning the epoch index,
        epoch object, and the index within the epoch's pointset data."""
        assert self.epochs
        for epix, ep in enumerate(self.epochs):
            if ix >= ep.index0 and ix <= ep.index1:
                return epix, ep, ix - ep.index0

    def epoch_of_time(self, t):
        """Locates time t in the epochs, returning the epoch index,
        epoch object, and the approximate corresponding index within
        the epoch's pointset data.
        Note, because of the time discretization, time is compared up to
        the next epoch's start time."""
        assert self.epochs
        for epix, ep in enumerate(self.epochs):
            try:
                next_time = self.epochs[epix+1].t0
                comp = operator.lt
            except IndexError:
                next_time = ep.t1
                comp = operator.le
            if t >= ep.t0 and comp(t, next_time):
                ix = ep.influences.find(t, end=0)
                return epix, ep, ix

    def calc_regimes(self):
        assert self.epochs
        raise NotImplementedError

    def get_domscales_point(self, i, relative_ratios):
        ds_pt = common.args(relative_ratios=relative_ratios)
        ds_pt.coordnames = list(np.array(self.coordnames)[self.ranks[i]])
        # influence values at time index i, ordered largest first
        ds_pt.influences = list(self.influence_pts.coordarray[self.ranks[i],i])
        try:
            ds_pt.diameter = ds_pt.influences[0]/ds_pt.influences[-1]
        except ZeroDivisionError:
            ds_pt.diameter = np.inf
        ds_pt.relative_influences = list(self.normed_coordarray[self.ranks[i],i])
        if relative_ratios:
            as_ratios = as_relative_ratios
        else:
            as_ratios = as_absolute_ratios
        ds_pt.as_ratios = as_ratios(ds_pt.influences)
        return ds_pt


def define_psi_events(acts_all, mods_all, focus_var, ignore_transitions=None):
    if ignore_transitions is None:
        acts_leave = acts_all
        mods_join = mods_all
    else:
        remove_acts = []
        remove_mods = []
        for trans, var in ignore_transitions:
            if trans == 'leave':
                remove_acts.append(var)
            elif trans == 'join':
                remove_mods.append(var)
            else:
                raise ValueError("Invalid transition type: %s" % trans)
        acts_leave = [a for a in acts_all if a not in remove_acts]
        mods_join = [m for m in mods_all if m not in remove_mods]
    act_coordlist = ['psi_'+focus_var+'_'+c for c in acts_all]
    act_leave_coordlist = ['psi_'+focus_var+'_'+c for c in acts_leave]
    mod_coordlist = ['psi_'+focus_var+'_'+c for c in mods_join]
    evdefs = {}
    if len(act_leave_coordlist) > 0 and len(act_coordlist) > 0:
        if len(act_coordlist) == 1:
            term1 = '(' + str(act_coordlist[0]) + ')'
        else:
            term1 = "max( [ %s ] )" % ",".join(act_coordlist)
        if len(act_leave_coordlist) == 1:
            term2 = '(' + str(act_leave_coordlist[0]) + ')'
        else:
            term2 = "min( [ %s ] )" % ",".join(act_leave_coordlist)
        act_leave_def = "%s / %s - dssrt_sigma" % (term1, term2)
        evdefs['act_leave_ev'] = common.args(defn=act_leave_def, dirn=1,
                                             pars=['dssrt_sigma'])
    if len(act_coordlist) > 0 and len(mod_coordlist) > 0:
        if len(act_coordlist) == 1:
            term1 = '(' + str(act_coordlist[0]) + ')'
        else:
            term1 = "max( [ %s ] )" % ",".join(act_coordlist)
        if len(mod_coordlist) == 1:
            term2 = '(' + str(mod_coordlist[0]) + ')'
        else:
            term2 = "max( [ %s ] )" % ",".join(mod_coordlist)
        mod_join_def = "%s / %s - dssrt_sigma" % (term1, term2)
        evdefs['mod_join_ev'] = common.args(defn=mod_join_def, dirn=-1,
                                            pars=['dssrt_sigma'])
    return evdefs
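
# For example (a sketch): with acts_all=['m', 'h'], mods_all=['n'] and
# focus_var='v', the returned 'act_leave_ev' definition string is
#   max( [ psi_v_m,psi_v_h ] ) / min( [ psi_v_m,psi_v_h ] ) - dssrt_sigma
# with direction +1, i.e. it fires when the spread of the active group's
# Psi values grows past sigma.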


def define_tau_events(slow, fast, order1, ref):
    # NB: the argument order here is assumed; the original def line was
    # lost from this copy.
    ref_name = 'tau_'+ref
    assert ref in order1, "reference time scale must be in order1 list"
    evdefs = {}
    if slow != []:
        slow_list = ", ".join(['tau_'+c for c in slow])
        if len(slow) == 1:
            slow_leave_def = "(%s)/(%s) - dssrt_gamma" % (slow_list, ref_name)
        else:
            slow_leave_def = "min([ %s ])/(%s) - dssrt_gamma" % (slow_list, ref_name)
        evdefs['slow_leave_ev'] = common.args(defn=slow_leave_def, dirn=-1,
                                              pars=['dssrt_gamma'])
    if fast != []:
        fast_list = ", ".join(['tau_'+c for c in fast])
        if len(fast) == 1:
            fast_leave_def = "(%s)/(%s) - 1./dssrt_gamma" % (fast_list, ref_name)
        else:
            fast_leave_def = "max([ %s ])/(%s) - 1./dssrt_gamma" % (fast_list, ref_name)
        evdefs['fast_leave_ev'] = common.args(defn=fast_leave_def, dirn=1,
                                              pars=['dssrt_gamma'])
    other_o1 = common.remain(order1, [ref])
    if other_o1 != []:
        o1_list = ", ".join(['tau_'+c for c in other_o1])
        if len(other_o1) == 1:
            slow_join_def = "(%s)/(%s) - dssrt_gamma" % (o1_list, ref_name)
            fast_join_def = "(%s)/(%s) - 1./dssrt_gamma" % (o1_list, ref_name)
        else:
            slow_join_def = "max([ %s ])/(%s) - dssrt_gamma" % (o1_list, ref_name)
            fast_join_def = "min([ %s ])/(%s) - 1./dssrt_gamma" % (o1_list, ref_name)
        evdefs['slow_join_ev'] = common.args(defn=slow_join_def, dirn=1,
                                             pars=['dssrt_gamma'])
        evdefs['fast_join_ev'] = common.args(defn=fast_join_def, dirn=-1,
                                             pars=['dssrt_gamma'])
    return evdefs


class dssrt_assistant(object):
    def __init__(self, **kw):
        model = kw['model']
        if isinstance(model, HybridModel):
            # only single-generator hybrid models are supported
            assert len(model.registry) == 1
            self.model = model.registry.values()[0]
            self.gen = self.model.registry.values()[0]
        elif isinstance(model, NonHybridModel):
            assert len(model.registry) == 1
            self.model = model
            self.gen = self.model.registry.values()[0]
        else:
            # assume a raw Generator was passed
            self.model = None
            self.gen = model
        FScompatMap = self.gen._FScompatibleNames
        self.reset()
        self.gamma1 = {}
        self.gamma2 = {}
        self.pars = self.gen.pars
        if self.model is None or self.model._mspecdict is None:
            # inputs must be supplied explicitly: a dict mapping each
            # variable to a record with gamma1 and/or gamma2 input lists
            inputs = kw['inputs']
            for k, gam_inps in inputs.items():
                gamma1 = []
                try:
                    for i in gam_inps.gamma1:
                        if '.' in i and i not in FScompatMap:
                            # register FuncSpec-compatible aliases for
                            # hierarchical names
                            i_FSc = i.replace('.','_')
                            FScompatMap[i] = i_FSc
                        gamma1.append(i)
                except AttributeError:
                    pass
                self.gamma1[k] = gamma1
                gamma2 = []
                try:
                    for i in gam_inps.gamma2:
                        if '.' in i and i not in FScompatMap:
                            i_FSc = i.replace('.','_')
                            FScompatMap[i] = i_FSc
                        gamma2.append(i)
                except AttributeError:
                    pass
                self.gamma2[k] = gamma2
            self.all_vars = inputs.keys()
        else:
            # infer the gamma input sets from the ModelSpec, if available
            self._init_from_MSpec(self.model._mspecdict.values()[0]['modelspec'])
        all_inputs = []
        for ins in self.gamma1.values() + self.gamma2.values():
            try:
                all_inputs.extend(ins)
            except TypeError:
                # entry was None
                pass
        self._FScompatMap = FScompatMap
        self._FScompatMapInv = FScompatMap.inverse()
        self.all_inputs = np.unique(all_inputs).tolist()
        self.tau_refs = kw['taus']
        self.inf_refs = kw['infs']
        self.psi_refs = kw['psis']
        opts = common.args()
        try:
            opts.use_order = kw['use_order']
        except KeyError:
            pass
        try:
            opts.speed_fussy = kw['speed_fussy']
        except KeyError:
            pass
        self.opts = opts
        self._setup_taus_psis()
982
984 d = copy.copy(self.__dict__)
985 del d['taus']
986 del d['taus_direct']
987 del d['infs']
988 del d['infs_direct']
989 del d['psis']
990 del d['psis_direct']
991 return d
992
996

    def _setup_taus_psis(self):
        # Build the symbolic functions for the tau, inf and psi definitions.
        # The "direct" variants call user-supplied auxiliary functions named
        # with the 'dssrt_fn_' prefix, so that quantities can be evaluated
        # from state variables alone (without pre-computed aux variables).
        self._direct_fail = False
        d = {}
        fspecs = self.gen.funcspec._auxfnspecs
        fn_base = 'dssrt_fn_'
        for var, tau in self.tau_refs.items():
            if tau is None:
                continue
            # hierarchical names like 'cell.v' become underscore-joined
            # prefixes of the auxiliary function names
            v_hier_list = var.split('.')
            prefix = '_'.join(v_hier_list[:-1])
            var_postfix = v_hier_list[-1]
            if len(prefix) > 0:
                fname = prefix+'_'+fn_base+'tau'+var_postfix
            else:
                fname = fn_base+'tau'+var_postfix
            inps_to_var = self.gamma1[var] + self.gamma2[var]
            try:
                sig = fspecs[fname][0]
            except KeyError:
                # user did not provide the dssrt_fn auxiliary functions
                self._direct_fail = True
                break
            # restore hierarchical input names in the call signature
            for inp in inps_to_var:
                inp_hier_list = inp.split('.')
                try:
                    ix = sig.index(inp_hier_list[-1])
                except ValueError:
                    pass
                else:
                    sig[ix] = inp
            d[tau.replace('.','_')] = fname+'(' + ",".join(sig) + ')'

        for var, inf in self.inf_refs.items():
            if inf is None:
                continue
            v_hier_list = var.split('.')
            prefix = '_'.join(v_hier_list[:-1])
            var_postfix = v_hier_list[-1]
            if len(prefix) > 0:
                fname = prefix+'_'+fn_base+var_postfix+'inf'
            else:
                fname = fn_base+var_postfix+'inf'
            inps_to_var = self.gamma1[var] + self.gamma2[var]
            try:
                sig = fspecs[fname][0]
            except KeyError:
                self._direct_fail = True
                break
            for inp in inps_to_var:
                inp_hier_list = inp.split('.')
                try:
                    ix = sig.index(inp_hier_list[-1])
                except ValueError:
                    pass
                else:
                    sig[ix] = inp
            d[inf.replace('.','_')] = fname+'(' + ",".join(sig) + ')'

        self._direct_ref_mapping = parseUtils.symbolMapClass(d)
        FScMap = self._FScompatMap
        self.taus = {}
        self.taus_direct = {}
        for k, v in self.tau_refs.items():
            self.taus[k], self.taus_direct[k] = self._process_expr(FScMap(v))

        self.infs = {}
        self.infs_direct = {}
        for k, v in self.inf_refs.items():
            self.infs[k], self.infs_direct[k] = self._process_expr(FScMap(v))

        self.psis = {}
        self.psis_direct = {}
        for k, vdict in self.psi_refs.items():
            if vdict is None:
                self.psis[k], self.psis_direct[k] = (None, None)
            else:
                self.psis[k] = {}
                self.psis_direct[k] = {}
                for inp, psiv in vdict.items():
                    self.psis[k][inp], self.psis_direct[k][inp] = \
                        self._process_expr(FScMap(psiv))

    def _init_from_MSpec(self, mspec):
        # TO DO: determine the gamma input sets directly from the
        # ModelSpec structure
        raise NotImplementedError

    def _process_expr(self, expr):
        """Create two Symbolic.Fun(ction) versions of the given expression:

        (1) The first is intended for processing post-integration
        pointsets to yield the dominant scale Psi quantities,
        when the pointsets contain already-computed auxiliary variables
        for taus, infs, etc.

        (2) The second is intended for on-demand calculation of Psi's
        when only the system's state variables are given, thus
        taus and infs etc. must be computed by this class. If the user did
        not provide the dssrt_fn functions this will be ignored and not
        created.

        The returned pair of pairs is
        ((arg_list_signature (1), symbolic function (1)),
         (arg_list_signature (2), symbolic function (2)))
        unless None was passed (in which case None is returned).
        """
        if expr is None:
            return (None, None)
        elif parseUtils.isNumericToken(expr):
            # a plain numeric constant
            f = Symbolic.expr2fun(expr)
            return ( ([], f), ([], f) )
        elif expr in self.pars:
            # a model parameter: look it up indirectly so that later
            # parameter changes are respected
            val = 'refpars["%s"]' % expr
            defs = {'refpars': self.pars}
            f = Symbolic.expr2fun(val, **defs)
            return ( ([], f), ([], f) )
        elif expr in self.gen.inputs:
            # an external input: a function of t only
            f = Symbolic.expr2fun(expr+'(t)')
            return ( (['t'], f), (['t'], f) )
        else:
            # a general expression: substitute parameter lookups and
            # append '(t)' to any external input names used
            fnames = self.gen.auxfns.keys()
            themap = dict(self.gen.auxfns.items())
            themap.update(dict(self.gen.inputs.items()))
            defs = {'refpars': self.pars}
            pnames = self.pars.keys()
            pvals = ['refpars["%s"]' % pn for pn in pnames]
            val = expr
            val_q = Symbolic.QuantSpec('__expr__', val)
            mapping = dict(zip(pnames, pvals))
            used_inputs = utils.intersect(val_q.parser.tokenized,
                                          self.gen.inputs.keys())
            for inp in used_inputs:
                mapping[inp] = inp + '(t)'
            val_q.mapNames(mapping)
            if not self._direct_fail:
                # the "direct" version calls the user-supplied dssrt_fn
                # auxiliary functions in place of tau/inf references
                val_q_direct = copy.copy(val_q)
                val_q_direct.mapNames(self._direct_ref_mapping)
            defs.update(themap)
            if val in defs:
                # the expression is just the name of an auxiliary function
                # or input: return a copy of it directly
                fn = copy.copy(defs[val])
                del defs[val]
                return ( (fn._args, fn), (fn._args, fn) )
            else:
                fn = Symbolic.expr2fun(str(val_q), **defs)
                if not self._direct_fail:
                    fn_direct = Symbolic.expr2fun(str(val_q_direct), **defs)
                    return ( (fn._args, fn), (fn_direct._args, fn_direct) )
                else:
                    return ( (fn._args, fn), (None, None) )

    def reset(self):
        """Delete current state based on given trajectory"""
        self.traj = None
        self.pts = None
        self.times = None
        self.focus_var = None
        self.tau_inf_vals = {}
        self.psi_vals = {}
        self.Is = {}
        self.pars = None

    def ensure(self):
        assert self.gen is not None, "Must have a valid generator attribute"
        assert self.traj is not None, "Must provide trajectory attribute"
        assert self.focus_var is not None, "Must provide focus_var attribute"
        if self.pts is None:
            self.pts = self.traj.sample()
        if self.times is None:
            self.times = self.pts.indepvararray
        self.pars = self.gen.pars
1227 """Calculate tau of target variable directly from a given single point
1228 containing only the state variables.
1229 """
1230 assert not self._direct_fail, "Option not available"
1231 ptFS = self._FScompatMap(pt)
1232 try:
1233 args, f = self.taus_direct[target]
1234 except TypeError:
1235
1236 return np.NaN
1237 else:
1238 try:
1239 return f(**common.filteredDict(ptFS, args))
1240 except:
1241 print f._args
1242 print f._call_spec
1243 raise
1244
1245
1247 """Calculate inf of target variable directly from a given single point
1248 containing only the state variables.
1249 """
1250 assert not self._direct_fail, "Option not available"
1251 ptFS = self._FScompatMap(pt)
1252 try:
1253 args, f = self.infs_direct[target]
1254 except TypeError:
1255
1256 return np.NaN
1257 else:
1258 try:
1259 return f(**common.filteredDict(ptFS, args))
1260 except:
1261 print f._args
1262 print f._call_spec
1263 raise
1264

    def calc_psi(self, target, pt, focus=None):
        """Calculate Psi of focus variable w.r.t. the target variable directly
        from a given single point containing only the state variables.

        If focus is None (default) then focus_var attribute is assumed.
        """
        assert not self._direct_fail, "Option not available"
        ptFS = self._FScompatMap(pt)
        if focus is None:
            focus = self.focus_var
        try:
            args, f = self.psis_direct[focus][target]
        except TypeError:
            # no psi defined for this input
            return np.NaN
        except KeyError:
            raise ValueError("Invalid focus or target variable")
        else:
            try:
                return f(**common.filteredDict(ptFS, args))
            except:
                print f._args
                print f._call_spec
                raise

    def calc_taus_infs(self, focus=False):
        """Calculate taus and infs from given trajectory.

        If focus=True (default False), results are only returned for the
        variable of focus given by focus_var attribute"""
        self.ensure()
        vals = {}
        if focus:
            vars = [self.focus_var]
        else:
            vars = self.taus.keys()
        pts = self._FScompatMap(self.pts)
        for k in vars:
            tau_info = self.taus[k]
            if tau_info is None:
                vals['tau_'+k] = np.array([np.NaN for pt in pts])
            else:
                args, f = tau_info
                try:
                    vals['tau_'+k] = np.array([f(**common.filteredDict(pt, args)) \
                                               for pt in pts])
                except:
                    print f._args
                    print f._call_spec
                    raise
            inf_info = self.infs[k]
            if inf_info is None:
                vals['inf_'+k] = np.array([np.NaN for pt in pts])
            else:
                args, f = inf_info
                try:
                    vals['inf_'+k] = np.array([f(**common.filteredDict(pt, args)) \
                                               for pt in pts])
                except:
                    print f._args
                    print f._call_spec
                    raise
        self.tau_inf_vals.update(vals)

    def calc_Is(self):   # name assumed; original def line lost from this copy
        """Calculate currents from given trajectory"""
        raise NotImplementedError
1343 """Calculate Psis from given trajectory"""
1344 self.ensure()
1345 FScMap = self._FScompatMap
1346 fv = self.focus_var
1347 if self.tau_inf_vals == {}:
1348 self.calc_taus_infs(focus=True)
1349 psi_defs = self.psis[fv]
1350 if psi_defs is None:
1351 self.psi_vals[fv] = None
1352 return
1353 psis = {}
1354 all_var_inputs = self.gamma1[fv] + self.gamma2[fv]
1355 pts = FScMap(self.pts)
1356 ts = pts.indepvararray.copy()
1357 ts.shape = (ts.shape[0],1)
1358 c = pts.coordarray.T
1359 all_coords = np.concatenate((c, ts), axis=1)
1360
1361
1362
1363 for inp in all_var_inputs:
1364 if inp not in psi_defs:
1365 continue
1366 psi_info = psi_defs[inp]
1367 if psi_info is None:
1368 psis[inp] = np.array([NaN]*len(pts))
1369 else:
1370 args, f = psi_info
1371 if 't' in args:
1372 tix = args.index('t')
1373 args.remove('t')
1374 arg_ixs = pts._map_names_to_ixs(args)
1375 arg_ixs.insert(tix, all_coords.shape[1]-1)
1376 else:
1377 arg_ixs = pts._map_names_to_ixs(args)
1378 try:
1379 psis[inp] = np.array([f(*val_array.flatten()) \
1380 for val_array in all_coords[:,[arg_ixs]]])
1381 except:
1382 print f._args
1383 print f._call_spec
1384 raise
1385 self.psi_vals[fv] = psis
1386

    def make_pointsets(self):
        self.ensure()
        fv = self.focus_var
        assert self.psi_vals[fv] != {}
        assert self.tau_inf_vals != {}
        all_vals = {}
        all_vals.update(self.tau_inf_vals)
        base = 'psi_'+fv
        for inp, psis in self.psi_vals[fv].items():
            all_vals[base+'_'+inp] = psis
        self.psi_pts = Pointset(indepvararray=self.times, coorddict=all_vals)

    def make_domscales(self, influence_type='psi'):
        # NB: method name and default argument assumed; the original def
        # line was lost from this copy.
        self.ensure()
        if not hasattr(self, influence_type+'_pts'):
            raise PyDSTool_ExistError("Call make_pointsets first")
        else:
            influence_pts = getattr(self, influence_type+'_pts')
            varnames = influence_pts.coordnames
            # keep only the influence coordinates, discarding the tau_ and
            # inf_ auxiliaries that share the pointset
            influence_pts = influence_pts[[v for v in varnames \
                                           if v[:3] == influence_type]]
            self.domscales = {influence_type: domscales(self.focus_var, self.pts,
                                              influence_pts, influence_type,
                                              self.tau_refs, self.psi_refs,
                                              self.opts)}
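
    # A typical workflow, as a sketch (the keyword values and trajectory
    # name are hypothetical, and make_domscales is a reconstructed name):
    #   da = dssrt_assistant(model=model, inputs=inputs,
    #                        taus=taus, infs=infs, psis=psis)
    #   da.focus_var = 'v'
    #   da.traj = traj
    #   da.calc_psis()
    #   da.make_pointsets()
    #   da.make_domscales('psi')
    #   da.domscales['psi'].calc_epochs(sigma=3, gamma=3)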

    def make_reduced_system(self, vars):
        """Doesn't allow fast vars to be algebraic - a better solution is to
        use ModelTransforms.
        (NB: method name assumed; the original def line was lost.)"""
        gen = self.gen
        name = "reduced_"+gen.funcspec._initargs['name']+"_"+"_".join(vars)
        varspecs = {}
        for xname in vars + gen.funcspec._initargs['auxvars']:
            varspecs[xname] = gen.funcspec._initargs['varspecs'][xname]
        sysargs = common.args(name=name, pars=gen.pars,
                              ics=common.filteredDict(gen.initialconditions, vars),
                              varspecs=varspecs,
                              auxvars=gen.funcspec._initargs['auxvars'],
                              fnspecs=gen.funcspec._initargs['fnspecs'])
        # treat non-finite initial conditions as parameters of the reduced system
        nan_auxs = [xname for xname, val in gen.initialconditions.iteritems() \
                    if not np.isfinite(val)]
        sysargs.pars.update(common.filteredDict(gen.initialconditions,
                                                vars+nan_auxs, neg=True))
        return Vode_ODEsystem(sysargs)


# NB: the bodies of get_taus and get_infs were lost from this copy. These
# minimal reconstructions extract the 'tau_'/'inf_' auxiliary coordinates
# from a point or pointset and strip the prefixes, which is the behavior
# that calc_epochs and calc_fast_slow rely on.
def get_taus(pts, tau_refs):
    vars = [v for v, tau in tau_refs.items() if tau is not None]
    taus = pts[['tau_'+v for v in vars]]
    taus.mapNames(parseUtils.symbolMapClass(dict([('tau_'+v, v) for v in vars])))
    return taus


def get_infs(pts, inf_refs):
    vars = [v for v, inf in inf_refs.items() if inf is not None]
    infs = pts[['inf_'+v for v in vars]]
    infs.mapNames(parseUtils.symbolMapClass(dict([('inf_'+v, v) for v in vars])))
    return infs


def split_pts(pts, interval, reset_t0=None):
    """Return trajectory made up from pts argument restricted to the given
    interval.

    Use reset_t0 = T (float) to reset output trajectory t0 to T.
    Default is to use native time frame of pts argument."""
    t0, t1 = interval
    ix0 = pts.find(t0)
    if not isinstance(ix0, int):
        ix0 = ix0[0]
    ix1 = pts.find(t1)
    if not isinstance(ix1, int):
        ix1 = ix1[1]
    try:
        vals = pts[ix0:ix1].coordarray.copy()
    except ValueError:
        print ix0, ix1
        raise
    ts = pts.indepvararray[ix0:ix1].copy()
    if reset_t0 is not None:
        ts += (reset_t0-ts[0])
    return numeric_to_traj(vals, 'split_%i_%i' % (ix0,ix1),
                           pts.coordnames,
                           indepvar=ts, discrete=False)


def find_epoch_period(epochs, verbose=False):
    cycle_len = None
    ixs = []
    # look for the first repeating sub-sequence of epochs
    for i, ep0 in enumerate(epochs[1:-1]):
        if verbose:
            print "start epoch = ", i+1
        for j, ep in enumerate(epochs[i+1:]):
            if verbose:
                print "end epoch =", i+j+1
            try:
                res = [epochs[k]==epochs[k+j+1] for k in range(i,j)]
            except IndexError:
                # candidate period is too long to check against the data
                continue
            else:
                if verbose:
                    print i, j, res
                if np.all(res) and j > i+1:
                    if verbose:
                        print "Found period between epochs", i+1, j+2+i, " len=", j+1
                    cycle_len = j+1
                    ixs = [i+1, i+j+2]
                    return cycle_len, ixs
    return cycle_len, ixs


def split(ds_pt, thresh):
    """Split this grouping of values into sub-groups according to spectral
    gaps at threshold thresh"""
    sub_groups = []
    if ds_pt.relative_ratios:
        spectral_gaps = spectral_gaps_relative
    else:
        spectral_gaps = spectral_gaps_absolute
    sg = spectral_gaps(ds_pt.as_ratios, thresh)
    for ix_range in partition_range((0, len(ds_pt.influences)), sg):
        if ix_range[0] == ix_range[1]:
            continue
        sub_slice = slice(ix_range[0], ix_range[1])
        data = common.args()
        data.influences = ds_pt.influences[sub_slice]
        data.coordnames = ds_pt.coordnames[sub_slice]
        sub_groups.append(data)
    return sub_groups


def as_absolute_ratios(vals):
    """Express a single time point's values as ratios of the largest (>1).
    The first entry is the ratio of the largest to itself == 1.
    """
    try:
        return [1.0]+[vals[0]/v for v in vals[1:]]
    except ZeroDivisionError:
        rats = [1.0]
        for i in range(1,len(vals)):
            if vals[i] > 0:
                rats.append(vals[0]/vals[i])
            else:
                # an exactly-zero influence yields an infinite ratio
                rats.append(np.inf)
        return rats


def as_relative_ratios(vals):
    """Express a single time point's relative values as successive ratios (>1).
    The first entry is the ratio of the largest to itself == 1.
    """
    try:
        return [1.0]+[vals[i-1]/vals[i] \
                      for i in range(1,len(vals))]
    except ZeroDivisionError:
        rats = [1.0]
        for i in range(1,len(vals)):
            if vals[i] > 0:
                rats.append(vals[i-1]/vals[i])
            else:
                rats.append(np.inf)
        return rats
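
# For example:
#   as_absolute_ratios([4., 2., 1.])  ->  [1.0, 2.0, 4.0]   (each vs. largest)
#   as_relative_ratios([4., 2., 1.])  ->  [1.0, 2.0, 2.0]   (successive ratios)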
1562 """List of indices of spectral gaps larger than thresh in size"""
1563
1564 return [i for i, is_gap in enumerate(np.asarray(as_rel_ratios) >= thresh) if is_gap]
1565
1566
1568 """List of indices of spectral gaps larger than thresh in size"""
1569 as_abs_ratios = np.asarray(as_abs_ratios)
1570 gaps = []
1571 while True:
1572 test = np.array(as_abs_ratios >= thresh, int)
1573 if 1 in test:
1574
1575 cutoff_ix = np.argmax(test)
1576 else:
1577
1578 break
1579 if gaps == []:
1580 gaps.append(cutoff_ix)
1581 else:
1582 gaps.append(cutoff_ix+gaps[-1])
1583 as_abs_ratios = as_abs_ratios[cutoff_ix:]
1584 if len(common.remain(as_abs_ratios, [np.inf])) <= 1:
1585 break
1586 else:
1587 as_abs_ratios /= min(as_abs_ratios)
1588 return gaps
1589
1590
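
# For example, with influences [4, 2, 1] and threshold 2:
#   spectral_gaps_absolute(as_absolute_ratios([4., 2., 1.]), 2)  ->  [1, 2]
# i.e. gaps fall before the second and third entries, so each influence
# forms its own group.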


def partition_range(range_tuple, split_ixs):
    """Partition the half-open index range given by range_tuple into
    contiguous (start, end) pieces, split at the indices in split_ixs."""
    if not range_tuple[0] < range_tuple[1]:
        raise ValueError("Range tuple not in increasing order")
    if not np.alltrue([ix in xrange(range_tuple[0], range_tuple[1]) \
                       for ix in split_ixs]):
        raise ValueError("Split indices out of range")
    if range_tuple[0] in split_ixs or range_tuple[1] in split_ixs:
        raise ValueError("Split indices out of range")
    if split_ixs == []:
        partition = [range_tuple]
    else:
        # consume the split indices in increasing order
        split_ixs.sort()
        split_ixs.reverse()
        end_ix = split_ixs.pop()
        partition = [(range_tuple[0], end_ix)]
        while split_ixs != []:
            next_split = (end_ix, split_ixs.pop())
            end_ix = next_split[1]
            partition.append(next_split)
        partition.append((end_ix, range_tuple[1]))
    return partition
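
# For example:
#   partition_range((0, 5), [2, 3])  ->  [(0, 2), (2, 3), (3, 5)]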


def normalized_psis(epochs, root, midpoint_only=True, small=1e-8):
    """Returns Pointset of normalized* psis based on contents of the 'epochs'
    argument, e.g. the epochs attribute computed by
    da.domscales['psi'].calc_epochs(), where da is a dssrt_assistant object.

    * Largest influence always normalized to 1 for every time point.

    (NB: the default argument values here are assumed; the original def line
    was lost from this copy.)
    """
    def zero_to_small(v):
        # guard against zero influences so that ratios stay finite
        if v == 0:
            return small
        else:
            return v

    inpnames = epochs[0].influences[0].coordnames
    res = []
    ts = []
    if midpoint_only:
        for ep in epochs:
            numpts = len(ep.influences)
            midpt = int(numpts/2)
            ts.append(ep.influences['t'][midpt])
            repinf = ep.influences[midpt]
            ranks = ep.ranks[midpt]
            inv_ranks = common.invertMap(ranks)
            vals_ord = np.take(repinf.coordarray, ranks)
            ratios = np.array([1.0] + [zero_to_small(v)/vals_ord[0] \
                                       for v in vals_ord[1:]])
            res.append(np.take(ratios, common.sortedDictValues(inv_ranks)))
    else:
        for ep in epochs:
            numpts = len(ep.influences)
            for nix in range(numpts):
                ts.append(ep.influences['t'][nix])
                repinf = ep.influences[nix]
                ranks = ep.ranks[nix]
                inv_ranks = common.invertMap(ranks)
                vals_ord = np.take(repinf.coordarray, ranks)
                ratios = np.array([1.0] + [zero_to_small(v)/vals_ord[0] \
                                           for v in vals_ord[1:]])
                res.append(np.take(ratios, common.sortedDictValues(inv_ranks)))
    coordnames = ['psi_%s_%s'%(root, i) for i in inpnames]
    return Pointset(coordarray=np.array(res).T, indepvararray=ts,
                    coordnames=coordnames)


def show_epochs(eps):
    "Small utility to print more detailed information about epochs"
    for i, ep in enumerate(eps):
        print i,
        ep.info()


def plot_psis(da, cols=None, do_vars=None, do_log=True, use_prefix=True):
    """da is a dssrt_assistant object.
    cols is an optional list of color/style character codes, one per
    plotted Psi.
    Pass a do_vars list of Psi names to restrict the plot, otherwise all
    will be plotted.
    Option to plot on a vertical log scale.

    Option to switch off 'psi_' + da's focus_var as prefix for coordnames.

    Requires matplotlib.
    """
    from PyDSTool.matplotlib_import import plot
    pts = da.psi_pts
    if use_prefix:
        root = 'psi_'+da.focus_var+'_'
    else:
        root = ''
    ts = pts['t']
    if do_vars is None:
        do_vars = [c[len(root):] for c in pts.coordnames if root in c]
    if cols is None:
        # default palette: all combinations of these colors and line styles
        colvals = ['g', 'r', 'k', 'b', 'c', 'y', 'm']
        styles = ['-', ':', '--']
        cols = []
        for s in styles:
            for c in colvals:
                cols.append(c+s)
    if len(do_vars) > len(cols):
        raise ValueError("Max number of permitted variables for these colors/styles is %i" % len(cols))
    print "Color scheme:"
    if do_log:
        for i, v in enumerate(do_vars):
            if root+v in pts.coordnames:
                print " ", v, cols[i]
                plot(ts, np.log(pts[root+v]), cols[i])
    else:
        for i, v in enumerate(do_vars):
            if root+v in pts.coordnames:
                print " ", v, cols[i]
                plot(ts, pts[root+v], cols[i])


# symbolic comparison scores used by the sequence scorers
MATCH = 'match'
MISMATCH = 'mismatch'
CLOSE = 'close'
FAR = 'far'
VCLOSE = 'vclose'
VFAR = 'vfar'
GAP = 'gap'
APPROX = 'approx'


class Scorer(object):
    def __init__(self, alphabet):
        self.match = None
        self.approx = None
        self.mismatch = None
        self.gap = None
        self.extension = None
        self.alphabet = alphabet
        pairs = utils.cartesianProduct(alphabet.all_vars, alphabet.all_vars)
        self.score_dict = {}.fromkeys(pairs, None)

    def __setitem__(self, k, v):
        if k in self.score_dict:
            self.score_dict[k] = v
            # scores are symmetric
            a, b = k
            self.score_dict[(b, a)] = v
        else:
            raise KeyError("Invalid key %s" % k)

    def __getitem__(self, k):
        return getattr(self, self.score_dict[k])


class EpochSeqScorer(Scorer):
    def __init__(self, alphabet):
        Scorer.__init__(self, alphabet)
        self.match = 5
        self.gap = -2
        self.mismatch = -4
        self.extension = -1

        fast = alphabet.fast
        slow = alphabet.slow

        # s1 scores alignments restricted to the dynamic variables
        s1 = Scorer(alphabet)
        s1.match = 5
        s1.vclose = 4
        s1.close = 2
        s1.far = -1
        s1.vfar = -3
        s1.mismatch = -5
        s1.gap = -2
        s1.extension = -1

        # s2 uses gentler, non-negative scores for full-sequence comparison
        s2 = Scorer(alphabet)
        s2.gap = 1
        s2.mismatch = 0
        s2.far = 2
        s2.vfar = 1
        s2.close = 3
        s2.vclose = 4
        s2.match = 6
        s2.extension = 0

        for a in alphabet.dynamic_vars:
            s1[(a,a)] = MATCH
            s1[(fast(a),fast(a))] = MATCH
            s1[(slow(a),slow(a))] = MATCH
            s1[(fast(a),a)] = VCLOSE
            s1[(slow(a),a)] = VCLOSE
            s1[(slow(a),fast(a))] = CLOSE

            s2[(a,a)] = MATCH
            s2[(fast(a),fast(a))] = MATCH
            s2[(slow(a),slow(a))] = MATCH
            s2[(fast(a),a)] = VCLOSE
            s2[(slow(a),a)] = VCLOSE
            s2[(slow(a),fast(a))] = CLOSE

        s1[('_','_')] = CLOSE
        s2[('_','_')] = MATCH

        for a in alphabet.non_dynamic_vars:
            s1[(a,a)] = MATCH
            s1[(a,'_')] = CLOSE
            s2[(a,a)] = MATCH
            s2[(a,'_')] = CLOSE

        # fill in all remaining pair scores
        for k, v in s1.score_dict.iteritems():
            if v is None:
                d_common = len(common.intersect(alphabet.dynamic_vars,
                                                strip_speed(k)))
                if (k[0] == '_' or k[1] == '_') and k[0] != k[1]:
                    if d_common > 0:
                        s1.score_dict[k] = MISMATCH
                        s2.score_dict[k] = VFAR
                    else:
                        s1.score_dict[k] = MISMATCH
                        s2.score_dict[k] = VFAR
                elif d_common > 0:
                    s1.score_dict[k] = MISMATCH
                    s2.score_dict[k] = VFAR
                else:
                    s1.score_dict[k] = MISMATCH
                    s2.score_dict[k] = CLOSE

        self.s1 = s1
        self.s2 = s2
        # relative weights of the three similarity components
        self.weight_unordered = 0.35
        self.weight_s1 = 0.2
        self.weight_s2 = 0.45

    def _validate_weights(self):
        assert self.weight_s1 + self.weight_s2 + self.weight_unordered == 1
    def __getitem__(self, k):
        # NB: self.scorer is never defined in this copy, so this method
        # appears to be leftover code
        return self.scorer(k[0], k[1])*self.match

    def __call__(self, a, b):
        # (method name assumed; the original def line was lost from this copy)
        self._validate_weights()
        s1 = self.s1
        s2 = self.s2
        minlen = min(len(a), len(b))
        maxlen = max(len(a), len(b))
        a_un = []
        b_un = []
        a_strip = strip_speed(a)
        b_strip = strip_speed(b)
        seen_cs = []
        # pair up symbols common to both sequences, ignoring speed tags
        for c in utils.union(a, b):
            cs = strip_speed([c])[0]
            if cs in seen_cs:
                continue
            else:
                seen_cs.append(cs)
            try:
                ixb = b_strip.index(cs)
            except ValueError:
                pass
            else:
                try:
                    ixa = a_strip.index(cs)
                except ValueError:
                    pass
                else:
                    a_un.append(a[ixa])
                    b_un.append(b[ixb])
        a_un = a_un + common.remain(a, a_un)
        b_un = b_un + common.remain(b, b_un)
        a_dyn = only_dynamic(pad(a, maxlen), s1.alphabet.dynamic_vars)
        b_dyn = only_dynamic(pad(b, maxlen), s1.alphabet.dynamic_vars)
        # order-insensitive similarity from the paired-up sequences
        unordered_sim = sum([s2[(pad(a_un, maxlen)[i], pad(b_un, maxlen)[i])] \
                             for i in range(maxlen)])/(maxlen*s2.match)
        return (self.weight_unordered*unordered_sim + \
                self.weight_s1*swdist(a_dyn, b_dyn, s1) + \
                self.weight_s2*swdist(a, b, s2))


def get_symbol_sequence(epoch_list, get_actives=True, get_modulatory=False):
    seq = []
    for ep in epoch_list:
        a, m = ep.seq_str()
        if get_actives:
            if get_modulatory:
                l = (a, m)
            else:
                l = a
        else:
            if get_modulatory:
                l = m
            else:
                raise ValueError("Must specify at least one type of output")
        seq.append(l)
    return seq


class VarAlphabet(object):
    def __init__(self, dynamic, non_dynamic):
        self.dynamic_vars = dynamic
        self.non_dynamic_vars = non_dynamic
        alphabet = dynamic + non_dynamic
        # add fast/slow-tagged versions of the dynamic variables, plus the
        # gap symbol '_'
        for speed in ['F', 'S']:
            alphabet.extend([v+'[%s]'%speed for v in dynamic])
        self.all_vars = alphabet + ['_']

    def fast(self, a):
        if a in self.dynamic_vars:
            return a+'[F]'
        else:
            raise ValueError("%s is not a dynamic variable" % a)

    def slow(self, a):
        if a in self.dynamic_vars:
            return a+'[S]'
        else:
            raise ValueError("%s is not a dynamic variable" % a)
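
# A minimal usage sketch (the da1/da2 names are hypothetical, and calling
# the scorer relies on the reconstructed __call__ method above):
#   alph = VarAlphabet(['m', 'n', 'h'], ['Ibias'])
#   scorer = EpochSeqScorer(alph)
#   seq1 = get_symbol_sequence(da1.domscales['psi'].epochs)
#   seq2 = get_symbol_sequence(da2.domscales['psi'].epochs)
#   sim = scorer(seq1[0], seq2[0])   # weighted similarity score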
1914
1916 res = []
1917 for a in seq:
1918 if a[-1] == ']':
1919 res.append(a[:-3])
1920 else:
1921 res.append(a)
1922 return res
1923
1925 m = len(seq)
1926 if m > n:
1927 raise ValueError
1928 elif m < n:
1929 return seq + ['_']*(n-m)
1930 else:
1931 return seq
1932
1934 res = []
1935 for a in seq:
1936 if strip_speed([a])[0] in dynamic_vars:
1937 res.append(a)
1938 else:
1939 res.append('_')
1940 return res
1941
1942
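
# For example:
#   strip_speed(['x[F]', 'y'])          ->  ['x', 'y']
#   pad(['x'], 3)                       ->  ['x', '_', '_']
#   only_dynamic(['x', 'Ibias'], ['x']) ->  ['x', '_']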
1944 """Based on code by Gergely Szollosi"""
1945 rows=len(seq1)+1
1946 cols=len(seq2)+1
1947 a=np.zeros((rows,cols),float)
1948 if isinstance(seq1, list):
1949 blank = [['_']]
1950 else:
1951 blank = ['_']
1952 for i in range(1,rows):
1953 for j in range(1,cols):
1954
1955
1956
1957
1958
1959
1960
1961 choice1 = a[i-1, j-1] + scorer[(seq1[i-1], seq2[j-1])]
1962 choice2 = a[i-1, j] + scorer.gap
1963 choice3 = a[i, j-1] + scorer.gap
1964 a[i, j] = max(choice1, choice2, choice3)
1965
1966 aseq1 = []
1967 aseq2 = []
1968
1969 i = len(seq1)
1970 j = len(seq2)
1971 while i>0 and j>0:
1972
1973
1974 score = a[i, j]
1975 score_diag = a[i-1, j-1]
1976 score_up = a[i, j-1]
1977 score_left = a[i-1, j]
1978 if score == score_diag + scorer[(seq1[i-1], seq2[j-1])]:
1979 aseq1 = [seq1[i-1]] + aseq1
1980 aseq2 = [seq2[j-1]] + aseq2
1981 i -= 1
1982 j -= 1
1983 elif score == score_left + scorer.gap:
1984 aseq1 = [seq1[i-1]] + aseq1
1985 aseq2 = blank + aseq2
1986 i -= 1
1987 elif score == score_up + scorer.gap:
1988 aseq1 = blank + aseq1
1989 aseq2 = [seq2[j-1]] + aseq2
1990 j -= 1
1991 else:
1992
1993 raise RuntimeError
1994 while i>0:
1995
1996 aseq1 = [seq1[i-1]] + aseq1
1997 aseq2 = blank + aseq2
1998 i -= 1
1999
2000 while j>0:
2001
2002 aseq1 = blank + aseq1
2003 aseq2 = [seq2[j-1]] + aseq2
2004 j -= 1
2005
2006 final_score = swdist(aseq1, aseq2, scorer)
2007
2008 return (aseq1, aseq2), final_score


def swdist(str1, str2, scorer, common_divisor='longest', min_threshold=None):
    """Return approximate string comparator measure (between 0.0 and 1.0)
    using the Smith-Waterman distance.

    USAGE:
      score = swdist(str1, str2, scorer, common_divisor, min_threshold)

    ARGUMENTS:
      str1            The first sequence
      str2            The second sequence
      scorer          Scorer object instance used to compare two symbols
      common_divisor  Method of how to calculate the divisor; it can be set
                      to 'average', 'shortest', or 'longest', and is
                      calculated according to the lengths of the two input
                      strings
      min_threshold   Minimum threshold between 0 and 1 (currently not used)

    DESCRIPTION:
      Smith-Waterman distance is commonly used in biological sequence
      alignment. Scores for matches, mismatches, gap and extension penalties
      are taken from the scorer.
    """
    n = len(str1)
    m = len(str2)

    if n*m == 0:
        return 0.0
    elif str1 == str2:
        return 1.0

    if common_divisor not in ['average', 'shortest', 'longest']:
        raise ValueError('Illegal value for common divisor: %s' % \
                         (common_divisor))

    if common_divisor == 'average':
        divisor = 0.5*(n+m)*scorer.match
    elif common_divisor == 'shortest':
        divisor = min(n,m)*scorer.match
    else:
        divisor = max(n,m)*scorer.match

    best_score = 0
    # dynamic programming matrix of local alignment scores
    d = np.zeros((n+1,m+1), float)

    for i in range(1,n+1):
        for j in range(1,m+1):
            match = d[i-1, j-1] + scorer[(str1[i-1], str2[j-1])]

            insert = 0
            for k in range(1,i):
                score = d[i-k, j] + scorer.gap + k*scorer.extension
                insert = max(insert, score)

            delete = 0
            for l in range(1,j):
                score = d[i, j-l] + scorer.gap + l*scorer.extension
                delete = max(delete, score)

            d[i, j] = max(match, insert, delete, 0)
            best_score = max(d[i, j], best_score)

    # normalize the best local alignment score to [0, 1]
    w = float(best_score) / float(divisor)
    assert (w >= 0.0) and (w <= 1.0), 'Similarity weight outside 0-1: %f' % (w)
    return w
2119 """Return approximate string comparator measure (between 0.0 and 1.0)
2120 using the edit (or Levenshtein) distance as well as a triplet with the
2121 counts of the actual edits (inserts, deletes and substitutions).
2122
2123 USAGE:
2124 score, edit_counts = editdist_edits(str1, str2)
2125
2126 ARGUMENTS:
2127 str1 The first seq
2128 str2 The second seq
2129
2130 DESCRIPTION:
2131 The edit distance is the minimal number of insertions, deletions and
2132 substitutions needed to make two strings equal.
2133
2134 edit_counts is a list with three elements that contain the number of
2135 inserts, deletes and substitutions that were needed to convert
2136 str1 into str2.
2137
2138 For more information on the modified Soundex see:
2139 - http://www.nist.gov/dads/HTML/editdistance.html
2140 """
2141
2142
2143
2144 n = len(str1)
2145 m = len(str2)
2146
2147 if n + m == 0:
2148 return 0.0, [0,0,0]
2149
2150 elif n*m == 0:
2151 if n == 0:
2152 return 0.0, [m,0,0]
2153 else:
2154 return 0.0, [0,n,0,0]
2155
2156 elif str1 == str2:
2157 return 1.0, [0,0,0]
2158
2159 d = []
2160
2161 current = range(n+1)
2162 d.append(current)
2163
2164 for i in range(1,m+1):
2165
2166 previous = current
2167 current = [i]+n*[0]
2168 str2char = str2[i-1]
2169
2170 for j in range(1,n+1):
2171 substitute = previous[j-1]
2172 if (str1[j-1] != str2char):
2173 substitute += 1
2174
2175
2176
2177 current[j] = min(previous[j]+1, current[j-1]+1, substitute)
2178
2179 d.append(current)
2180
2181
2182
2183 num_edits = [0,0,0]
2184
2185 d_curr = d[m][n]
2186 j = n
2187 i = m
2188
2189 while (d_curr > 0):
2190 if (d[i-1][j-1]+1 == d_curr):
2191 i -= 1
2192 j -= 1
2193 num_edits[2] += 1
2194 elif (d[i-1][j]+1 == d_curr):
2195 i -= 1
2196 num_edits[1] += 1
2197 elif (d[i][j-1]+1 == d_curr):
2198 j -= 1
2199 num_edits[0] += 1
2200
2201 else:
2202 if (d[i-1][j-1] == d_curr):
2203 i -= 1
2204 j -= 1
2205 elif (d[i-1][j] == d_curr):
2206 i -= 1
2207 elif (d[i][j-1] == d_curr):
2208 j -= 1
2209 d_curr = d[i][j]
2210
2211 w = 1.0 - float(d[m][n]) / float(max(n,m))
2212 assert (w >= 0.0) and (w <= 1.0), 'Similarity weight outside 0-1: %f' % (w)
2213
2214 return w, num_edits
2215
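
# For example:
#   editdist_edits('abc', 'abd')  ->  (0.666..., [0, 0, 1])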


JARO_MARKER_CHAR = '*'

def jaro(str1, str2, min_threshold=None):
    """Return approximate string comparator measure (between 0.0 and 1.0)

    USAGE:
      score = jaro(str1, str2, min_threshold)

    ARGUMENTS:
      str1           The first string
      str2           The second string
      min_threshold  Minimum threshold between 0 and 1 (currently not used)

    DESCRIPTION:
      As described in 'An Application of the Fellegi-Sunter Model of
      Record Linkage to the 1990 U.S. Decennial Census' by William E. Winkler
      and Yves Thibaudeau.
    """
    if (str1 == '') or (str2 == ''):
        return 0.0
    elif (str1 == str2):
        return 1.0

    len1 = len(str1)
    len2 = len(str2)

    # integer division is needed: this module imports true division
    halflen = max(len1,len2) // 2 + 1

    ass1 = ''
    ass2 = ''

    workstr1 = str1
    workstr2 = str2

    common1 = 0
    common2 = 0

    # count characters of str1 found within halflen of their position in str2
    for i in range(len1):
        start = max(0,i-halflen)
        end = min(i+halflen+1,len2)
        index = workstr2.find(str1[i],start,end)
        if (index > -1):
            common1 += 1
            ass1 = ass1 + str1[i]
            workstr2 = workstr2[:index]+JARO_MARKER_CHAR+workstr2[index+1:]

    # and vice versa for str2 within str1
    for i in range(len2):
        start = max(0,i-halflen)
        end = min(i+halflen+1,len1)
        index = workstr1.find(str2[i],start,end)
        if (index > -1):
            common2 += 1
            ass2 = ass2 + str2[i]
            workstr1 = workstr1[:index]+JARO_MARKER_CHAR+workstr1[index+1:]

    if (common1 != common2):
        print 'Jaro: Wrong common values for strings "%s" and "%s"' % \
              (str1, str2) + ', common1: %i, common2: %i' % (common1, common2) + \
              ', common should be the same.'
        common1 = float(common1+common2) / 2.0

    if (common1 == 0):
        return 0.0

    # count transpositions among the assigned (common) characters
    transposition = 0
    for i in range(len(ass1)):
        if (ass1[i] != ass2[i]):
            transposition += 1
    transposition = transposition / 2.0

    common1 = float(common1)
    w = 1./3.*(common1 / float(len1) + common1 / float(len2) + \
               (common1-transposition) / common1)

    assert (w >= 0.0) and (w <= 1.0), 'Similarity weight outside 0-1: %f' % (w)

    return w
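
# For example:
#   jaro('martha', 'marhta')  ->  0.944...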


def tabulate_epoch_seqs(epseq1, epseq2):
    """Create a text table of epochs"""
    str_table = []
    assert len(epseq1) == len(epseq2)
    for i in range(len(epseq1)):
        eps1 = epseq1[i]
        eps2 = epseq2[i]
        str_table.append([",".join(eps1), ",".join(eps2)])
    print indent(str_table)

def indent(rows, hasHeader=False, headerChar='-', delim=' | ', justify='left',
           separateRows=False, prefix='', postfix='', wrapfunc=lambda x: x):
    """Indents a table by column.
    - rows: A sequence of sequences of items, one sequence per row.
    - hasHeader: True if the first row consists of the columns' names.
    - headerChar: Character to be used for the row separator line
      (if hasHeader==True or separateRows==True).
    - delim: The column delimiter.
    - justify: Determines how the data are justified in their column.
      Valid values are 'left', 'right' and 'center'.
    - separateRows: True if rows are to be separated by a line
      of 'headerChar's.
    - prefix: A string prepended to each printed row.
    - postfix: A string appended to each printed row.
    - wrapfunc: A function f(text) for wrapping text; each element in
      the table is first wrapped by this function."""

    def rowWrapper(row):
        newRows = [wrapfunc(item).split('\n') for item in row]
        # transpose, padding short entries with '' (Py2 map(None, ...) trick)
        return [[substr or '' for substr in item] for item in map(None, *newRows)]

    # break each logical row into one or more physical ones
    logicalRows = [rowWrapper(row) for row in rows]
    # columns of physical rows
    columns = map(None, *reduce(operator.add, logicalRows))
    # the width of each column is that of its longest entry
    maxWidths = [max([len(str(item)) for item in column]) for column in columns]
    rowSeparator = headerChar * (len(prefix) + len(postfix) + sum(maxWidths) + \
                                 len(delim)*(len(maxWidths)-1))
    # select the appropriate justify method
    justify = {'center': str.center, 'right': str.rjust, 'left': str.ljust}[justify.lower()]
    output = cStringIO.StringIO()
    if separateRows: print >> output, rowSeparator
    for physicalRows in logicalRows:
        for row in physicalRows:
            print >> output, \
                prefix \
                + delim.join([justify(str(item), width) for (item, width) in zip(row, maxWidths)]) \
                + postfix
        if separateRows or hasHeader: print >> output, rowSeparator; hasHeader = False
    return output.getvalue()


if __name__ == '__main__':
    # informal self-tests for transition_psi and transition_tau
    ep1 = common.args(actives=['x','y'], modulatory=['z','w'], sigma=2,
                      relative_ratios=True)
    res1 = transition_psi(ep1, Point({'x': 1, 'y': 0.499, 'z': 0.2, 'w': 0.1}), 0.01)
    assert res1 == ('leave', 'y')
    res2 = transition_psi(ep1, Point({'x': 1, 'y': 0.601, 'z': 0.3, 'w': 0.1}), 0.01)
    assert res2 == ('join', 'z')
    ep2 = common.args(actives=['x'], modulatory=['z','w'], sigma=2,
                      relative_ratios=True)
    res3 = transition_psi(ep2, Point({'x': 1, 'z': 0.1, 'w': 0.501}), 0.01)
    assert res3 == ('join', 'w')
    try:
        res4 = transition_psi(ep2, Point({'x': 1, 'z': 2, 'w': 0.1}), 0.01)
    except PyDSTool_ValueError:
        pass
    else:
        raise AssertionError

    ep1 = common.args(actives=['x','y'], modulatory=['z','w'], sigma=2,
                      relative_ratios=False)
    res1 = transition_psi(ep1, Point({'x': 1, 'y': 0.499, 'z': 0.2, 'w': 0.1}), 0.01)
    assert res1 == ('leave', 'y')
    res2 = transition_psi(ep1, Point({'x': 1, 'y': 0.601, 'z': 0.499, 'w': 0.1}), 0.01)
    assert res2 == ('join', 'z')
    ep2 = common.args(actives=['x'], modulatory=['z','w'], sigma=2,
                      relative_ratios=False)
    res3 = transition_psi(ep2, Point({'x': 1, 'z': 0.1, 'w': 0.501}), 0.01)
    assert res3 == ('join', 'w')
    try:
        res4 = transition_psi(ep2, Point({'x': 1, 'z': 2, 'w': 0.1}), 0.01)
    except PyDSTool_ValueError:
        pass
    else:
        raise AssertionError
    ep3 = common.args(actives=['x','y'], modulatory=[], sigma=2,
                      relative_ratios=False)
    res5 = transition_psi(ep3, Point({'x': 1, 'y': 0.501}), 0.01)
    assert res5 == ('leave', 'y')

    ep1 = common.args(fast=['x','y'], slow=['w','z'], order1=['v'], gamma=2)
    res1 = transition_tau(ep1, Point({'x': 0.3, 'y': 0.2, 'v': 1, 'w': 2.005, 'z': 3}), 0.01)
    assert res1 == ('slow_leave', 'w')
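
    # extra sanity checks on the sequence-comparison utilities (values
    # follow from the definitions above)
    w, edits = editdist_edits('abc', 'abd')
    assert edits == [0, 0, 1]
    assert abs(jaro('martha', 'marhta') - 17./18) < 1e-12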
2399