Environment.pykov module

Pykov documentation.

exception Environment.pykov.PykovError(value)

   Bases: Exception

   Exception definition for Pykov errors.

class Environment.pykov.Vector(data=None, **kwargs)

   Bases: collections.OrderedDict

   __init__(data=None, **kwargs)

      >>> pykov.Vector({'A':.3, 'B':.7})
      {'A': 0.3, 'B': 0.7}
      >>> pykov.Vector(A=.3, B=.7)
      {'A': 0.3, 'B': 0.7}

   __setitem__(key, value)

      Setting an entry to zero removes it from the vector.

      >>> q = pykov.Vector(C=.4, B=.6)
      >>> q['Z'] = .2
      >>> q
      {'C': 0.4, 'B': 0.6, 'Z': 0.2}
      >>> q['Z'] = 0
      >>> q
      {'C': 0.4, 'B': 0.6}

   __mul__(M)

      Multiply by a scalar, by another Vector (dot product), or by a
      Matrix (vector-matrix product).

      >>> p = pykov.Vector(A=.3, B=.7)
      >>> p * 3
      {'A': 0.9, 'B': 2.1}
      >>> q = pykov.Vector(C=.5, B=.5)
      >>> p * q
      0.35
      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> p * T
      {'A': 0.91, 'B': 0.09}
      >>> T * p
      {'A': 0.42, 'B': 0.3}

   __add__(v)

      >>> p = pykov.Vector(A=.3, B=.7)
      >>> q = pykov.Vector(C=.5, B=.5)
      >>> p + q
      {'A': 0.3, 'C': 0.5, 'B': 1.2}

   __sub__(v)

      >>> p = pykov.Vector(A=.3, B=.7)
      >>> q = pykov.Vector(C=.5, B=.5)
      >>> p - q
      {'A': 0.3, 'C': -0.5, 'B': 0.2}
      >>> q - p
      {'A': -0.3, 'C': 0.5, 'B': -0.2}

   _toarray(el2pos)

      >>> p = pykov.Vector(A=.3, B=.7)
      >>> el2pos = {'A': 1, 'B': 0}
      >>> v = p._toarray(el2pos)
      >>> v
      array([ 0.7, 0.3])

   _fromarray(arr, el2pos)

      >>> import numpy
      >>> p = pykov.Vector()
      >>> el2pos = {'A': 1, 'B': 0}
      >>> v = numpy.array([ 0.7, 0.3])
      >>> p._fromarray(v, el2pos)
      >>> p
      {'A': 0.3, 'B': 0.7}

   sort(reverse=False)

      Return a list of (state, probability) pairs sorted according to
      probability.

      >>> p = pykov.Vector({'A':.3, 'B':.1, 'C':.6})
      >>> p.sort()
      [('B', 0.1), ('A', 0.3), ('C', 0.6)]
      >>> p.sort(reverse=True)
      [('C', 0.6), ('A', 0.3), ('B', 0.1)]

   normalize()

      Normalize the vector so that its entries sum to 1.

      >>> p = pykov.Vector({'A':3, 'B':1, 'C':6})
      >>> p.normalize()
      >>> p
      {'A': 0.3, 'C': 0.6, 'B': 0.1}

   choose(random_func=None)

      Choose a state at random according to its probability.

      >>> p = pykov.Vector(A=.3, B=.7)
      >>> p.choose()
      'B'

      Optionally, a function that generates a random number can be
      supplied.

      >>> def FakeRandom(min, max):
      ...     return 0.01
      >>> p = pykov.Vector(A=.05, B=.4, C=.4, D=.15)
      >>> p.choose(FakeRandom)
      'A'

   entropy()

      Return the entropy.

      \[H(p) = -\sum_i p_i \ln p_i\]

      See also

         Khinchin, A. I., Mathematical Foundations of Information
         Theory, Dover, 1957.

      >>> p = pykov.Vector(A=.3, B=.7)
      >>> p.entropy()
      0.6108643020548935
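
      The value can be checked by hand with the standard library (a
      quick sketch, not part of the pykov API):

      >>> import math
      >>> p = pykov.Vector(A=.3, B=.7)
      >>> -sum(x * math.log(x) for x in p.values())
      0.6108643020548935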

   relative_entropy(p)

      Return the Kullback-Leibler distance.

      \[d(q,p) = \sum_i q_i \ln (q_i/p_i)\]

      Note

         The Kullback-Leibler distance is not symmetric.

      >>> p = pykov.Vector(A=.3, B=.7)
      >>> q = pykov.Vector(A=.4, B=.6)
      >>> p.relative_entropy(q)
      0.02160085414354654
      >>> q.relative_entropy(p)
      0.022582421084357485
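
      Again, a hand check with the standard library (illustrative only,
      continuing the example above):

      >>> import math
      >>> kl = sum(p[s] * math.log(p[s] / q[s]) for s in p)
      >>> abs(kl - p.relative_entropy(q)) < 1e-12
      True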

   copy()

      Return a shallow copy.

      >>> p = pykov.Vector(A=.3, B=.7)
      >>> q = p.copy()
      >>> p['C'] = 1.
      >>> q
      {'A': 0.3, 'B': 0.7}

   dist(v)

      Return the distance between the two probability vectors.

      \[d(q,p) = \sum_i |q_i - p_i|\]

      >>> p = pykov.Vector(A=.3, B=.7)
      >>> q = pykov.Vector(C=.5, B=.5)
      >>> q.dist(p)
      1.0

class Environment.pykov.Matrix(data=None)

   Bases: collections.OrderedDict

   __getitem__(*args)

      A missing transition is returned as 0.0.

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T[('A','B')]
      0.3
      >>> T['A','B']
      0.3
      >>> T['A','C']
      0.0

   copy()

      Return a shallow copy.

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> W = T.copy()
      >>> T[('B','B')] = 1.
      >>> W
      {('B', 'A'): 1.0, ('A', 'B'): 0.3, ('A', 'A'): 0.7}

   _numpy_mat(el2pos)

      Return a numpy.matrix object from a dictionary.

      Parameters:
         t_ij : the OrderedDict; values must be real numbers, keys
            should be tuples of two strings.
         el2pos : see _map()

   _from_numpy_mat(T, pos2el)

      Return a dictionary from a numpy.matrix object.

      Parameters:
         T : the numpy.matrix.
         pos2el : see _map()

   stochastic()

      Make the matrix right stochastic.

      Set the sum of every row equal to one; raise PykovError if this is
      not possible.

      >>> T = pykov.Matrix({('A','B'): 3, ('A','A'): 7, ('B','A'): .2})
      >>> T.stochastic()
      >>> T
      {('B', 'A'): 1.0, ('A', 'B'): 0.3, ('A', 'A'): 0.7}
      >>> T[('A','C')] = 1
      >>> T.stochastic()
      pykov.PykovError: 'Zero links from node C'
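
      After a successful call every row sums to one; a quick check with
      succ() (documented below), assuming it returns a mapping of
      successor probabilities:

      >>> T = pykov.Matrix({('A','B'): 3, ('A','A'): 7, ('B','A'): .2})
      >>> T.stochastic()
      >>> abs(sum(T.succ('A').values()) - 1.0) < 1e-12
      True
      >>> abs(sum(T.succ('B').values()) - 1.0) < 1e-12
      True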

   pred(key=None)

      Return the predecessors of a state (if not indicated, of all
      states). In matrix notation: return the column of the indicated
      state.

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.pred()
      {'A': {'A': 0.7, 'B': 1.0}, 'B': {'A': 0.3}}
      >>> T.pred('A')
      {'A': 0.7, 'B': 1.0}

   succ(key=None)

      Return the successors of a state (if not indicated, of all
      states). In matrix notation: return the row of the indicated
      state.

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.succ()
      {'A': {'A': 0.7, 'B': 0.3}, 'B': {'A': 1.0}}
      >>> T.succ('A')
      {'A': 0.7, 'B': 0.3}

   remove(states)

      Return a copy of the Chain without the indicated states.

      Warning

         All the links where the states appear are deleted, so in
         general the result will not be a stochastic matrix.

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.remove(['B'])
      {('A', 'A'): 0.7}
      >>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1., ('C','D'): .5, ('D','C'): 1., ('C','B'): .5})
      >>> T.remove(['A','B'])
      {('C', 'D'): 0.5, ('D', 'C'): 1.0}

   states()

      Return the set of states.

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.states()
      {'A', 'B'}

   __pow__(n)

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T**2
      {('A', 'B'): 0.21, ('B', 'A'): 0.70, ('A', 'A'): 0.79, ('B', 'B'): 0.30}
      >>> T**0
      {('A', 'A'): 1.0, ('B', 'B'): 1.0}

   __mul__(v)

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T * 3
      {('B', 'A'): 3.0, ('A', 'B'): 0.9, ('A', 'A'): 2.1}
      >>> p = pykov.Vector(A=.3, B=.7)
      >>> T * p
      {'A': 0.42, 'B': 0.3}
      >>> W = pykov.Matrix({('N', 'M'): 0.5, ('M', 'N'): 0.7, ('M', 'M'): 0.3, ('O', 'N'): 0.5, ('O', 'O'): 0.5, ('N', 'O'): 0.5})
      >>> W * W
      {('N', 'M'): 0.15, ('M', 'N'): 0.21, ('M', 'O'): 0.35, ('M', 'M'): 0.44, ('O', 'M'): 0.25, ('O', 'N'): 0.25, ('O', 'O'): 0.5, ('N', 'O'): 0.25, ('N', 'N'): 0.6}

   __rmul__(v)

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> 3 * T
      {('B', 'A'): 3.0, ('A', 'B'): 0.9, ('A', 'A'): 2.1}

   __add__(M)

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> I = pykov.Matrix({('A','A'):1, ('B','B'):1})
      >>> T + I
      {('B', 'A'): 1.0, ('A', 'B'): 0.3, ('A', 'A'): 1.7, ('B', 'B'): 1.0}

   __sub__(M)

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> I = pykov.Matrix({('A','A'):1, ('B','B'):1})
      >>> T - I
      {('B', 'A'): 1.0, ('A', 'B'): 0.3, ('A', 'A'): -0.3, ('B', 'B'): -1}

   trace()

      Return the Matrix trace.

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.trace()
      0.7

   eye()

      Return the identity Matrix.

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.eye()
      {('A', 'A'): 1.0, ('B', 'B'): 1.0}

   ones()

      Return a Vector instance with all entries equal to one.

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.ones()
      {'A': 1.0, 'B': 1.0}

   transpose()

      Return the transposed Matrix.

      >>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.transpose()
      {('B', 'A'): 0.3, ('A', 'B'): 1.0, ('A', 'A'): 0.7}

   _UMPFPACKSolve(b, x=None, method='UMFPACK_A')

      Solve a sparse linear system with UMFPACK (Unsymmetric
      MultiFrontal PACKage).

      method:
         "UMFPACK_A"  : solve A x = b (default)
         "UMFPACK_At" : solve A^T x = b

      See also

         T. A. Davis, "A column pre-ordering strategy for the
         unsymmetric-pattern multifrontal method", ACM Transactions on
         Mathematical Software, vol. 30, no. 2, June 2004, pp. 165-195.

class Environment.pykov.Chain(data=None)

   Bases: Environment.pykov.Matrix

   move(state, random_func=None)

      Do one step from the indicated state, and return the final state.

      >>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.move('A')
      'B'

      Optionally, a function that generates a random number can be
      supplied.

      >>> def FakeRandom(min, max):
      ...     return 0.01
      >>> T.move('A', FakeRandom)
      'B'

   pow(p, n)

      Find the probability distribution after n steps, starting from an
      initial Vector.

      >>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> p = pykov.Vector(A=1)
      >>> T.pow(p, 3)
      {'A': 0.7629999999999999, 'B': 0.23699999999999996}
      >>> p * T * T * T
      {'A': 0.7629999999999999, 'B': 0.23699999999999996}

   steady()

      With the assumption of ergodicity, return the steady state.

      Note

         Inverse iteration method (P is the Markov chain):

         \[Q = \mathbf{I} - P, \qquad Q^T x = e, \qquad e = (0,0,\dots,0,1)\]

      See also

         W. Stewart, Introduction to the Numerical Solution of Markov
         Chains, Princeton University Press, Chichester, West Sussex,
         1994.

      >>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.steady()
      {'A': 0.7692307692307676, 'B': 0.23076923076923028}
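
      The result can be sanity-checked with the Vector API documented
      above: a stationary distribution satisfies pi = pi * P (a quick
      sketch, continuing the example):

      >>> pi = T.steady()
      >>> pi.dist(pi * T) < 1e-12
      True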

   entropy(p=None, norm=False)

      Return the Chain entropy, calculated with the indicated
      probability Vector (the steady state by default).

      \[H_i = -\sum_j P_{ij} \ln P_{ij}, \qquad H = \sum_i \pi_i H_i\]

      See also

         Khinchin, A. I., Mathematical Foundations of Information
         Theory, Dover, 1957.

      >>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.entropy()
      0.46989561696530169

      With normalization, the entropy belongs to [0, 1]:

      >>> T.entropy(norm=True)
      0.33895603665233132

   mfpt_to(state)

      Return the Mean First Passage Times of every state to the
      indicated state.

      See also

         Kemeny J. G.; Snell, J. L., Finite Markov Chains,
         Springer-Verlag, New York, 1976.

      >>> d = {('R', 'N'): 0.25, ('R', 'S'): 0.25, ('S', 'R'): 0.25, ('R', 'R'): 0.5, ('N', 'S'): 0.5, ('S', 'S'): 0.5, ('S', 'N'): 0.25, ('N', 'R'): 0.5, ('N', 'N'): 0.0}
      >>> T = pykov.Chain(d)
      >>> T.mfpt_to('R')
      {'S': 3.333333333333333, 'N': 2.666666666666667}
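
      The returned times satisfy the standard first-step recurrence
      m_i = 1 + sum over j != target of P_ij * m_j; a quick check,
      continuing the example:

      >>> m = T.mfpt_to('R')
      >>> abs(m['S'] - (1 + T['S','S'] * m['S'] + T['S','N'] * m['N'])) < 1e-9
      True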

   adjacency()

      Return the adjacency matrix.

      >>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.adjacency()
      {('B', 'A'): 1, ('A', 'B'): 1, ('A', 'A'): 1}

   walk(steps, start=None, stop=None)

      Return a random walk of n steps, starting and stopping at the
      indicated states.

      Note

         If the starting state is not indicated (or is None), it is
         chosen according to its steady-state probability. If the
         stopping state is not None, the random walk stops early when
         the stopping state is reached.

      >>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.walk(10)
      ['B', 'A', 'B', 'A', 'A', 'B', 'A', 'A', 'A', 'B', 'A']
      >>> T.walk(10, 'B', 'B')
      ['B', 'A', 'A', 'A', 'A', 'A', 'B']

   walk_probability(walk)

      Given a walk, return the logarithm of its probability.

      >>> import math
      >>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.walk_probability(['A','A','B','A','A'])
      -1.917322692203401
      >>> probability = math.exp(-1.917322692203401)
      >>> probability
      0.147

      A walk that uses a missing transition has probability zero:

      >>> p = T.walk_probability(['A','B','B','B','A'])
      >>> math.exp(p)
      0.0
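
      The log-probability is just the sum of the logarithms of the
      transition probabilities along the walk (a hand check, continuing
      the example):

      >>> steps = ['A', 'A', 'B', 'A', 'A']
      >>> lp = sum(math.log(T[a, b]) for a, b in zip(steps, steps[1:]))
      >>> abs(lp - T.walk_probability(steps)) < 1e-12
      True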

   mixing_time(cutoff=0.25, jump=1, p=None)

      Return the mixing time.

      If the initial distribution p is not indicated, it is set to
      p = {'least probable state': 1}.

      Note

         The mixing time is calculated here as the number of steps n
         needed to have

         \[|p(n) - \pi| < 0.25, \qquad p(n) = p P^n, \qquad \pi = \pi P\]

         The parameter jump controls the iteration step; for example,
         with jump=2, n takes the values 2, 4, 6, 8, ...

      >>> d = {('R','R'):1./2, ('R','N'):1./4, ('R','S'):1./4, ('N','R'):1./2, ('N','N'):0., ('N','S'):1./2, ('S','R'):1./4, ('S','N'):1./4, ('S','S'):1./2}
      >>> T = pykov.Chain(d)
      >>> T.mixing_time()
      2
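
      The answer can be checked with pow() and dist(); in this chain 'N'
      is the least probable state of the steady distribution (a sketch,
      continuing the example):

      >>> pi = T.steady()
      >>> p = pykov.Vector(N=1)
      >>> pi.dist(T.pow(p, 1)) < 0.25
      False
      >>> pi.dist(T.pow(p, 2)) < 0.25
      True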

   absorbing_time(transient_set)

      Mean number of steps needed to leave the transient set.

      Return the Vector tau; tau[i] is the mean number of steps needed
      to leave the transient set starting from state i. The parameter
      transient_set is a subset of nodes.

      Note

         If the starting point is a Vector p, then it is sufficient to
         calculate p * tau in order to weigh the mean times according to
         the initial conditions.

      >>> d = {('R','R'):1./2, ('R','N'):1./4, ('R','S'):1./4, ('N','R'):1./2, ('N','N'):0., ('N','S'):1./2, ('S','R'):1./4, ('S','N'):1./4, ('S','S'):1./2}
      >>> T = pykov.Chain(d)
      >>> p = pykov.Vector({'N':.3, 'S':.7})
      >>> tau = T.absorbing_time(p.keys())
      >>> p * tau
      3.1333333333333329

   absorbing_tour(p, transient_set=None)

      Return a Vector v; v[i] is the mean of the total number of times
      the process visits the transient state i before leaving the
      transient set.

      Note

         v.sum() is equal to p * tau (see the absorbing_time() method).
         If not specified, the transient set is defined by means of the
         Vector p.

      See also

         Kemeny J. G.; Snell, J. L., Finite Markov Chains,
         Springer-Verlag, New York, 1976.

      >>> d = {('R','R'):1./2, ('R','N'):1./4, ('R','S'):1./4, ('N','R'):1./2, ('N','N'):0., ('N','S'):1./2, ('S','R'):1./4, ('S','N'):1./4, ('S','S'):1./2}
      >>> T = pykov.Chain(d)
      >>> p = pykov.Vector({'N':.3, 'S':.7})
      >>> T.absorbing_tour(p)
      {'S': 2.2666666666666666, 'N': 0.8666666666666669}

   fundamental_matrix()

      Return the fundamental matrix.

      See also

         Kemeny J. G.; Snell, J. L., Finite Markov Chains,
         Springer-Verlag, New York, 1976.

      >>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.fundamental_matrix()
      {('B', 'A'): 0.17751479289940991, ('A', 'B'): 0.053254437869822958, ('A', 'A'): 0.94674556213017902, ('B', 'B'): 0.82248520710059214}

   kemeny_constant()

      Return the Kemeny constant of the transition matrix.

      >>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
      >>> T.kemeny_constant()
      1.7692307692307712
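
      For an ergodic chain the Kemeny constant equals the trace of the
      fundamental matrix (Kemeny & Snell), which gives a quick
      consistency check, assuming fundamental_matrix() returns a Matrix
      instance:

      >>> Z = T.fundamental_matrix()
      >>> abs(T.kemeny_constant() - Z.trace()) < 1e-12
      True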

   accessibility_matrix()

      Return the accessibility matrix of the Markov chain.

      See also: http://www.ssc.wisc.edu/~jmontgom/commclasses.pdf
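
      The docstring gives no example; the underlying relation can be
      sketched by hand. With adjacency matrix A over n states, state j
      is accessible from state i exactly when (I + A)^(n-1) has a
      nonzero (i, j) entry. A minimal illustration with numpy (not part
      of the pykov API), for the two-state chain used above (n = 2, so
      the exponent is 1):

      >>> import numpy
      >>> A = numpy.array([[1, 1], [1, 0]])   # adjacency of T, order ['A', 'B']
      >>> R = numpy.linalg.matrix_power(numpy.eye(2, dtype=int) + A, 1)
      >>> (R > 0).astype(int)
      array([[1, 1],
             [1, 1]])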

   communication_classes()

      Return a set of all communication classes of the Markov chain.

      See also: http://www.ssc.wisc.edu/~jmontgom/commclasses.pdf

      >>> T = pykov.Chain({('A','A'): 1.0, ('B','B'): 1.0})
      >>> T.communication_classes()

Environment.pykov.readmat(filename)

   Read an external file and return a Chain.

   The file must be of the form:

      A A .7
      A B .3
      B A 1

   >>> P = pykov.readmat('/mypath/mat')
   >>> P
   {('B', 'A'): 1.0, ('A', 'B'): 0.3, ('A', 'A'): 0.7}
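
   A quick round trip, writing the file first (the path is
   illustrative):

   >>> with open('/tmp/mat', 'w') as f:
   ...     _ = f.write('A A .7\nA B .3\nB A 1\n')
   >>> P = pykov.readmat('/tmp/mat')
   >>> P[('A', 'A')]
   0.7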

Environment.pykov.readtrj(filename)

   When a Chain instance must be created from a finite chain of states,
   the transition matrix is not fully defined: the function estimates
   the transition probabilities as the maximum likelihood probabilities
   calculated along the chain.

   Having the file /mypath/trj with the following format (one state per
   line):

      1
      1
      1
      2
      1
      3

   the Chain instance defined from that chain is:

   >>> t = pykov.readtrj('/mypath/trj')
   >>> t
   (1, 1, 1, 2, 1, 3)
   >>> p, P = pykov.maximum_likelihood_probabilities(t, lag_time=1, separator='0')
   >>> p
   {1: 0.6666666666666666, 2: 0.16666666666666666, 3: 0.16666666666666666}
   >>> P
   {(1, 2): 0.25, (1, 3): 0.25, (1, 1): 0.5, (2, 1): 1.0, (3, 3): 1.0}
   >>> type(P)
   <class 'pykov.Chain'>
   >>> type(p)
   <class 'pykov.Vector'>

Environment.pykov._writefile(mylist, filename)

   Export the list to a file.

   mylist can also be a list of lists.

   >>> L = [[2,3],[4,5]]
   >>> pykov._writefile(L, 'tmp')
   >>> l = [1,2]
   >>> pykov._writefile(l, 'tmp')

Environment.pykov.transitions(trj, nsteps=1, lag_time=1, separator='0')

   Return the temporal list of the observed transitions.

   Parameters:
      trj : the symbolic trajectory.
      nsteps : number of steps.
      lag_time : step length.
      separator : the special symbol indicating the presence of
         sub-trajectories.

   >>> trj = [1,2,1,0,2,3,1,0,2,3,2,3,1,2,3]
   >>> pykov.transitions(trj,1,1,0)
   [(1, 2), (2, 1), (2, 3), (3, 1), (2, 3), (3, 2), (2, 3), (3, 1), (1, 2), (2, 3)]
   >>> pykov.transitions(trj,1,2,0)
   [(1, 1), (2, 1), (2, 2), (3, 3), (2, 1), (3, 2), (1, 3)]
   >>> pykov.transitions(trj,2,2,0)
   [(2, 2, 1), (3, 3, 2), (2, 1, 3)]

Environment.pykov.maximum_likelihood_probabilities(trj, lag_time=1, separator='0')

   Return a Chain calculated by means of maximum likelihood
   probabilities.

   Returns two objects:
      p : a Vector object, the probability distribution over the nodes.
      T : a Chain object, the Markov chain.

   Parameters:
      trj : the symbolic trajectory.
      lag_time : number of steps defining a transition.
      separator : the special symbol indicating the presence of
         sub-trajectories.

   >>> t = [1,2,3,2,3,2,1,2,2,3,3,2]
   >>> p, T = pykov.maximum_likelihood_probabilities(t)
   >>> p
   {1: 0.18181818181818182, 2: 0.4545454545454546, 3: 0.36363636363636365}
   >>> T
   {(1, 2): 1.0, (3, 2): 0.7499999999999999, (2, 3): 0.5999999999999999, (3, 3): 0.25, (2, 2): 0.19999999999999998, (2, 1): 0.19999999999999998}

Environment.pykov._remove_dead_branch(transitions_list)

   Remove dead branches, inserting a self-loop in every node that has
   no outgoing links.

   >>> trj = [1,2,3,1,2,3,2,2,4,3,5]
   >>> tr = pykov.transitions(trj, nsteps=1)
   >>> tr
   [(1, 2), (2, 3), (3, 1), (1, 2), (2, 3), (3, 2), (2, 2), (2, 4), (4, 3), (3, 5)]
   >>> pykov._remove_dead_branch(tr)
   >>> tr
   [(1, 2), (2, 3), (3, 1), (1, 2), (2, 3), (3, 2), (2, 2), (2, 4), (4, 3), (3, 5), (5, 5)]