Source code for plfit.plfit

# -*- coding: latin-1 -*-
# 
# intended to implement a power-law fitting routine as specified at:
# http://www.santafe.edu/~aaronc/powerlaws/
#
# The MLE for the power-law alpha is easy to derive given knowledge of the
# lowest value at which a power law holds (xmin), but that value is difficult
# to estimate and must be determined iteratively.
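#
# As a quick illustration (not part of the original header), the continuous
# MLE for alpha at a single, known xmin is a one-liner; the machinery below
# mostly repeats this for every candidate xmin and keeps the one that
# minimizes the KS statistic:
#
#   import numpy as np
#   tail = data[data >= xmin]                        # assumes `data` and `xmin` exist
#   alpha = 1 + len(tail) / np.log(tail / xmin).sum()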

"""
numpy/matplotlib version of plfit.py
====================================
A power-law distribution fitter based on code by Aaron Clauset.  It can use
Fortran, Cython, or numpy-based power-law fitting 'backends'; the Fortran
backend is the fastest.

Requires pylab (matplotlib), which requires numpy

A very simple example of use::

    from plfit import plfit

    MyPL = plfit(mydata)
    MyPL.plotpdf(log=True)

"""

import numpy 
import numpy as np
import time
import pylab
try:
    import fplfit
    fortranOK = True
except ImportError:
    fortranOK = False
try:
    import cplfit
    cyOK = True
except ImportError:
    cyOK = False

import numpy.random as npr
from numpy import log,log10,sum,argmin,argmax,exp,min,max
try:
    import scipy.stats
    scipyOK = True
except ImportError:
    scipyOK = False
    print "scipy didn't import.  Can't compute certain basic statistics."


def alpha_gen(x):
    """
    Create a mappable function alpha to apply to each xmin in a list of xmins.

    This is essentially the slow version of fplfit/cplfit, though I bet it
    could be sped up with a clever use of parallel_map.  Not intended to be
    used by users.

    Docstring for the generated alpha function::

        Given a sorted data set and a minimum, returns power law MLE fit
        data is passed as a keyword parameter so that it can be vectorized

        If there is only one element, return alpha=0
    """
    def alpha_(xmin, x=x):
        """
        Given a sorted data set and a minimum, returns power law MLE fit
        data is passed as a keyword parameter so that it can be vectorized

        If there is only one element, return alpha=0
        """
        gexmin = x >= xmin
        n = np.count_nonzero(gexmin)
        if n < 2:
            return 0
        x = x[gexmin]
        a = 1 + float(n) / sum(log(x/xmin))
        return a

    return alpha_

def kstest_gen(x, unique=False, finite=False):
    """
    Create a mappable function kstest to apply to each xmin in a list of
    xmins.

    Parameters
    ----------
    unique : bool
        If set, will filter the input array 'x' to its unique elements.
        Normally, this would be done at an earlier step, so `unique` can be
        disabled for performance improvement
    finite : bool
        Apply the finite-sample correction from Clauset et al 2007...
        Not clear yet which equation this comes from.

    Docstring for the generated kstest function::

        Given a sorted data set and a minimum, returns power law MLE ks-test
        against the data
        data is passed as a keyword parameter so that it can be vectorized

        The returned value is the "D" parameter in the ks test.
    """
    def kstest_(xmin, x=x):
        """
        Given a sorted data set and a minimum, returns power law MLE ks-test
        against the data
        data is passed as a keyword parameter so that it can be vectorized

        The returned value is the "D" parameter in the ks test.
        """
        if unique:
            x = np.unique(x)
        x = x[x >= xmin]
        n = len(x)
        if n == 0:
            return np.inf
        a = 1 + float(n) / sum(log(x/xmin))
        if finite:
            a = a*(n-1.)/n + 1./n
        cx = np.arange(n, dtype='float')/float(n)
        cf = 1 - (xmin/x)**(a-1)
        ks = max(abs(cf-cx))
        return ks

    return kstest_

def sigma(alpha, n):
    """
    Clauset et al 2007 equation 3.2:
    sigma = (alpha-1)/sqrt(n)
    """
    return (alpha-1.) / n**0.5

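# For example (illustrative values, not from the module's tests):
#
#   sigma(2.5, 1000)    # ~= (2.5 - 1)/sqrt(1000) ~= 0.047
#
# This is the same per-xmin quantity that plfit() compares to the 0.1
# threshold when nosmall=True.
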
class plfit(object):
    """
    A Python implementation of the Matlab code
    `http://www.santafe.edu/~aaronc/powerlaws/plfit.m`_
    from `http://www.santafe.edu/~aaronc/powerlaws/`_.

    See `A. Clauset, C.R. Shalizi, and M.E.J. Newman, "Power-law distributions
    in empirical data" SIAM Review, 51, 661-703 (2009). (arXiv:0706.1062)
    <http://arxiv.org/abs/0706.1062>`_

    The output "alpha" is defined such that :math:`p(x) \sim (x/xmin)^{-alpha}`
    """

    def __init__(self, x, **kwargs):
        """
        Initializes and fits the power law.  Can pass "quiet" to turn off
        output (except for warnings; "silent" turns off warnings)
        """
        x = np.array(x)  # make sure x is an array, otherwise the next step fails
        if (x < 0).sum() > 0:
            print("Removed %i negative points" % ((x < 0).sum()))
            x = x[x > 0]
        self.data = x
        self.plfit(**kwargs)

    def plfit(self, nosmall=True, finite=False, quiet=False, silent=False,
              usefortran=False, usecy=False, xmin=None, verbose=False,
              discrete=None, discrete_approx=True, discrete_n_alpha=1000):
        """
        A Python implementation of the Matlab code
        http://www.santafe.edu/~aaronc/powerlaws/plfit.m
        from http://www.santafe.edu/~aaronc/powerlaws/

        See A. Clauset, C.R. Shalizi, and M.E.J. Newman, "Power-law
        distributions in empirical data" SIAM Review, 51, 661-703 (2009).
        (arXiv:0706.1062) http://arxiv.org/abs/0706.1062

        There are 3 implementations of xmin estimation.  The fortran version
        is fastest, the C (cython) version is ~10% slower, and the python
        version is ~3x slower than the fortran version.  Also, the cython
        code suffers ~2% numerical error relative to the fortran and python
        for unknown reasons.

        There is also a discrete version implemented in python - it is
        different from the continuous version!

        *discrete* [ bool | None ]
            If *discrete* is None, the code will try to determine whether the
            data set is discrete or continuous based on the uniqueness of the
            data; if your data set is continuous but you have any non-unique
            data points (e.g., flagged "bad" data), the "automatic"
            determination will fail.  If *discrete* is True or False, the
            discrete or continuous fitter will be used, respectively.

        *xmin* [ float / int ]
            If you specify xmin, the fitter will only determine alpha assuming
            the given xmin; the rest of the code (and most of the complexity)
            is determining an estimate for xmin and alpha.

        *nosmall* [ bool (True) ]
            When on, the code rejects low s/n points.  WARNING: This option,
            which is on by default, may result in different answers than the
            original Matlab code and the "powerlaw" python package.

        *finite* [ bool (False) ]
            There is a 'finite-size bias' to the estimator.  The "alpha" the
            code measures is "alpha-hat" such that ᾶ = (nα-1)/(n-1), or
            α = (1 + ᾶ (n-1)) / n

        *quiet* [ bool (False) ]
            If False, delivers messages about what fitter is used and the fit
            results

        *verbose* [ bool (False) ]
            Deliver descriptive messages about the fit parameters (only if
            *quiet*==False)

        *silent* [ bool (False) ]
            If True, will print NO messages
        """
        x = self.data
        if any(x < 0):
            raise ValueError("Power law distributions are only valid for "
                             "positive data.  Remove negative values before "
                             "fitting.")
        z = np.sort(x)

        # xmins = the unique values of x that can be used as the threshold for
        # the power law fit
        # argxmins = the index of each of these possible thresholds
        xmins, argxmins = np.unique(z, return_index=True)

        self._nunique = len(xmins)

        if self._nunique == len(x) and discrete is None:
            if verbose:
                print("Using CONTINUOUS fitter because there are no repeated "
                      "values.")
            discrete = False
        elif self._nunique < len(x) and discrete is None:
            if verbose:
                print("Using DISCRETE fitter because there are repeated "
                      "values.")
            discrete = True

        t = time.time()
        if xmin is None:
            if discrete:
                self.discrete_best_alpha(approximate=discrete_approx,
                                         n_alpha=discrete_n_alpha,
                                         verbose=verbose,
                                         finite=finite)
                return self._xmin, self._alpha
            elif usefortran and fortranOK:
                kstest_values, alpha_values = fplfit.plfit(z, 0)
                if not quiet:
                    print("FORTRAN plfit executed in %f seconds" % (time.time()-t))
            elif usecy and cyOK:
                kstest_values, alpha_values = cplfit.plfit_loop(z,
                                                                nosmall=False,
                                                                zunique=xmins,
                                                                argunique=argxmins)
                if not quiet:
                    print("CYTHON plfit executed in %f seconds" % (time.time()-t))
            else:
                # python (numpy) version
                f_alpha = alpha_gen(z)
                f_kstest = kstest_gen(z)
                alpha_values = np.asarray(map(f_alpha, xmins), dtype='float')
                kstest_values = np.asarray(map(f_kstest, xmins), dtype='float')
                if not quiet:
                    print("PYTHON plfit executed in %f seconds" % (time.time()-t))

            if not quiet:
                if usefortran and not fortranOK:
                    raise ImportError("fortran fplfit did not load")
                if usecy and not cyOK:
                    raise ImportError("cython cplfit did not load")

            # For each alpha, the number of included data points is
            # total data length - first index of xmin
            # No +1 is needed: xmin is included.
            sigma = (alpha_values-1)/np.sqrt(len(z)-argxmins)

            if nosmall:
                # test to make sure the number of data points is high enough
                # to provide a reasonable s/n on the computed alpha
                goodvals = sigma < 0.1
                nmax = argmin(goodvals)
                if nmax <= 0:
                    nmax = len(xmins) - 1
                    if not silent:
                        print("Not enough data left after flagging "
                              "low S/N points.  "
                              "Using all data.")
            else:
                # -1 to weed out the very last data point; it cannot be correct
                # (can't have a power law with 1 data point).
                nmax = len(xmins)-1

            best_ks_index = argmin(kstest_values[:nmax])
            xmin = xmins[best_ks_index]

            self._alpha_values = alpha_values
            self._xmin_kstest = kstest_values
            self._sigma = sigma

            # sanity check
            n = np.count_nonzero(z >= xmin)
            alpha = 1. + float(n)/sum(log(z[z >= xmin]/xmin))
            try:
                np.testing.assert_almost_equal(alpha,
                                               alpha_values[best_ks_index],
                                               decimal=5)
            except AssertionError:
                raise AssertionError("The alpha value computed was not self-"
                                     "consistent.  This should not happen.")

        z = z[z >= xmin]
        n = len(z)
        alpha = 1. + float(n) / sum(log(z/xmin))
        if finite:
            alpha = alpha*(n-1.)/n + 1./n

        if n < 50 and not finite and not silent:
            print('(PLFIT) Warning: finite-size bias may be present. n=%i' % n)

        ks = max(abs(np.arange(n)/float(n) - (1-(xmin/z)**(alpha-1))))
        # Parallels Eqn 3.5 in Clauset et al 2009, but zeta(alpha, xmin) =
        # (alpha-1)/xmin.  Really is Eqn B3 in paper.
        L = n*log((alpha-1)/xmin) - alpha*sum(log(z/xmin))
        #requires another map... Larr = arange(len(unique(x))) * log((alpha_values-1)/unique(x)) - alpha_values*sum
        self._likelihood = L
        self._xmin = xmin
        self._xmins = xmins
        self._alpha = alpha
        self._alphaerr = (alpha-1)/np.sqrt(n)
        # this ks statistic may not have the same value as min(dat) because of unique()
        self._ks = ks
        if scipyOK:
            self._ks_prob = scipy.stats.kstwobign.sf(ks*np.sqrt(n))
        self._ngtx = n
        if n == 1:
            if not silent:
                print("Failure: only 1 point kept.  Probably not a power-law distribution.")
            self._alpha = alpha = 0
            self._alphaerr = 0
            self._likelihood = L = 0
            self._ks = 0
            self._ks_prob = 0
            self._xmin = xmin
            return xmin, 0
        if np.isnan(L) or np.isnan(xmin) or np.isnan(alpha):
            raise ValueError("plfit failed; returned a nan")

        if not quiet:
            if verbose: print "The lowest value included in the power-law fit, ",
            print "xmin: %g" % xmin,
            if verbose: print "\nThe number of values above xmin, ",
            print "n(>xmin): %i" % n,
            if verbose: print "\nThe derived power-law alpha (p(x)~x^-alpha) with MLE-derived error, ",
            print "alpha: %g +/- %g  " % (alpha, self._alphaerr),
            if verbose: print "\nThe log of the Likelihood (the maximized parameter; you minimized the negative log likelihood), ",
            print "Log-Likelihood: %g  " % L,
            if verbose: print "\nThe KS-test statistic between the best-fit power-law and the data, ",
            print "ks: %g" % (ks),
            if scipyOK:
                if verbose: print " occurs with probability  ",
                print "p(ks): %g" % (self._ks_prob)
            else:
                print

        return xmin, alpha

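    # A minimal end-to-end sketch (not part of the original class): draw a
    # pure power-law sample by inverse-transform sampling and fit it.  The
    # variable names here are hypothetical, and the recovered values are only
    # approximately equal to the inputs:
    #
    #   import numpy.random as npr
    #   xmin_true, alpha_true = 1.0, 2.5
    #   data = xmin_true * npr.rand(10000)**(1/(1 - alpha_true))
    #   pl = plfit(data, quiet=True, silent=True)
    #   print(pl._xmin, pl._alpha)   # roughly xmin_true and alpha_true
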
    def discrete_best_alpha(self, alpharangemults=(0.9, 1.1), n_alpha=201,
                            approximate=True, verbose=True, finite=True):
        """
        Use the maximum likelihood to determine the most likely value of alpha

        *alpharangemults* [ 2-tuple ]
            Pair of values indicating multiplicative factors above and below
            the approximate alpha from the MLE alpha to use when determining
            the "exact" alpha (by directly maximizing the likelihood function)

        *n_alpha* [ int ]
            Number of alpha values to use when measuring.  Larger number is
            more accurate.

        *approximate* [ bool ]
            If False, try to "zoom-in" around the MLE alpha and get the exact
            best alpha value within some range around the approximate best

        *verbose* [ bool ]

        *finite* [ bool ]
            Correction for finite data?
        """
        data = self.data
        self._xmins = xmins = np.unique(data)
        if approximate:
            alpha_of_xmin = [discrete_alpha_mle(data, xmin) for xmin in xmins]
        else:
            alpha_approx = [discrete_alpha_mle(data, xmin) for xmin in xmins]
            alpharanges = [(0.9*a, 1.1*a) for a in alpha_approx]
            alpha_of_xmin = [most_likely_alpha(data, xmin, alpharange=ar, n_alpha=n_alpha)
                             for xmin, ar in zip(xmins, alpharanges)]
        ksvalues = np.array([discrete_ksD(data, xmin, alpha)
                             for xmin, alpha in zip(xmins, alpha_of_xmin)])

        self._alpha_values = np.array(alpha_of_xmin)
        self._xmin_kstest = ksvalues

        ksvalues[np.isnan(ksvalues)] = np.inf

        best_index = argmin(ksvalues)
        self._alpha = best_alpha = alpha_of_xmin[best_index]
        self._xmin = best_xmin = xmins[best_index]
        self._ks = best_ks = ksvalues[best_index]
        self._likelihood = best_likelihood = discrete_likelihood(data, best_xmin, best_alpha)

        # n must be computed before the finite-size correction below uses it
        self._ngtx = n = (self.data >= self._xmin).sum()

        if finite:
            self._alpha = self._alpha*(n-1.)/n + 1./n

        if verbose:
            print("alpha = %f   xmin = %f   ksD = %f   L = %f   (n<x) = %i  (n>=x) = %i"
                  % (best_alpha, best_xmin, best_ks, best_likelihood,
                     (data < best_xmin).sum(), (data >= best_xmin).sum()))

        self._alphaerr = (self._alpha-1.0)/np.sqrt(n)
        if scipyOK:
            self._ks_prob = scipy.stats.kstwobign.sf(self._ks*np.sqrt(n))

        return best_alpha, best_xmin, best_ks, best_likelihood

    def xminvsks(self, **kwargs):
        """
        Plot xmin versus the ks value for derived alpha.  This plot can be
        used as a diagnostic of whether you have derived the 'best' fit: if
        there are multiple local minima, your data set may be well suited to
        a broken powerlaw or a different function.
        """
        pylab.plot(self._xmins, self._xmin_kstest, '.')
        pylab.plot(self._xmin, self._ks, 's')
        #pylab.errorbar([self._ks],self._alpha,yerr=self._alphaerr,fmt='+')

        ax = pylab.gca()
        ax.set_ylabel("KS statistic")
        ax.set_xlabel("min(x)")
        pylab.draw()
        return ax

    def alphavsks(self, autozoom=True, **kwargs):
        """
        Plot alpha versus the ks value for derived alpha.  This plot can be
        used as a diagnostic of whether you have derived the 'best' fit: if
        there are multiple local minima, your data set may be well suited to
        a broken powerlaw or a different function.
        """
        pylab.plot(self._alpha_values, self._xmin_kstest, '.')
        pylab.errorbar(self._alpha, self._ks, xerr=self._alphaerr, fmt='+')

        ax = pylab.gca()
        if autozoom:
            ax.set_ylim(0.8*(self._ks), 3*(self._ks))
            ax.set_xlim((self._alpha)-5*self._alphaerr, (self._alpha)+5*self._alphaerr)
        ax.set_ylabel("KS statistic")
        ax.set_xlabel(r'$\alpha$')
        pylab.draw()
        return ax

    def plotcdf(self, x=None, xmin=None, alpha=None, pointcolor='k',
                pointmarker='+', **kwargs):
        """
        Plots CDF and powerlaw
        """
        if x is None:
            x = self.data
        if xmin is None:
            xmin = self._xmin
        if alpha is None:
            alpha = self._alpha

        x = np.sort(x)
        n = len(x)
        xcdf = np.arange(n, 0, -1, dtype='float')/float(n)

        q = x[x >= xmin]
        fcdf = (q/xmin)**(1-alpha)
        nc = xcdf[argmax(x >= xmin)]
        fcdf_norm = nc*fcdf

        D_location = argmax(xcdf[x >= xmin]-fcdf_norm)
        pylab.vlines(q[D_location], xcdf[x >= xmin][D_location],
                     fcdf_norm[D_location], color='m', linewidth=2)

        #plotx = pylab.linspace(q.min(),q.max(),1000)
        #ploty = (plotx/xmin)**(1-alpha) * nc

        pylab.loglog(x, xcdf, marker=pointmarker, color=pointcolor, **kwargs)
        #pylab.loglog(plotx,ploty,'r',**kwargs)
        pylab.loglog(q, fcdf_norm, 'r', **kwargs)

    def plotpdf(self, x=None, xmin=None, alpha=None, nbins=50, dolog=True,
                dnds=False, drawstyle='steps-post', histcolor='k',
                plcolor='r', **kwargs):
        """
        Plots PDF and powerlaw.

        kwargs is passed to pylab.hist and pylab.plot
        """
        if not(x):
            x = self.data
        if not(xmin):
            xmin = self._xmin
        if not(alpha):
            alpha = self._alpha

        x = np.sort(x)
        n = len(x)

        pylab.gca().set_xscale('log')
        pylab.gca().set_yscale('log')

        if dnds:
            hb = pylab.histogram(x, bins=np.logspace(log10(min(x)), log10(max(x)), nbins))
            h = hb[0]
            b = hb[1]
            db = hb[1][1:]-hb[1][:-1]
            h = h/db
            pylab.plot(b[:-1], h, drawstyle=drawstyle, color=histcolor, **kwargs)
            #alpha -= 1
        elif dolog:
            hb = pylab.hist(x, bins=np.logspace(log10(min(x)), log10(max(x)), nbins),
                            log=True, fill=False, edgecolor=histcolor, **kwargs)
            alpha -= 1
            h, b = hb[0], hb[1]
        else:
            hb = pylab.hist(x, bins=np.linspace((min(x)), (max(x)), nbins),
                            fill=False, edgecolor=histcolor, **kwargs)
            h, b = hb[0], hb[1]
        # plotting points are at the center of each bin
        b = (b[1:]+b[:-1])/2.0

        q = x[x >= xmin]
        px = (alpha-1)/xmin * (q/xmin)**(-alpha)

        # Normalize by the median ratio between the histogram and the power-law
        # The normalization is semi-arbitrary; an average is probably just as valid
        plotloc = (b > xmin)*(h > 0)
        norm = np.median(h[plotloc] / ((alpha-1)/xmin * (b[plotloc]/xmin)**(-alpha)))
        px = px*norm

        plotx = pylab.linspace(q.min(), q.max(), 1000)
        ploty = (alpha-1)/xmin * (plotx/xmin)**(-alpha) * norm

        #pylab.loglog(q,px,'r',**kwargs)
        pylab.loglog(plotx, ploty, color=plcolor, **kwargs)

        axlims = pylab.axis()
        pylab.vlines(xmin, axlims[2], max(px), colors=plcolor, linestyle='dashed')

        pylab.gca().set_xlim(min(x), max(x))

    def plotppf(self, x=None, xmin=None, alpha=None, dolog=True, **kwargs):
        """
        Plots the power-law-predicted value on the Y-axis against the real
        values along the X-axis.  Can be used as a diagnostic of the fit
        quality.
        """
        if not(xmin):
            xmin = self._xmin
        if not(alpha):
            alpha = self._alpha
        if not(x):
            x = np.sort(self.data[self.data > xmin])
        else:
            x = np.sort(x[x > xmin])

        # N = M^(-alpha+1)
        # M = N^(1/(-alpha+1))

        m0 = min(x)
        N = (1.0+np.arange(len(x)))[::-1]
        xmodel = m0 * N**(1/(1-alpha)) / max(N)**(1/(1-alpha))

        if dolog:
            pylab.loglog(x, xmodel, '.', **kwargs)
            pylab.gca().set_xlim(min(x), max(x))
            pylab.gca().set_ylim(min(x), max(x))
        else:
            pylab.plot(x, xmodel, '.', **kwargs)
        pylab.plot([min(x), max(x)], [min(x), max(x)], 'k--')
        pylab.xlabel("Real Value")
        pylab.ylabel("Power-Law Model Value")

    def test_pl(self, niter=1e3, print_timing=False, **kwargs):
        """
        Monte-Carlo test to determine whether distribution is consistent with
        a power law

        Runs through niter iterations of a sample size identical to the input
        sample size.

        Will randomly select values from the data < xmin.  The number of
        values selected will be chosen from a uniform random distribution
        with p(<xmin) = n(<xmin)/n.

        Once the sample is created, it is fit using above methods, then the
        best fit is used to compute a Kolmogorov-Smirnov statistic.  The KS
        stat distribution is compared to the KS value for the fit to the
        actual data, and p = fraction of random ks values greater than the
        data ks value is computed.  If p<.1, the data may be inconsistent
        with a powerlaw.  A data set of n(>xmin)>100 is required to
        distinguish a PL from an exponential, and n(>xmin)>~300 is required
        to distinguish a log-normal distribution from a PL.  For more
        details, see figure 4.1 and section

        **WARNING** This can take a very long time to run!  Execution time
        scales as niter * setsize
        """
        xmin = self._xmin
        alpha = self._alpha
        niter = int(niter)

        ntail = sum(self.data >= xmin)
        ntot = len(self.data)
        nnot = ntot-ntail              # n(<xmin)
        pnot = nnot/float(ntot)        # p(<xmin)
        nonpldata = self.data[self.data < xmin]
        nrandnot = sum(npr.rand(ntot) < pnot)  # randomly choose how many to sample from <xmin
        nrandtail = ntot - nrandnot            # and the rest will be sampled from the powerlaw

        ksv = []
        if print_timing:
            deltat = []
        for i in xrange(niter):
            # first, randomly sample from power law
            # with caveat!
            nonplind = np.floor(npr.rand(nrandnot)*nnot).astype('int')
            fakenonpl = nonpldata[nonplind]
            randarr = npr.rand(nrandtail)
            fakepl = randarr**(1/(1-alpha)) * xmin
            fakedata = np.concatenate([fakenonpl, fakepl])
            if print_timing:
                t0 = time.time()
            # second, fit to powerlaw
            # (add some silencing kwargs optionally)
            for k, v in {'quiet': True, 'silent': True, 'nosmall': True}.iteritems():
                if k not in kwargs:
                    kwargs[k] = v
            TEST = plfit(fakedata, **kwargs)
            ksv.append(TEST._ks)
            if print_timing:
                deltat.append(time.time() - t0)
                print("Iteration %i: %g seconds" % (i, deltat[-1]))

        ksv = np.array(ksv)
        p = (ksv > self._ks).sum() / float(niter)
        self._pval = p
        self._ks_rand = ksv

        print("p(%i) = %0.3f" % (niter, p))
        if print_timing:
            print("Iteration timing: %g +/- %g" % (np.mean(deltat), np.std(deltat)))

        return p, ksv

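    # Hedged usage sketch (not in the original source): after fitting, a quick
    # Monte-Carlo consistency check with a reduced iteration count might look
    # like this (the argument values are illustrative):
    #
    #   pl = plfit(data, quiet=True, silent=True)
    #   p, ks_distribution = pl.test_pl(niter=100)
    #   # p < 0.1 suggests the data may be inconsistent with a power law
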
    def lognormal(self, doprint=True):
        """
        Use the maximum likelihood estimator for a lognormal distribution to
        produce the best-fit lognormal parameters
        """
        # N = float(self.data.shape[0])
        # mu = log(self.data).sum() / N
        # sigmasquared = ( ( log(self.data) - mu )**2 ).sum() / N
        # self.lognormal_mu = mu
        # self.lognormal_sigma = np.sqrt(sigmasquared)
        # self.lognormal_likelihood = -N/2. * log(np.pi*2) - N/2. * log(sigmasquared) - 1/(2*sigmasquared) * (( self.data - mu )**2).sum()
        # if doprint:
        #     print "Best fit lognormal is exp( -(x-%g)^2 / (2*%g^2)" % (mu,np.sqrt(sigmasquared))
        #     print "Likelihood: %g" % (self.lognormal_likelihood)
        if scipyOK:
            fitpars = scipy.stats.lognorm.fit(self.data)
            self.lognormal_dist = scipy.stats.lognorm(*fitpars)
            self.lognormal_ksD, self.lognormal_ksP = scipy.stats.kstest(self.data, self.lognormal_dist.cdf)

            # nnlf = NEGATIVE log likelihood
            self.lognormal_likelihood = -1*scipy.stats.lognorm.nnlf(fitpars, self.data)

            # Is this the right likelihood ratio?
            # Definition of L from eqn. B3 of Clauset et al 2009:
            #     L = log(p(x|alpha))
            # _nnlf from scipy.stats.distributions:
            #     -sum(log(self._pdf(x, *args)),axis=0)
            # Assuming the pdf and p(x|alpha) are both non-inverted, it looks
            # like the _nnlf and L have opposite signs, which would explain the
            # likelihood ratio I've used here:
            self.power_lognorm_likelihood = (self._likelihood + self.lognormal_likelihood)
            # a previous version had 2*(above).  That is the correct form if
            # you want the likelihood ratio statistic "D":
            # http://en.wikipedia.org/wiki/Likelihood-ratio_test
            # The above explanation makes sense, since nnlf is the *negative*
            # log likelihood function:
            ## nnlf -- negative log likelihood function (to minimize)
            #
            # Assuming we want the ratio between the POSITIVE likelihoods,
            # the D statistic is:
            # D = -2 log( L_power / L_lognormal )
            self.likelihood_ratio_D = -2 * (log(self._likelihood/self.lognormal_likelihood))

            if doprint:
                print "Lognormal KS D: %g  p(D): %g" % (self.lognormal_ksD, self.lognormal_ksP),
                print "  Likelihood Ratio Statistic (powerlaw/lognormal): %g" % self.likelihood_ratio_D
                print "At this point, have a look at Clauset et al 2009 Appendix C: determining sigma(likelihood_ratio)"

    def plot_lognormal_pdf(self, **kwargs):
        """
        Plot the fitted lognormal distribution
        """
        if not hasattr(self, 'lognormal_dist'):
            return

        normalized_pdf = self.lognormal_dist.pdf(self.data)/self.lognormal_dist.pdf(self.data).max()
        minY, maxY = pylab.gca().get_ylim()
        pylab.plot(self.data, normalized_pdf*maxY, '.', **kwargs)

    def plot_lognormal_cdf(self, **kwargs):
        """
        Plot the fitted lognormal distribution
        """
        if not hasattr(self, 'lognormal_dist'):
            return

        x = np.sort(self.data)
        n = len(x)
        xcdf = np.arange(n, 0, -1, dtype='float')/float(n)
        lcdf = self.lognormal_dist.sf(x)

        D_location = argmax(xcdf-lcdf)
        pylab.vlines(x[D_location], xcdf[D_location], lcdf[D_location],
                     color='m', linewidth=2)

        pylab.plot(x, lcdf, ',', **kwargs)

def plfit_lsq(x, y):
    """
    Returns A and B in y=Ax^B
    http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html
    """
    n = len(x)
    btop = n * (log(x)*log(y)).sum() - (log(x)).sum()*(log(y)).sum()
    bbottom = n*(log(x)**2).sum() - (log(x).sum())**2
    b = btop / bbottom
    a = (log(y).sum() - b * log(x).sum()) / n

    A = exp(a)
    return A, b

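# Illustrative check (not part of the original module): for noiseless data
# that exactly follow y = A*x**B, plfit_lsq should recover A and B:
#
#   x = np.linspace(1, 10, 50)
#   A_fit, B_fit = plfit_lsq(x, 3.0 * x**-2.0)   # expect A_fit ~= 3.0, B_fit ~= -2.0
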
def plexp_cdf(x, xmin=1, alpha=2.5, pl_only=False, exp_only=False):
    """
    CDF(x) for the piecewise distribution exponential x<xmin, powerlaw x>=xmin
    This is the CDF version of the distributions drawn in fig 3.4a of Clauset
    et al.
    The constant "C" normalizes the PDF
    """
    x = np.array(x)
    C = 1/(-xmin/(1 - alpha) - xmin/alpha + exp(alpha)*xmin/alpha)
    Ppl = lambda X: 1+C*(xmin/(1-alpha)*(X/xmin)**(1-alpha))
    Pexp = lambda X: C*xmin/alpha*exp(alpha)-C*(xmin/alpha)*exp(-alpha*(X/xmin-1))

    if exp_only:
        return Pexp(x)
    elif pl_only:
        return Ppl(x)

    d = Ppl(x)
    d[x < xmin] = Pexp(x)[x < xmin]
    return d

def plexp_pdf(x, xmin=1, alpha=2.5):
    """
    PDF(x) for the piecewise distribution: exponential for x<xmin, powerlaw
    for x>=xmin (the PDF counterpart of plexp_cdf)
    """
    x = np.array(x)
    C = 1/(-xmin/(1 - alpha) - xmin/alpha + exp(alpha)*xmin/alpha)
    Ppl = lambda X: C*(X/xmin)**(-alpha)
    Pexp = lambda X: C*exp(-alpha*(X/xmin-1))
    d = Ppl(x)
    d[x < xmin] = Pexp(x)[x < xmin]
    return d

# WRONG
# def plexp_inv(P,xmin,alpha):
#     """
#     Inverse CDF for a piecewise PDF as defined in eqn. 3.10
#     of Clauset et al.
#     """
#
#     C = 1/(-xmin/(1 - alpha) - xmin/alpha + exp(alpha)*xmin/alpha)
#     Pxm = -C*(xmin/(1-alpha))
#     x = P*0
#     x[P>=Pxm] = xmin*( (P[P>=Pxm]-1) * (1-alpha)/(C*xmin) )**(1/(1-alpha)) # powerlaw
#     x[P<Pxm] = (log( (C*xmin/alpha*exp(alpha)-P[P<Pxm])/(C*xmin/alpha) ) - alpha) * (-xmin/alpha) # exp
#
#     return x

def plexp_inv(P, xmin, alpha, guess=1.):
    """
    Inverse CDF for a piecewise PDF as defined in eqn. 3.10 of Clauset et al.
    (previous version was incorrect and led to weird discontinuities in the
    distribution function)
    """
    import scipy.optimize  # fsolve is not pulled in by the scipy.stats import above

    equation = lambda x, prob: plexp_cdf(x, xmin, alpha)-prob

    # http://stackoverflow.com/questions/19840425/scipy-optimize-faster-root-finding-over-2d-grid
    def solver(y, x0=guess):
        return scipy.optimize.fsolve(equation, x0, args=(y,))
    f = np.vectorize(solver)
    return f(P)

def pl_inv(P, xm, a):
    """
    Inverse CDF for a pure power-law
    """
    x = (1-P)**(1/(1-a)) * xm
    return x

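# Example (a sketch, not from the original source): pl_inv is the natural way
# to generate pure power-law samples for test_fitter- or test_pl-style checks:
#
#   samples = pl_inv(npr.rand(5000), 1.0, 2.5)   # 5000 draws with xmin=1, alpha=2.5
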
def test_fitter(xmin=1.0, alpha=2.5, niter=500, npts=1000, invcdf=plexp_inv):
    """
    Tests the power-law fitter

    Examples
    ========
    Example (fig 3.4b in Clauset et al.)::

        xminin=[0.25,0.5,0.75,1,1.5,2,5,10,50,100]
        xmarr,af,ksv,nxarr = plfit.test_fitter(xmin=xminin,niter=1,npts=50000)
        loglog(xminin,xmarr.squeeze(),'x')

    Example 2::

        xminin=[0.25,0.5,0.75,1,1.5,2,5,10,50,100]
        xmarr,af,ksv,nxarr = plfit.test_fitter(xmin=xminin,niter=10,npts=1000)
        loglog(xminin,xmarr.mean(axis=0),'x')

    Example 3::

        xmarr,af,ksv,nxarr = plfit.test_fitter(xmin=1.0,niter=1000,npts=1000)
        hist(xmarr.squeeze());
        # Test results:
        # mean(xmarr) = 0.70, median(xmarr)=0.65 std(xmarr)=0.20
        # mean(af) = 2.51 median(af) = 2.49  std(af)=0.14
        # biased distribution; far from correct value of xmin but close to
        # correct alpha

    Example 4::

        xmarr,af,ksv,nxarr = plfit.test_fitter(xmin=1.0,niter=1000,npts=1000,invcdf=pl_inv)
        print("mean(xmarr): %0.2f median(xmarr): %0.2f std(xmarr): %0.2f" % (mean(xmarr),median(xmarr),std(xmarr)))
        print("mean(af): %0.2f median(af): %0.2f std(af): %0.2f" % (mean(af),median(af),std(af)))
        # mean(xmarr): 1.19 median(xmarr): 1.03 std(xmarr): 0.35
        # mean(af): 2.51 median(af): 2.50 std(af): 0.07

    """
    xmin = np.array(xmin)
    if xmin.shape == ():
        xmin.shape = 1
    lx = len(xmin)
    sz = [niter, lx]
    xmarr, alphaf_v, ksv, nxarr = np.zeros(sz), np.zeros(sz), np.zeros(sz), np.zeros(sz)
    for j in xrange(lx):
        for i in xrange(niter):
            randarr = npr.rand(npts)
            fakedata = invcdf(randarr, xmin[j], alpha)
            TEST = plfit(fakedata, quiet=True, silent=True, nosmall=True)
            alphaf_v[i, j] = TEST._alpha
            ksv[i, j] = TEST._ks
            nxarr[i, j] = TEST._ngtx
            xmarr[i, j] = TEST._xmin

    return xmarr, alphaf_v, ksv, nxarr

def discrete_likelihood(data, xmin, alpha):
    """
    Equation B.8 in Clauset

    Given a data set, an xmin value, and an alpha "scaling parameter",
    computes the log-likelihood (the value to be maximized)
    """
    if not scipyOK:
        raise ImportError("Can't import scipy.  Need scipy for zeta function.")
    from scipy.special import zeta as zeta

    zz = data[data >= xmin]
    nn = len(zz)

    sum_log_data = np.log(zz).sum()

    zeta = zeta(alpha, xmin)

    L_of_alpha = -1*nn*log(zeta) - alpha * sum_log_data

    return L_of_alpha

def discrete_likelihood_vector(data, xmin, alpharange=(1.5, 3.5), n_alpha=201):
    """
    Compute the likelihood for all "scaling parameters" in the range
    (alpharange) for a given xmin.  This is only part of the discrete value
    likelihood maximization problem as described in Clauset et al
    (Equation B.8)

    *alpharange* [ 2-tuple ]
        Two floats specifying the upper and lower limits of the power law
        alpha to test
    """
    from scipy.special import zeta as zeta

    zz = data[data >= xmin]
    nn = len(zz)

    alpha_vector = np.linspace(alpharange[0], alpharange[1], n_alpha)
    sum_log_data = np.log(zz).sum()

    # alpha_vector is a vector, xmin is a scalar
    zeta_vector = zeta(alpha_vector, xmin)

    #xminvec = np.arange(1.0,xmin)
    #xminalphasum = np.sum([xm**(-alpha_vector) for xm in xminvec])
    #L = -1*alpha_vector*sum_log_data - nn*log(zeta_vector) - xminalphasum

    L_of_alpha = -1*nn*log(zeta_vector) - alpha_vector * sum_log_data

    return L_of_alpha

def discrete_max_likelihood_arg(data, xmin, alpharange=(1.5, 3.5), n_alpha=201):
    """
    Returns the *argument* of the max of the likelihood of the data given an
    input xmin
    """
    likelihoods = discrete_likelihood_vector(data, xmin, alpharange=alpharange,
                                             n_alpha=n_alpha)
    Largmax = np.argmax(likelihoods)
    return Largmax

def discrete_max_likelihood(data, xmin, alpharange=(1.5, 3.5), n_alpha=201):
    """
    Returns the *maximum* of the likelihood of the data given an input xmin
    """
    likelihoods = discrete_likelihood_vector(data, xmin, alpharange=alpharange,
                                             n_alpha=n_alpha)
    Lmax = np.max(likelihoods)
    return Lmax

def most_likely_alpha(data, xmin, alpharange=(1.5, 3.5), n_alpha=201):
    """
    Return the most likely alpha for the data given an xmin
    """
    alpha_vector = np.linspace(alpharange[0], alpharange[1], n_alpha)
    return alpha_vector[discrete_max_likelihood_arg(data, xmin,
                                                    alpharange=alpharange,
                                                    n_alpha=n_alpha)]

def discrete_alpha_mle(data, xmin):
    """
    Equation B.17 of Clauset et al 2009

    The Maximum Likelihood Estimator of the "scaling parameter" alpha in the
    discrete case is similar to that in the continuous case
    """
    # boolean indices of the data above xmin
    gexmin = (data >= xmin)
    nn = gexmin.sum()
    if nn < 2:
        return 0
    xx = data[gexmin]
    alpha = 1.0 + float(nn) * (sum(log(xx/(xmin-0.5))))**-1
    return alpha

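# Quick illustration (not part of the original module): for integer data the
# approximate discrete MLE of Equation B.17 can be checked against a known
# generator, e.g. numpy's Zipf sampler:
#
#   ints = npr.zipf(2.5, 10000)                 # discrete power-law-like sample
#   alpha_d = discrete_alpha_mle(ints, xmin=5)  # should approach ~2.5 for large samples
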
def discrete_best_alpha(data, alpharangemults=(0.9, 1.1), n_alpha=201,
                        approximate=True, verbose=True):
    """
    Use the maximum L to determine the most likely value of alpha

    *alpharangemults* [ 2-tuple ]
        Pair of values indicating multiplicative factors above and below the
        approximate alpha from the MLE alpha to use when determining the
        "exact" alpha (by directly maximizing the likelihood function)
    """
    xmins = np.unique(data)
    if approximate:
        alpha_of_xmin = [discrete_alpha_mle(data, xmin) for xmin in xmins]
    else:
        alpha_approx = [discrete_alpha_mle(data, xmin) for xmin in xmins]
        alpharanges = [(0.9*a, 1.1*a) for a in alpha_approx]
        alpha_of_xmin = [most_likely_alpha(data, xmin, alpharange=ar, n_alpha=n_alpha)
                         for xmin, ar in zip(xmins, alpharanges)]
    ksvalues = [discrete_ksD(data, xmin, alpha)
                for xmin, alpha in zip(xmins, alpha_of_xmin)]

    best_index = argmin(ksvalues)
    best_alpha = alpha_of_xmin[best_index]
    best_xmin = xmins[best_index]
    best_ks = ksvalues[best_index]
    best_likelihood = discrete_likelihood(data, best_xmin, best_alpha)

    if verbose:
        print("alpha = %f   xmin = %f   ksD = %f   L = %f   (n<x) = %i  (n>=x) = %i"
              % (best_alpha, best_xmin, best_ks, best_likelihood,
                 (data < best_xmin).sum(), (data >= best_xmin).sum()))

    return best_alpha, best_xmin, best_ks, best_likelihood

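# Hedged usage sketch (not in the original file): the module-level discrete
# pipeline can be run directly on integer-valued data, e.g. the `ints` sample
# sketched above:
#
#   best_alpha, best_xmin, best_ks, best_L = discrete_best_alpha(ints, verbose=False)
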
def discrete_ksD(data, xmin, alpha):
    """
    given a sorted data set, a minimum, and an alpha, returns the power law
    ks-test D value w/data

    The returned value is the "D" parameter in the ks test

    (this is implemented differently from the continuous version because
    there are potentially multiple identical points that need comparison to
    the power law)
    """
    zz = np.sort(data[data >= xmin])
    nn = float(len(zz))
    if nn < 2:
        return np.inf
    #cx = np.arange(nn,dtype='float')/float(nn)
    #cf = 1.0-(zz/xmin)**(1.0-alpha)
    model_cdf = 1.0-(zz/xmin)**(1.0-alpha)
    data_cdf = np.searchsorted(zz, zz, side='left')/(float(nn))

    ks = max(abs(model_cdf-data_cdf))
    return ks