The true meaning of the alzabo
Astronomy, science, technology, whimsy. Not necessarily in that order.
def fit_many_parameters(model, parameters, fit_function, n):
    # Parametric bootstrap: simulate n fake data sets from the model at the
    # given parameters, refit each one, and return the array of fitted values.
    result = np.zeros((n, len(parameters)))
    for i in range(n):
        fake_data = model.generate(parameters)
        result[i] = fit_function(model, fake_data)
    return result
def plot_many_parameters(model, true_parameters):
    # Generate one "observed" data set, fit it, then use the fitted parameters
    # to simulate many more data sets and look at the scatter of the refits.
    data = model.generate(true_parameters)
    fit_parameters = fit_linear_least_squares(model, data)
    varied_parameters = fit_many_parameters(model, fit_parameters,
                                            fit_linear_least_squares, 200)
    plt.figure()
    plt.plot(varied_parameters[:,0], varied_parameters[:,1],
             ".", label="simulated")
    plt.errorbar([fit_parameters[0]], [fit_parameters[1]],
                 xerr=np.std(varied_parameters[:,0]),
                 yerr=np.std(varied_parameters[:,1]),
                 fmt="+", label="fit")
    plt.plot([true_parameters[0]], [true_parameters[1]], "^", label="true")
    plt.legend(loc="best")
def demo_least_squares_uncertainties():
    true_parameters = np.array([2.,-1.])
    n = 10
    uncertainty = 0.1
    model = SinCosModel(n, uncertainty)
    plot_many_parameters(model, true_parameters)
    plt.title("Fitted parameters to a sin/cos model")
    plt.savefig("monte-carlo-errors-1.png")
class CovariantSinusoidModel(SinCosModel):
    # Same as SinCosModel, but the two basis functions are nearly identical
    # (sinusoids only 0.1 radians out of phase), so the errors on the two
    # fitted amplitudes are strongly covariant.
    def predicted_values(self, parameters):
        a, b = parameters
        xs = np.arange(self.n)/float(self.n)
        return a*np.sin(2*np.pi*xs) + b*np.sin(2*np.pi*xs+0.1)
def demo_least_squares_uncertainties_covariant():
    true_parameters = np.array([2.,-1.])
    n = 10
    uncertainty = 0.1
    model = CovariantSinusoidModel(n, uncertainty)
    plot_many_parameters(model, true_parameters)
    plt.title("Fitted parameters to a covariant model")
    plt.savefig("monte-carlo-errors-2.png")
def chi2(data, predicted, uncertainties):
    # Square root of the usual chi-squared sum; only the ranking against the
    # Monte Carlo samples matters, so the monotonic square root is harmless.
    return np.sqrt(np.sum(((data-predicted)/uncertainties)**2, axis=-1))
def plot_quality_of_fit(model, parameters, data, n_samples=1000):
    # Compare the chi2 of the real data against the distribution of chi2
    # values obtained by simulating many data sets at the same parameters.
    data_chi2 = chi2(data,
                     model.predicted_values(parameters),
                     model.uncertainties(parameters))
    chi2_values = np.zeros(n_samples)
    for i in range(n_samples):
        chi2_values[i] = chi2(model.generate(parameters),
                              model.predicted_values(parameters),
                              model.uncertainties(parameters))
    chi2_values.sort()
    n_less = np.searchsorted(chi2_values, data_chi2)
    plt.figure()
    # make a "stair-step" plot of the empirical cumulative distribution
    chi2_r = np.repeat(chi2_values, 2)
    y2 = np.hstack([0.0,
                    np.repeat(np.arange(1, n_samples)/float(n_samples), 2),
                    1.0])
    plt.plot(chi2_r, y2)
    plt.axvline(data_chi2, color='red')
    plt.axhline(n_less/float(n_samples), color='red')
    plt.text(1, 0,
             ("a fraction %g of the samples are this bad" %
              (1-n_less/float(n_samples))),
             horizontalalignment="right",
             verticalalignment="bottom",
             transform=plt.gca().transAxes)
    plt.xlabel(r"$\chi^2$")
    plt.ylabel(r"Fraction of samples less than given $\chi^2$")
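# If only the number matters and not the plot, the same Monte Carlo comparison
# collapses to a few lines. A hypothetical helper (not in the original code),
# reusing chi2 and the model interface defined elsewhere in this post:
def monte_carlo_p_value(model, parameters, data, n_samples=1000):
    # fraction of synthetic data sets that fit at least as badly as the real data
    data_chi2 = chi2(data, model.predicted_values(parameters),
                     model.uncertainties(parameters))
    fake_chi2 = np.array([chi2(model.generate(parameters),
                               model.predicted_values(parameters),
                               model.uncertainties(parameters))
                          for _ in range(n_samples)])
    return np.mean(fake_chi2 >= data_chi2)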
def demo_goodness_of_good_fit():
    true_parameters = np.array([2.,-1.])
    n = 10
    uncertainty = 0.1
    model = SinCosModel(n, uncertainty)
    data = model.generate(true_parameters)
    fit_parameters = fit_linear_least_squares(model, data)
    plot_quality_of_fit(model, fit_parameters, data)
    plt.title("Goodness of fit for a correct model")
    plt.savefig("monte-carlo-good-fit.png")
def demo_goodness_of_bad_fit():
    n = 10
    uncertainty = 0.1
    xs = np.arange(n)/float(n)
    model = SinCosModel(n, uncertainty)
    # Data the model cannot reproduce: pure noise plus a single spike.
    data = np.random.normal(scale=uncertainty, size=n)
    data[n//2] += 1.
    fit_parameters = fit_linear_least_squares(model, data)
    plt.figure()
    plt.errorbar(xs, data, uncertainty, label="data", fmt="+")
    plt.plot(xs, model.predicted_values(fit_parameters), label="fitted value")
    plt.xlim(0, 1)
    plt.title("Fitting the wrong model")
    plt.legend(loc="best")
    plt.savefig("wrong-model.png")
    plot_quality_of_fit(model, fit_parameters, data, n_samples=10000)
    plt.title("Goodness of fit for an incorrect model")
    plt.savefig("monte-carlo-bad-fit.png")
import numpy as np
import pylab as plt

class SinCosModel:
    # A linear model: a*sin(2*pi*x) + b*cos(2*pi*x) sampled at n points on
    # [0,1), with independent Gaussian errors of fixed size.
    def __init__(self, n, uncertainty=1.):
        self.initial_guess = np.array([1.,0.])
        self.n = n
        self.uncertainty = uncertainty
    def predicted_values(self, parameters):
        a, b = parameters
        xs = np.arange(self.n)/float(self.n)
        return a*np.sin(2*np.pi*xs) + b*np.cos(2*np.pi*xs)
    def uncertainties(self, parameters):
        return self.uncertainty*np.ones(self.n)
    def generate(self, parameters):
        # One synthetic data set: model prediction plus Gaussian noise.
        return (self.predicted_values(parameters) +
                np.random.normal(scale=self.uncertainty, size=self.n))
def fit_linear_least_squares(model, data):
    n_params = len(model.initial_guess)
    n_data = len(data)
    assert len(model.predicted_values(model.initial_guess)) == n_data
    coefficient_matrix = np.zeros((n_data, n_params))
    for i in range(n_params):
        params = np.zeros(n_params)
        params[i] = 1
        coefficient_matrix[:,i] = model.predicted_values(params)
    # rcond=None selects the modern default cutoff and silences the warning
    # that newer numpy versions emit otherwise
    x, residues, rank, s = np.linalg.lstsq(coefficient_matrix, data, rcond=None)
    return x
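# Why the unit-parameter trick above works: the model is linear in its
# parameters, so predicted_values with the i-th parameter set to one and the
# rest to zero returns exactly the i-th basis function, and the columns of
# coefficient_matrix form the design matrix A. np.linalg.lstsq then finds the
# parameters p minimizing ||A p - data||^2, i.e. it solves the normal
# equations (A^T A) p = A^T data.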
def demo_linear_least_squares():
    true_parameters = np.array([2.,-1.])
    n = 10
    uncertainty = 0.1
    model = SinCosModel(n, uncertainty)
    xs = np.arange(n)/float(n)
    data = model.generate(true_parameters)
    fit_parameters = fit_linear_least_squares(model, data)
    plt.figure()
    plt.errorbar(xs, data, uncertainty, label="data", fmt="+")
    plt.plot(xs, model.predicted_values(true_parameters), label="true value")
    plt.plot(xs, model.predicted_values(fit_parameters), label="fitted value")
    plt.xlim(0, 1)
    plt.title("Simple linear least-squares fitting")
    plt.legend(loc="best")
    plt.savefig("linear-least-squares-1.png")
if __name__=='__main__':
    np.random.seed(0)
    demo_linear_least_squares()
    plt.show()
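Presumably the Monte Carlo figures earlier in the post were produced by calling the other demo functions as well. A sketch of a main block that regenerates all of them, assuming the snippets above live in one module (this would replace the block above, not follow it):

if __name__=='__main__':
    np.random.seed(0)
    demo_linear_least_squares()
    demo_least_squares_uncertainties()
    demo_least_squares_uncertainties_covariant()
    demo_goodness_of_good_fit()
    demo_goodness_of_bad_fit()
    plt.show()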
The edge of locality: visualizing a black hole from the inside
(Submitted on 27 Mar 2009)

Abstract: We illustrate and discuss the view seen by an observer inside the horizon of a Schwarzschild black hole. The view as the observer approaches the central singularity is of particular interest because, according to ideas arising from "observer complementarity," points in opposite directions along the observer's past lightcone are at "the edge of locality," where standard flat-space quantum-field-theory commutation rules may be at the brink of failure. Near the singularity, the observer's view is aberrated by the diverging tidal force into a horizontal plane. The view in the horizontal plane is highly blueshifted, but all directions other than horizontal appear highly redshifted. We argue that the affine distance provides a canonical measure of distance along a light ray from emitter to observer. Since the affine distance is not directly measurable by the observer, we also consider perceptual distances, and argue that the trinocular distance (binocular distance is inadequate) provides an estimate of affine distance that would allow tree-leaping apes to survive in highly curved spacetime.