# Quick fits for TESS light curves

import exoplanet

exoplanet.utils.docs_setup()
print(f"exoplanet.__version__ = '{exoplanet.__version__}'")

exoplanet.__version__ = '0.5.3.dev15+gaf19a5c'


In this tutorial, we will fit the TESS light curve for a known transiting planet. While the Fitting TESS data case study goes through the full details of an end-to-end fit, this tutorial is significantly faster to run and can still give excellent results, depending on your goals. Some of the main differences are:

1. We start from the light curve rather than doing the photometry ourselves. This should pretty much always be fine unless you have a very bright, very faint, or crowded target.

2. We assume a circular orbit, but as you’ll see below, we can approximately relax this assumption after the fact.

3. We only fit the data near transit. In many cases this will be just fine, but if you have predictable stellar variability (like coherent rotation) then you might do better fitting more data.

We’ll fit the planet in the HD 118203 (TIC 286923464) system that was found to transit by Pepper et al. (2019). We picked this target because it is on an eccentric orbit, so assumption #2 above is not valid.

First, let’s download the TESS light curve using lightkurve:

import numpy as np
import lightkurve as lk
import matplotlib.pyplot as plt

# Download all of the SPOC light curves and combine them into one
lcfs = lk.search_lightcurve(
    "TIC 286923464", mission="TESS", author="SPOC"
).download_all()
lc = lcfs.stitch().remove_nans().remove_outliers(sigma=7)

# Convert the flux to parts per thousand (ppt) and work with
# contiguous float64 arrays
x = np.ascontiguousarray(lc.time.value, dtype=np.float64)
y = np.ascontiguousarray(1e3 * (lc.flux - 1), dtype=np.float64)
yerr = np.ascontiguousarray(1e3 * lc.flux_err, dtype=np.float64)

# Estimate the exposure time from the smallest time step
texp = np.min(np.diff(x))

plt.plot(x, y, "k", linewidth=0.5)
plt.xlabel("time [days]")
_ = plt.ylabel("relative flux [ppt]") Then, find the period, phase and depth of the transit using box least squares:stitch

import exoplanet as xo

pg = xo.estimators.bls_estimator(x, y, yerr, min_period=2, max_period=20)

peak = pg["peak_info"]
period_guess = peak["period"]
t0_guess = peak["transit_time"]
depth_guess = peak["depth"]

plt.plot(pg["bls"].period, pg["bls"].power, "k", linewidth=0.5)
plt.axvline(period_guess, alpha=0.3, linewidth=5)
plt.xlabel("period [days]")
plt.ylabel("bls power")
plt.yticks([])
_ = plt.xlim(pg["bls"].period.min(), pg["bls"].period.max())

Then, for efficiency purposes, let’s extract just the data within 0.25 days of the transits:

transit_mask = (
    np.abs(
        (x - t0_guess + 0.5 * period_guess) % period_guess - 0.5 * period_guess
    )
    < 0.25
)

# Apply the mask so that only the near-transit data are used below
x = np.ascontiguousarray(x[transit_mask])
y = np.ascontiguousarray(y[transit_mask])
yerr = np.ascontiguousarray(yerr[transit_mask])

plt.figure(figsize=(8, 4))
# Fold the time axis onto the candidate period, centered on the transit
x_fold = (
    x - t0_guess + 0.5 * period_guess
) % period_guess - 0.5 * period_guess
plt.scatter(x_fold, y, c=x, s=3)
plt.xlabel("time since transit [days]")
plt.ylabel("relative flux [ppt]")
plt.colorbar(label="time [days]")
_ = plt.xlim(-0.25, 0.25)

That looks a little janky, but it’s good enough for now.

## The probabilistic model

Here’s how we set up the PyMC3 model in this case:

import pymc3 as pm
import aesara_theano_fallback.tensor as tt

import pymc3_ext as pmx
from celerite2.theano import terms, GaussianProcess

with pm.Model() as model:

    # Stellar parameters
    mean = pm.Normal("mean", mu=0.0, sigma=10.0)
    u = xo.distributions.QuadLimbDark("u")
    star_params = [mean, u]

    # Gaussian process noise model
    sigma = pm.InverseGamma("sigma", alpha=3.0, beta=2 * np.median(yerr))
    log_sigma_gp = pm.Normal("log_sigma_gp", mu=0.0, sigma=10.0)
    log_rho_gp = pm.Normal("log_rho_gp", mu=np.log(10.0), sigma=10.0)
    kernel = terms.SHOTerm(
        sigma=tt.exp(log_sigma_gp), rho=tt.exp(log_rho_gp), Q=1.0 / 3
    )
    noise_params = [sigma, log_sigma_gp, log_rho_gp]

    # Planet parameters
    log_ror = pm.Normal(
        "log_ror", mu=0.5 * np.log(depth_guess * 1e-3), sigma=10.0
    )
    ror = pm.Deterministic("ror", tt.exp(log_ror))

    # Orbital parameters
    log_period = pm.Normal("log_period", mu=np.log(period_guess), sigma=1.0)
    period = pm.Deterministic("period", tt.exp(log_period))
    t0 = pm.Normal("t0", mu=t0_guess, sigma=1.0)
    log_dur = pm.Normal("log_dur", mu=np.log(0.1), sigma=10.0)
    dur = pm.Deterministic("dur", tt.exp(log_dur))
    b = xo.distributions.ImpactParameter("b", ror=ror)

    # Set up the orbit
    orbit = xo.orbits.KeplerianOrbit(period=period, duration=dur, t0=t0, b=b)

    # We're going to track the implied density for reasons that will become clear later
    pm.Deterministic("rho_circ", orbit.rho_star)

    # Set up the mean transit model, integrated over the exposure time
    star = xo.LimbDarkLightCurve(u)
    lc_model = mean + 1e3 * tt.sum(
        star.get_light_curve(orbit=orbit, r=ror, t=x, texp=texp), axis=-1
    )

    # Finally the GP observation model
    gp = GaussianProcess(kernel, t=x, diag=yerr**2 + sigma**2)
    gp.marginal("obs", observed=y - lc_model)

    # Double check that everything looks good - we shouldn't see any NaNs!
    print(model.check_test_point())

    # Optimize the model
    map_soln = model.test_point
    map_soln = pmx.optimize(map_soln, [sigma])
    map_soln = pmx.optimize(map_soln, [ror, b, dur])
    map_soln = pmx.optimize(map_soln, noise_params)
    map_soln = pmx.optimize(map_soln, star_params)
    map_soln = pmx.optimize(map_soln)

mean                   -3.22
sigma_log__            -0.53
log_sigma_gp           -3.22
log_rho_gp             -3.22
log_ror                -3.22
log_period             -0.92
t0                     -0.92
log_dur                -3.22
b_impact__             -1.39
obs                -34364.49
Name: Log-probability of test_point, dtype: float64

optimizing logp for variables: [sigma]

100.00% [15/15 00:00<00:00 logp = -8.213e+03]


message: Optimization terminated successfully.
logp: -34387.13404363601 -> -8213.215637097148

optimizing logp for variables: [log_dur, b, log_ror]

100.00% [23/23 00:00<00:00 logp = -6.545e+03]


message: Optimization terminated successfully.
logp: -8213.215637097148 -> -6545.214222425457

optimizing logp for variables: [log_rho_gp, log_sigma_gp, sigma]

100.00% [26/26 00:00<00:00 logp = -2.398e+03]


message: Optimization terminated successfully.
logp: -6545.214222425457 -> -2398.090810295713

optimizing logp for variables: [u, mean]

100.00% [11/11 00:00<00:00 logp = -2.394e+03]


message: Optimization terminated successfully.
logp: -2398.090810295713 -> -2393.91680491521

optimizing logp for variables: [b, log_dur, t0, log_period, log_ror, log_rho_gp, log_sigma_gp, sigma, u, mean]

100.00% [112/112 00:00<00:00 logp = -1.842e+03]


message: Desired error not necessarily achieved due to precision loss.
logp: -2393.91680491521 -> -1841.6470841122261


Now we can plot our initial model:

with model:
    # Evaluate the MAP transit model and GP prediction
    lc_pred = pmx.eval_in_model(lc_model, map_soln)
    gp_pred = pmx.eval_in_model(gp.predict(y - lc_pred), map_soln)

plt.figure(figsize=(8, 4))
x_fold = (x - map_soln["t0"] + 0.5 * map_soln["period"]) % map_soln[
    "period"
] - 0.5 * map_soln["period"]
inds = np.argsort(x_fold)
plt.scatter(x_fold, y - gp_pred - map_soln["mean"], c=x, s=3)
plt.plot(x_fold[inds], lc_pred[inds] - map_soln["mean"], "k")
plt.xlabel("time since transit [days]")
plt.ylabel("relative flux [ppt]")
plt.colorbar(label="time [days]")
_ = plt.xlim(-0.25, 0.25)

That looks better!

Now on to sampling:

with model:
    trace = pmx.sample(
        tune=1000,
        draws=1000,
        start=map_soln,
        chains=2,
        cores=2,
        return_inferencedata=True,
        random_seed=[286923464, 464329682],
    )

Multiprocess sampling (2 chains in 2 jobs)

NUTS: [b, log_dur, t0, log_period, log_ror, log_rho_gp, log_sigma_gp, sigma, u, mean]

100.00% [4000/4000 02:03<00:00 Sampling 2 chains, 0 divergences]
Sampling 2 chains for 1_000 tune and 1_000 draw iterations (2_000 + 2_000 draws total) took 124 seconds.
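
Because we asked pmx.sample to return an InferenceData object, we can also confirm the divergence count programmatically (a quick check using arviz's standard sample_stats layout):

# Count divergent transitions across all chains and draws (expect 0)
print(int(trace.sample_stats.diverging.sum()))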


Then we can take a look at the summary statistics:

import arviz as az

az.summary(trace)

| | mean | sd | hdi_3% | hdi_97% | mcse_mean | mcse_sd | ess_bulk | ess_tail | r_hat |
|---|---|---|---|---|---|---|---|---|---|
| mean | 0.266 | 0.092 | 0.090 | 0.433 | 0.002 | 0.002 | 1535.0 | 1375.0 | 1.0 |
| log_sigma_gp | -0.802 | 0.142 | -1.045 | -0.521 | 0.004 | 0.003 | 1411.0 | 1258.0 | 1.0 |
| log_rho_gp | -0.560 | 0.187 | -0.898 | -0.197 | 0.005 | 0.003 | 1531.0 | 1414.0 | 1.0 |
| log_ror | -2.901 | 0.007 | -2.916 | -2.888 | 0.000 | 0.000 | 1657.0 | 1242.0 | 1.0 |
| log_period | 1.814 | 0.000 | 1.814 | 1.814 | 0.000 | 0.000 | 1993.0 | 1397.0 | 1.0 |
| t0 | 1712.662 | 0.000 | 1712.662 | 1712.662 | 0.000 | 0.000 | 1767.0 | 1463.0 | 1.0 |
| log_dur | -1.503 | 0.002 | -1.506 | -1.499 | 0.000 | 0.000 | 2414.0 | 1309.0 | 1.0 |
| u[0] | 0.296 | 0.071 | 0.162 | 0.422 | 0.002 | 0.001 | 1967.0 | 894.0 | 1.0 |
| u[1] | 0.184 | 0.099 | -0.011 | 0.359 | 0.002 | 0.002 | 2012.0 | 1058.0 | 1.0 |
| sigma | 0.187 | 0.006 | 0.175 | 0.198 | 0.000 | 0.000 | 2110.0 | 1497.0 | 1.0 |
| ror | 0.055 | 0.000 | 0.054 | 0.056 | 0.000 | 0.000 | 1657.0 | 1242.0 | 1.0 |
| period | 6.135 | 0.000 | 6.135 | 6.135 | 0.000 | 0.000 | 1993.0 | 1397.0 | 1.0 |
| dur | 0.223 | 0.000 | 0.222 | 0.223 | 0.000 | 0.000 | 2414.0 | 1309.0 | 1.0 |
| b | 0.131 | 0.082 | 0.001 | 0.270 | 0.002 | 0.002 | 1074.0 | 844.0 | 1.0 |
| rho_circ | 0.330 | 0.012 | 0.306 | 0.345 | 0.000 | 0.000 | 1338.0 | 1420.0 | 1.0 |
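
If you want a closer look at convergence than the ess and r_hat columns provide, a standard arviz trace plot of the key physical parameters works well here:

# Visual check of mixing and stationarity for the physical parameters
_ = az.plot_trace(trace, var_names=["period", "ror", "b", "dur"])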

And plot the posterior covariances compared to the values from Pepper et al. (2019):

import corner

_ = corner.corner(
    trace, var_names=["period", "ror", "b"], truths=[6.134980, 0.05538, 0.125]
)

## Bonus: eccentricity

As discussed above, we fit this model assuming a circular orbit, which speeds things up for a few reasons. First, setting the eccentricity to zero means that the orbital dynamics are much simpler and more computationally efficient, since we don’t need to solve Kepler’s equation numerically. But this isn’t actually the main effect! The bigger issue is that the degeneracies between eccentricity, argument of periastron, impact parameter, and planet radius are hard for the sampler to handle, causing its performance to plummet. In this case, by fitting with a circular orbit where duration is one of the parameters, everything is well behaved and the sampler runs faster.
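
For reference, here is roughly what the direct eccentric parameterization would look like inside the model block above (a schematic sketch only; the unit-disk trick for sampling (ecc, omega) is the usual pattern, and these are exactly the parameters we avoided sampling):

# Schematic alternative (NOT what we did above): sample (ecc, omega)
# through a point in the unit disk and pass them to the orbit. These are
# the parameters whose degeneracies with b and ror slow the sampler down.
ecs = pmx.UnitDisk("ecs", testval=np.array([0.01, 0.0]))
ecc = pm.Deterministic("ecc", tt.sum(ecs**2))
omega = pm.Deterministic("omega", tt.arctan2(ecs[1], ecs[0]))
orbit = xo.orbits.KeplerianOrbit(
    period=period, t0=t0, b=b, ecc=ecc, omega=omega
)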

But, in this case, the planet is actually on an eccentric orbit, so that assumption isn’t justified. It has been recognized by various researchers over the years (I first learned about this from Bekki Dawson) that, to first order, the eccentricity mainly just changes the transit duration. The key realization is that this can be thought of as a change in the implied density of the star. Therefore, if you fit the transit using stellar density (or duration, in this case) as one of the parameters (note: if there is more than one planet, each needs its own stellar density parameter), you can use an independent measurement of the stellar density to infer the eccentricity of the orbit after the fact. All the details are described in Dawson & Johnson (2012), but here’s how you can do this using the stellar density listed in the TESS input catalog:

from astroquery.mast import Catalogs

star = Catalogs.query_object("TIC 286923464", catalog="TIC", radius=0.001)
tic_rho_star = float(star["rho"]), float(star["e_rho"])
print("rho_star = {0} ± {1}".format(*tic_rho_star))

# Extract the implied density from the fit, repeating each posterior
# sample 100 times so it can be paired with many (ecc, omega) prior draws
rho_circ = np.repeat(np.asarray(trace.posterior["rho_circ"]).flatten(), 100)

# Sample eccentricity and omega from their priors (the math might
# be a little more subtle for more informative priors, but I leave
# that as an exercise for the reader...)
ecc = np.random.uniform(0, 1, len(rho_circ))
omega = np.random.uniform(-np.pi, np.pi, len(rho_circ))

# Compute the "g" parameter from Dawson & Johnson and what true
# density that implies
g = (1 + ecc * np.sin(omega)) / np.sqrt(1 - ecc**2)
rho = rho_circ / g**3

# Re-weight these samples to get weighted posterior samples
log_weights = -0.5 * ((rho - tic_rho_star[0]) / tic_rho_star[1]) ** 2
weights = np.exp(log_weights - np.max(log_weights))

# Estimate the expected posterior quantiles
q = corner.quantile(ecc, [0.16, 0.5, 0.84], weights=weights)
print(
    "eccentricity = {0:.2f} +{1:.2f} -{2:.2f}".format(
        q[1], q[2] - q[1], q[1] - q[0]
    )
)

_ = corner.corner(
    np.vstack((ecc, omega)).T,
    weights=weights,
    truths=[0.316, None],
    plot_datapoints=False,
    labels=["eccentricity", "omega"],
)

rho_star = 0.121689 ± 0.0281776
eccentricity = 0.46 +0.25 -0.13

As you can see, this eccentricity estimate is consistent (albeit with large uncertainties) with the value that Pepper et al. (2019) measured using radial velocities, and it is clear that this planet is not on a circular orbit.
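
To see at a glance why this works, here is a back-of-the-envelope version of the same argument, plugging in the rho_circ posterior median from the summary table and the TIC density printed above:

# Dawson & Johnson (2012): rho_circ = g**3 * rho_star, with
# g = (1 + ecc * sin(omega)) / sqrt(1 - ecc**2)
rho_circ_med = 0.330  # posterior median from the summary table [g/cm^3]
rho_star_tic = 0.121689  # TIC density printed above [g/cm^3]
g = (rho_circ_med / rho_star_tic) ** (1.0 / 3)
print(f"g = {g:.2f}")  # well above 1, so the orbit can't be circular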