import os
import warnings

import polars as pl
import numpy as np
import numba
import scipy.optimize
import scipy.special
import scipy.stats as st

import bebi103

import iqplot

import bokeh.io
bokeh.io.output_notebook()
We have previously introduced the Akaike information criterion. Here, we will demonstrate its use in model comparison and the mechanics of how to calculate it.
As a reminder, for a set of parameters \(\theta\) with MLE \(\theta^*\) and a model with log-likelihood \(\ell(\theta;\text{data})\), the AIC is given by

\[
\text{AIC} = -2\,\ell(\theta^*;\text{data}) + 2p,
\]

where \(p\) is the number of free parameters in the model.
To begin, we will use the AIC to compare a single Negative Binomial model to a mixture of two Negative Binomials for smFISH data from Singer, et al.
48.1 AIC for mRNA counts
Let us now compare the single Negative Binomial to the mixture model for mRNA counts. We again need our functions for computing the MLE and computing the log-likelihood from previous lessons.
def log_like_iid_nbinom(params, n):
    """Log likelihood for i.i.d. NBinom measurements, parametrized
    by alpha, b=1/beta."""
    alpha, b = params

    if alpha <= 0 or b <= 0:
        return -np.inf

    return np.sum(st.nbinom.logpmf(n, alpha, 1 / (1 + b)))


def mle_iid_nbinom(n):
    """Perform maximum likelihood estimates for parameters for i.i.d.
    NBinom measurements, parametrized by alpha, b=1/beta"""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = scipy.optimize.minimize(
            fun=lambda params, n: -log_like_iid_nbinom(params, n),
            x0=np.array([3, 3]),
            args=(n,),
            method='Powell'
        )

    if res.success:
        return res.x
    else:
        raise RuntimeError('Convergence failed with message', res.message)


def initial_guess_mix(n, w_guess):
    """Generate initial guess for mixture model."""
    n_low = n[n < np.percentile(n, 100 * w_guess)]
    n_high = n[n >= np.percentile(n, 100 * w_guess)]

    alpha1, b1 = mle_iid_nbinom(n_low)
    alpha2, b2 = mle_iid_nbinom(n_high)

    return alpha1, b1, alpha2, b2


def log_like_mix(alpha1, b1, alpha2, b2, w, n):
    """Log-likelihood of binary Negative Binomial mixture model."""
    # Fix nonidentifiability by enforcing values of w
    if w < 0 or w > 1:
        return -np.inf

    # Physical bounds on parameters
    if alpha1 < 0 or alpha2 < 0 or b1 < 0 or b2 < 0:
        return -np.inf

    logx1 = st.nbinom.logpmf(n, alpha1, 1 / (1 + b1))
    logx2 = st.nbinom.logpmf(n, alpha2, 1 / (1 + b2))

    # Multipliers for log-sum-exp
    lse_coeffs = np.tile([w, 1 - w], [len(n), 1]).transpose()

    # log-likelihood for each measurement
    log_likes = scipy.special.logsumexp(
        np.vstack([logx1, logx2]), axis=0, b=lse_coeffs
    )

    return np.sum(log_likes)


def mle_mix(n, w_guess):
    """Obtain MLE estimate for parameters for binary mixture of
    Negative Binomials."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = scipy.optimize.minimize(
            fun=lambda params, n: -log_like_mix(*params, n),
            x0=[*initial_guess_mix(n, w_guess), w_guess],
            args=(n,),
            method='Powell',
            tol=1e-6,
        )

    if res.success:
        return res.x
    else:
        raise RuntimeError('Convergence failed with message', res.message)
Now we can load in the data and compute the MLEs for each of the four genes.
# Load in data (data_path is assumed to be defined in the notebook's setup)
df = pl.read_csv(
    os.path.join(data_path, "singer_transcript_counts.csv"), comment_prefix="#"
)

df_mle = pl.DataFrame(
    schema=[("gene", str)]
    + [(param, float) for param in ["alpha", "b", "alpha1", "b1", "alpha2", "b2", "w"]]
)

for gene in df.columns:
    n = df[gene].to_numpy()

    # Single Negative Binomial MLE
    alpha, b = mle_iid_nbinom(n)

    # Mixture model MLE
    alpha1, b1, alpha2, b2, w = mle_mix(n, 0.2)

    # Store results in data frame
    df_mle = pl.concat(
        (
            df_mle,
            pl.DataFrame(
                data=[[gene, alpha, b, alpha1, b1, alpha2, b2, w]],
                schema=df_mle.schema,
                orient="row",
            ),
        )
    )

# Take a look
df_mle
shape: (4, 8)
gene      alpha     b          alpha1    b1         alpha2    b2         w
str       f64       f64        f64       f64        f64       f64        f64
"Rex1"    1.634562  84.680915  3.497009  4.104916   5.089625  31.810375  0.160422
"Rest"    4.530335  16.543054  2.786601  12.395701  6.683424  11.953265  0.108772
"Nanog"   1.263097  69.347842  0.834832  66.535947  4.127488  28.133048  0.466636
"Prdm14"  0.552886  8.200636   2.385858  4.747279   0.558672  4.872751   0.210606
For each of the two models, we can compute the log likelihood evaluated at the MLEs for the parameters.
# Define functions taking Polars structs for computing log likelihoods
def pl_log_like_iid_nbinom(s):
    return log_like_iid_nbinom((s["alpha"], s["b"]), df[s["gene"]].to_numpy())


def pl_log_like_mix(s):
    return log_like_mix(
        s["alpha1"], s["b1"], s["alpha2"], s["b2"], s["w"], df[s["gene"]].to_numpy()
    )


# Apply the functions
df_mle = df_mle.with_columns(
    # Single negative binomial
    pl.struct(["alpha", "b", "gene"])
    .map_elements(pl_log_like_iid_nbinom, return_dtype=float)
    .alias("log_like_single"),
    # Mixture model
    pl.struct(["alpha1", "b1", "alpha2", "b2", "w", "gene"])
    .map_elements(pl_log_like_mix, return_dtype=float)
    .alias("log_like_mix"),
)

# Take a look
df_mle
shape: (4, 10)
gene      alpha     b          alpha1    b1         alpha2    b2         w         log_like_single  log_like_mix
str       f64       f64        f64       f64        f64       f64        f64       f64              f64
"Rex1"    1.634562  84.680915  3.497009  4.104916   5.089625  31.810375  0.160422  -1638.678482     -1590.353743
"Rest"    4.530335  16.543054  2.786601  12.395701  6.683424  11.953265  0.108772  -1376.748398     -1372.108896
"Nanog"   1.263097  69.347842  0.834832  66.535947  4.127488  28.133048  0.466636  -1524.928918     -1512.444558
"Prdm14"  0.552886  8.200636   2.385858  4.747279   0.558672  4.872751   0.210606  -713.091587      -712.702876
We can already see a very large difference between the log likelihoods of the two models evaluated at their MLEs for Rex1, but not much difference for Prdm14. The mixture model has \(p = 5\) parameters, while the single Negative Binomial model has \(p = 2\). With these numbers, we can compute the AIC and then also the Akaike weights.
df_mle = df_mle.with_columns(
    (-2 * (pl.col('log_like_single') - 2)).alias('AIC_single'),
    (-2 * (pl.col('log_like_mix') - 5)).alias('AIC_mix'),
)

# Take a look
df_mle
shape: (4, 12)
gene      alpha     b          alpha1    b1         alpha2    b2         w         log_like_single  log_like_mix  AIC_single   AIC_mix
str       f64       f64        f64       f64        f64       f64        f64       f64              f64           f64          f64
"Rex1"    1.634562  84.680915  3.497009  4.104916   5.089625  31.810375  0.160422  -1638.678482     -1590.353743  3281.356963  3190.707487
"Rest"    4.530335  16.543054  2.786601  12.395701  6.683424  11.953265  0.108772  -1376.748398     -1372.108896  2757.496797  2754.217792
"Nanog"   1.263097  69.347842  0.834832  66.535947  4.127488  28.133048  0.466636  -1524.928918     -1512.444558  3053.857837  3034.889116
"Prdm14"  0.552886  8.200636   2.385858  4.747279   0.558672  4.872751   0.210606  -713.091587      -712.702876   1430.183173  1435.405751
Finally, we can compute the Akaike weight for the model with a single Negative Binomial (the weight for the mixture model is \(1-w_\mathrm{single}\)).
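As a reminder, the Akaike weight of model \(i\) among a set of models is \(w_i = \mathrm{e}^{-\text{AIC}_i/2} \big/ \sum_j \mathrm{e}^{-\text{AIC}_j/2}\). The cell below is a minimal sketch of the computation (the column name w_single is our choice, not from the original data frame); writing the two-model weight as \(w_\text{single} = 1\big/\left(1 + \mathrm{e}^{(\text{AIC}_\text{single} - \text{AIC}_\text{mix})/2}\right)\) keeps the exponentials from overflowing.

# Akaike weight for the single Negative Binomial model, computed from
# the difference in AICs for numerical stability; "w_single" is our
# chosen column name
df_mle = df_mle.with_columns(
    (1 / (1 + ((pl.col("AIC_single") - pl.col("AIC_mix")) / 2).exp())).alias(
        "w_single"
    )
)

# Take a look
df_mle.select(["gene", "w_single"])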
Looking at the Akaike weight for the mixture (\(1 - w_\mathrm{single}\)), it is clear that the mixture model is strongly preferred for Rex1 and Nanog. There is no strong preference for Rest, and the single Negative Binomial model is preferred for Prdm14. Reminding ourselves of the ECDFs, this makes sense.
Rex1 is clearly bimodal, and Nanog appears to have a second inflection point where the ECDF reaches a value of about 0.4, consistent with the MLE of \(w \approx 0.47\) in the mixture model. Rest and Prdm14 both appear to be unimodal, agreeing with what we saw in the AIC analysis.
Note that this underscores something we’ve been stressing all along. You should do good exploratory data analysis first, and the EDA often tells much of the story!
48.1.1 Caveat
Remember, though, that we did not take into account that the measurements of the four genes were done in the same cells. We modeled that when we presented the mixture models at the beginning of this lesson. The analysis of a more complicated model with MLE proved to be out of reach due to computational difficulty. So, we should not draw strong conclusions about what the relative quality of the mixture and single Negative Binomial models means in this context. We will address these kinds of modeling issues in the sequel to this course.
48.2 AIC for the spindle model
We can do a similar analysis for the two competing models for mitotic spindle size. We need our functions from earlier.
def theor_spindle_length(gamma, phi, d):
    """Compute spindle length using mathematical model"""
    return gamma * d / np.cbrt(1 + (gamma * d / phi) ** 3)


def log_likelihood(params, d, ell):
    """Log likelihood of spindle length model."""
    gamma, phi, sigma = params

    if gamma <= 0 or gamma > 1 or phi <= 0:
        return -np.inf

    mu = theor_spindle_length(gamma, phi, d)
    return np.sum(st.norm.logpdf(ell, mu, sigma))


def spindle_mle(d, ell):
    """Compute MLE for parameters in spindle length model."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = scipy.optimize.minimize(
            fun=lambda params, d, ell: -log_likelihood(params, d, ell),
            x0=np.array([0.5, 35, 5]),
            args=(d, ell),
            method='Powell'
        )

    if res.success:
        return res.x
    else:
        raise RuntimeError('Convergence failed with message', res.message)
We can now perform MLE to get the parameters for each model and store the results.
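The code cell for this step is a minimal sketch. It assumes the spindle data set from earlier lessons; the file name good_invitro.csv and the column names "Droplet Diameter (um)" and "Spindle Length (um)" are assumptions, not confirmed by this section. Model 1 posits a spindle length independent of droplet diameter, so its Normal-model MLE is the plug-in mean and standard deviation; model 2 uses spindle_mle() defined above.

# Load the spindle data; file and column names are assumed from
# earlier lessons and may differ in your setup
df_spindle = pl.read_csv(
    os.path.join(data_path, "good_invitro.csv"), comment_prefix="#"
)

d = df_spindle["Droplet Diameter (um)"].to_numpy()
ell = df_spindle["Spindle Length (um)"].to_numpy()

# Model 1: spindle length independent of droplet diameter. For a
# Normal model, the MLE is the plug-in mean and standard deviation.
phi_1 = np.mean(ell)
sigma_1 = np.std(ell)
log_like_1 = np.sum(st.norm.logpdf(ell, phi_1, sigma_1))

# Model 2: spindle length depends on droplet diameter
mle_params_2 = spindle_mle(d, ell)
log_like_2 = log_likelihood(mle_params_2, d, ell)

# Look at the log likelihoods
log_like_1, log_like_2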
The log likelihood for model 2, with spindle size depending on droplet diameter, is much greater than for model 1. We can now compute the AIC, noting that there are two parameters for model 1 and three for model 2.
AIC_1 = -2 * (log_like_1 - 2)
AIC_2 = -2 * (log_like_2 - 3)

# Look at the AICs
AIC_1, AIC_2
There is a massive disparity in the AICs, so we know that model 2 is strongly preferred. Nonetheless, we can compute the Akaike weight for model 1 to compare.
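A minimal sketch of that computation, using the same numerically stable form as before:

# Akaike weight for model 1; working with the difference in AICs
# avoids overflow in the exponentials
w_1 = 1 / (1 + np.exp(-(AIC_2 - AIC_1) / 2))

w_1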