[1]:
import numpy as np
from sklearn.gaussian_process.kernels import Matern, RBF
import plotly
import plotly.express as px
from docs.mse_estimator import ErrorComparer
from docs.data_generation import gen_rbf_X, gen_matern_X, create_clus_split, gen_cov_mat
from docs.plotting_utils import gen_model_barplots
from spe.tree import Tree
from spe.relaxed_lasso import RelaxedLasso
from spe.estimators import new_y_est, cp_bagged, simple_train_test_split
[2]:
np.random.seed(1)
Bagged Models#
Here we demonstrate the effectiveness of spe.estimators.cp_bagged for estimating MSE on simulated spatial data. We compare it against a naive held-out estimate from spe.estimators.simple_train_test_split, with spe.estimators.new_y_est providing the reference error computed on fresh realizations of the response.
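As background, covariance-penalty (generalized \(C_p\)) estimators correct the apparent training error for optimism: for squared-error loss, \(\mathbb{E}[\mathrm{Err}] = \mathbb{E}[\overline{\mathrm{err}}] + \tfrac{2}{n}\sum_{i=1}^n \mathrm{Cov}(\hat{\mu}_i, Y_i)\), where \(\hat{\mu}\) is the fitted prediction at the training locations. cp_bagged applies a correction of this flavor tailored to bagged models and to the correlated-noise settings simulated below; this is a sketch of the motivation rather than the exact formula it implements.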
[3]:
## number of realizations to run
niter = 100
## data generation parameters
gsize=10
n=20**2
p=5
s=5
delta = 0.75
snr = 0.4
tr_frac = .25
noise_kernel = 'matern'
noise_length_scale = 1.
noise_nu = .5
X_kernel = 'matern'
X_length_scale = 5.
X_nu = 2.5
## ErrorComparer parameters
k = 5
max_depth = 3
models = [RelaxedLasso(lambd=.31), Tree(max_depth=max_depth, max_features='sqrt')]
ests = [
    new_y_est,
    cp_bagged,
    simple_train_test_split,
]
est_kwargs = [
    {'alpha': None,
     'full_refit': False,
     'bagg': True},
    {'use_trace_corr': False,
     'full_refit': False},
    {},
]
## plot parameters
model_names = ["Relaxed Lasso", "Random Forest"]
est_names = ["GenCp", "Split"]
[4]:
err_cmp = ErrorComparer()
[5]:
nx = ny = int(np.sqrt(n))
xs = np.linspace(0, gsize, nx)
ys = np.linspace(0, gsize, ny)
c_x, c_y = np.meshgrid(xs, ys)
c_x = c_x.flatten()
c_y = c_y.flatten()
coord = np.stack([c_x, c_y]).T
[6]:
## kernel covariance over the grid locations
if noise_kernel == 'rbf':
    Sigma_t = gen_cov_mat(c_x, c_y, RBF(length_scale=noise_length_scale))
elif noise_kernel == 'matern':
    Sigma_t = gen_cov_mat(c_x, c_y, Matern(length_scale=noise_length_scale, nu=noise_nu))
else:
    Sigma_t = np.eye(n)

## Y and Y* share only the spatially structured noise component
Cov_y_ystar = delta*Sigma_t
## Sigma_Y = delta*K + (1-delta)*I: spatial component plus independent nugget
Sigma_t = delta*Sigma_t + (1-delta)*np.eye(n)

if noise_kernel == 'rbf' or noise_kernel == 'matern':
    Chol_y = np.linalg.cholesky(Sigma_t)
else:
    Chol_y = np.eye(n)
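Chol_y is the lower-triangular Cholesky factor of \(\Sigma_Y\), so multiplying it by independent standard normal draws yields noise with the desired spatial covariance; this is presumably how ErrorComparer.compare uses the Chol_y argument passed below. A minimal sketch of the mechanism, for illustration only:
## illustration only, not part of the comparison pipeline
z = np.random.randn(n)    ## independent standard normal draws
eps = Chol_y @ z          ## spatially correlated noise with covariance Sigma_t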
[7]:
if X_kernel == 'rbf':
    X = gen_rbf_X(c_x, c_y, p)
elif X_kernel == 'matern':
    X = gen_matern_X(c_x, c_y, p, length_scale=X_length_scale, nu=X_nu)
else:
    X = np.random.randn(n, p)
Spatial Train/Test Split#
[8]:
tr_idx = create_clus_split(
int(np.sqrt(n)), int(np.sqrt(n)), tr_frac
)
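As a quick sanity check (assuming create_clus_split returns a boolean mask over the \(n\) grid locations, as the random split below does), the realized training fraction should be roughly tr_frac:
## illustration only: fraction of locations assigned to training
print(tr_idx.mean())    ## roughly tr_frac = 0.25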
Simulate \(Y, Y^* \overset{iid}{\sim} \mathcal{N}(\mu, \Sigma_Y)\)#
[9]:
spat_ind_model_errs = []
for model in models:
    errs = err_cmp.compare(
        model,
        ests,
        est_kwargs,
        niter=niter,
        n=n,
        p=p,
        s=s,
        snr=snr,
        X=X,
        coord=coord,
        Chol_y=Chol_y,
        Chol_ystar=Chol_y,
        Cov_y_ystar=None,
        tr_idx=tr_idx,
        fair=False,
        friedman_mu=True,
    )
    spat_ind_model_errs.append(errs)
100%|██████████| 100/100 [00:28<00:00, 3.54it/s]
100%|██████████| 100/100 [00:19<00:00, 5.02it/s]
[10]:
plotly.offline.init_notebook_mode()
spat_fig = gen_model_barplots(
    spat_ind_model_errs,
    model_names,
    est_names,
    title="Bagged Models: Spatial Train/Test Split, NSN",
    color_discrete_sequence=[px.colors.qualitative.Bold[i] for i in [0,9]],
    fig_name="bag_spat_ind",
)
spat_fig.show()
Simulate \(\begin{pmatrix} Y \\ Y^* \end{pmatrix} \sim \mathcal{N}\left(\begin{pmatrix} \mu \\ \mu \end{pmatrix}, \begin{pmatrix}\Sigma_Y & \Sigma_{Y, Y^*} \\ \Sigma_{Y^*, Y} & \Sigma_{Y} \end{pmatrix}\right)\)#
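Here the cross-covariance passed to compare is Cov_y_ystar \(= \delta K\), where \(K\) is the Matérn kernel matrix over the grid, while \(\Sigma_Y = \delta K + (1-\delta) I\): \(Y\) and \(Y^*\) share the spatially structured component of the noise but have independent nugget components.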
[11]:
spat_corr_model_errs = []
for model in models:
    errs = err_cmp.compare(
        model,
        ests,
        est_kwargs,
        niter=niter,
        n=n,
        p=p,
        s=s,
        snr=snr,
        X=X,
        coord=coord,
        Chol_y=Chol_y,
        Chol_ystar=Chol_y,
        Cov_y_ystar=Cov_y_ystar,
        tr_idx=tr_idx,
        fair=False,
        friedman_mu=True,
    )
    spat_corr_model_errs.append(errs)
100%|██████████| 100/100 [00:54<00:00, 1.85it/s]
100%|██████████| 100/100 [00:46<00:00, 2.16it/s]
[12]:
spat_corr_fig = gen_model_barplots(
    spat_corr_model_errs,
    model_names,
    est_names,
    title="Bagged Models: Spatial Train/Test Split, SSN",
    color_discrete_sequence=[px.colors.qualitative.Bold[i] for i in [0,9]],
    fig_name="bag_spat_corr",
)
spat_corr_fig.show()
Random 80/20 Split#
[13]:
tr_idx = np.concatenate((np.ones(int(.8*n)), np.zeros(n - int(.8*n)))).astype(bool)
np.random.shuffle(tr_idx)
Simulate \(Y, Y^* \overset{iid}{\sim} \mathcal{N}(\mu, \Sigma_Y)\)#
[14]:
trts_ind_model_errs = []
for model in models:
    errs = err_cmp.compare(
        model,
        ests,
        est_kwargs,
        niter=niter,
        n=n,
        p=p,
        s=s,
        snr=snr,
        X=X,
        coord=coord,
        Chol_y=Chol_y,
        Chol_ystar=Chol_y,
        Cov_y_ystar=None,
        tr_idx=tr_idx,
        fair=False,
        friedman_mu=True,
    )
    trts_ind_model_errs.append(errs)
100%|██████████| 100/100 [00:33<00:00, 3.01it/s]
100%|██████████| 100/100 [00:21<00:00, 4.68it/s]
[15]:
fig = gen_model_barplots(
    trts_ind_model_errs,
    model_names,
    est_names,
    title="Bagged Models: Random Train/Test Split, NSN",
    color_discrete_sequence=[px.colors.qualitative.Bold[i] for i in [0,9]],
    fig_name="bag_trts_ind",
)
fig.show()
Simulate \(\begin{pmatrix} Y \\ Y^* \end{pmatrix} \sim \mathcal{N}\left(\begin{pmatrix} \mu \\ \mu \end{pmatrix}, \begin{pmatrix}\Sigma_Y & \Sigma_{Y, Y^*} \\ \Sigma_{Y^*, Y} & \Sigma_{Y} \end{pmatrix}\right)\)#
[16]:
trts_corr_model_errs = []
for model in models:
    errs = err_cmp.compare(
        model,
        ests,
        est_kwargs,
        niter=niter,
        n=n,
        p=p,
        s=s,
        snr=snr,
        X=X,
        coord=coord,
        Chol_y=Chol_y,
        Chol_ystar=Chol_y,
        Cov_y_ystar=Cov_y_ystar,
        tr_idx=tr_idx,
        fair=False,
        friedman_mu=True,
    )
    trts_corr_model_errs.append(errs)
100%|██████████| 100/100 [01:09<00:00, 1.44it/s]
100%|██████████| 100/100 [01:01<00:00, 1.62it/s]
[17]:
trts_corr_fig = gen_model_barplots(
    trts_corr_model_errs,
    model_names,
    est_names,
    title="Bagged Models: Random Train/Test Split, SSN",
    color_discrete_sequence=[px.colors.qualitative.Bold[i] for i in [0,9]],
    fig_name="bag_trts_corr",
)
trts_corr_fig.show()