@@ -1095,8 +1095,8 @@ def test_nonparallelized_chains_are_random(self):
                samples = np.array(trace.get_values("x", combine=False))[:, 5]
                assert len(set(samples)) == 2, \
                    "Non parallelized {} " "chains are identical.".format(
-                stepper
-            )
+                        stepper
+                    )

    def test_parallelized_chains_are_random(self):
"""Test that parallel chain are
@@ -1115,8 +1115,8 @@ def test_parallelized_chains_are_random(self):
                samples = np.array(trace.get_values("x", combine=False))[:, 5]
                assert len(set(samples)) == 2, \
                    "Parallelized {} " "chains are identical.".format(
-                stepper
-            )
+                        stepper
+                    )

    def test_acceptance_rate_against_coarseness(self):
        """Test that the acceptance rate increases
@@ -1160,8 +1160,9 @@ def test_mlda_non_blocked(self):
                                          base_blocked=False).next_step_method,
                              CompoundStep)

-    def test_blocked(self):
-        """Test the type of base sampler instantiated when switching base_blocked flag"""
+    def test_mlda_blocked(self):
+        """Test the type of base sampler instantiated
+        when switching base_blocked flag"""
        _, model = simple_2model_continuous()
        _, model_coarse = simple_2model_continuous()
        with model:
@@ -1173,3 +1174,108 @@ def test_blocked(self):
                                          base_blocked=True).next_step_method,
                              Metropolis)

+    def test_tuning_and_scaling_on(self):
+        """Test that tune and base_scaling change as expected when
+        tuning is on."""
+        np.random.seed(1234)
+        ts = 100
+        _, model = simple_2model_continuous()
+        _, model_coarse = simple_2model_continuous()
+        with model:
+            trace = sample(
+                tune=ts,
+                draws=20,
+                step=MLDA(coarse_models=[model_coarse],
+                          base_tune_interval=50,
+                          base_scaling=100.),
+                chains=1,
+                discard_tuned_samples=False,
+                random_seed=1234
+            )
+
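+        # The 'tune' sampler stat should be True for all ts tuning draws and
+        # False afterwards; base_scaling starts at 100. for both free variables
+        # (x and the transformed y) and should have been tuned downwards.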
+        assert trace.get_sampler_stats('tune', chains=0)[0]
+        assert trace.get_sampler_stats('tune', chains=0)[ts - 1]
+        assert not trace.get_sampler_stats('tune', chains=0)[ts]
+        assert not trace.get_sampler_stats('tune', chains=0)[-1]
+        assert trace.get_sampler_stats('base_scaling_x', chains=0)[0] == 100.
+        assert trace.get_sampler_stats('base_scaling_y_logodds__', chains=0)[0] == 100.
+        assert trace.get_sampler_stats('base_scaling_x', chains=0)[-1] < 100.
+        assert trace.get_sampler_stats('base_scaling_y_logodds__', chains=0)[-1] < 100.
+
+    def test_tuning_and_scaling_off(self):
+        """Test that tuning is deactivated when sample()'s tune=0 and that
+        MLDA's tune=False is overridden by sample()'s tune."""
+        np.random.seed(12345)
+        _, model = simple_2model_continuous()
+        _, model_coarse = simple_2model_continuous()
+
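+        # First run: tuning disabled entirely via sample()'s tune=0.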
+        ts_0 = 0
+        with model:
+            trace_0 = sample(
+                tune=ts_0,
+                draws=100,
+                step=MLDA(coarse_models=[model_coarse],
+                          base_tune_interval=50,
+                          base_scaling=100.,
+                          tune=False),
+                chains=1,
+                discard_tuned_samples=False,
+                random_seed=12345
+            )
+
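+        # Second run: sample()'s tune=100 should override the step method's
+        # tune=False and re-enable tuning.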
+        ts_1 = 100
+        with model:
+            trace_1 = sample(
+                tune=ts_1,
+                draws=20,
+                step=MLDA(coarse_models=[model_coarse],
+                          base_tune_interval=50,
+                          base_scaling=100.,
+                          tune=False),
+                chains=1,
+                discard_tuned_samples=False,
+                random_seed=12345
+            )
+
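+        # With tuning off, no draw is marked as tuning and base_scaling
+        # keeps its initial value throughout.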
+        assert not trace_0.get_sampler_stats('tune', chains=0)[0]
+        assert not trace_0.get_sampler_stats('tune', chains=0)[-1]
+        assert trace_0.get_sampler_stats('base_scaling_x', chains=0)[0] == \
+            trace_0.get_sampler_stats('base_scaling_x', chains=0)[-1] == 100.
+
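+        # With tuning re-enabled by sample(), the first ts_1 draws are marked
+        # as tuning and base_scaling is tuned down from its initial 100.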
+        assert trace_1.get_sampler_stats('tune', chains=0)[0]
+        assert trace_1.get_sampler_stats('tune', chains=0)[ts_1 - 1]
+        assert not trace_1.get_sampler_stats('tune', chains=0)[ts_1]
+        assert not trace_1.get_sampler_stats('tune', chains=0)[-1]
+        assert trace_1.get_sampler_stats('base_scaling_x', chains=0)[0] == 100.
+        assert trace_1.get_sampler_stats('base_scaling_y_logodds__', chains=0)[0] == 100.
+        assert trace_1.get_sampler_stats('base_scaling_x', chains=0)[-1] < 100.
+        assert trace_1.get_sampler_stats('base_scaling_y_logodds__', chains=0)[-1] < 100.
+
+    def test_trace_length(self):
+        """Check if trace length is as expected."""
+        tune = 100
+        draws = 50
+        with Model() as coarse_model:
+            Normal('n', 0, 2.2, shape=(3,))
+        with Model():
+            Normal('n', 0, 2, shape=(3,))
+            step = MLDA(coarse_models=[coarse_model])
+            trace = sample(
+                tune=tune,
+                draws=draws,
+                step=step,
+                chains=1,
+                discard_tuned_samples=False
+            )
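+            # Tuning draws are kept because discard_tuned_samples=False,
+            # so the trace should contain tune + draws samples in total.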
+            assert len(trace) == tune + draws
+
+    @pytest.mark.parametrize('variable,has_grad,outcome',
+                             [('n', True, 1), ('n', False, 1), ('b', True, 0), ('b', False, 0)])
+    def test_competence(self, variable, has_grad, outcome):
+        """Test if the competence function returns the expected
+        results for different models"""
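+        # outcome is the expected Competence value: 1 (compatible) for the
+        # continuous variable n, 0 (incompatible) for the binomial variable b,
+        # independently of gradient availability.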
+        with Model() as pmodel:
+            Normal('n', 0, 2, shape=(3,))
+            Binomial('b', n=2, p=0.3)
+            assert MLDA.competence(pmodel[variable], has_grad=has_grad) == outcome