
Commit 3e7a67c (parent: 7fb597c)

Pushing the docs to _pst_preview/ for branch: new_web_theme, commit a9d8f103c7544ad52ece9057cc98b9208447ff8d

3,430 files changed: +44,329 additions, -87,550 deletions


_pst_preview/.buildinfo

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 944134de34d7115406f3afe04b120a2c
+config: 1e442cc8803d396db4383208272980af
 tags: 645f666f9bcd5a90fca523b33c5a78b7
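
Note: Sphinx stores a hash of the effective configuration in .buildinfo and forces a full rebuild when the stored hash no longer matches, which is why a theme change like this one regenerates every page. A rough sketch of the mechanism (illustrative only, not Sphinx's actual code; the html_theme values are hypothetical):

from hashlib import md5

def config_hash(config: dict) -> str:
    # Hash a stable rendering of the config values, as the "config:" line
    # in .buildinfo does for the real Sphinx configuration.
    return md5(str(sorted(config.items())).encode()).hexdigest()

old = config_hash({"html_theme": "scikit-learn-legacy"})   # hypothetical value
new = config_hash({"html_theme": "pydata_sphinx_theme"})   # hypothetical value
assert old != new  # mismatch against the stored hash => full rebuild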

_pst_preview/_changed.html

Lines changed: 447 additions & 0 deletions (large diff not rendered by default)

_pst_preview/_downloads/006fc185672e58b056a5c134db26935c/plot_coin_segmentation.ipynb

Lines changed: 1 addition & 1 deletion

@@ -53,7 +53,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/00ae629d652473137a3905a5e08ea815/plot_iris_dtc.py

Lines changed: 1 addition & 0 deletions

@@ -14,6 +14,7 @@
 
 We also show the tree structure of a model built on all of the features.
 """
+
 # %%
 # First load the copy of the Iris dataset shipped with scikit-learn:
 from sklearn.datasets import load_iris

_pst_preview/_downloads/02d88d76c60b7397c8c6e221b31568dd/plot_grid_search_refit_callable.py

Lines changed: 1 addition & 1 deletion

@@ -81,7 +81,7 @@ def best_low_complexity(cv_results):
 pipe = Pipeline(
     [
         ("reduce_dim", PCA(random_state=42)),
-        ("classify", LinearSVC(random_state=42, C=0.01, dual="auto")),
+        ("classify", LinearSVC(random_state=42, C=0.01)),
     ]
 )
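
Note: dropping dual="auto" here (and in the other LinearSVC examples touched by this commit) is a no-op on current releases; as far as I can tell it follows the scikit-learn deprecation path in which the default of dual changed from True to "auto" in 1.5 (announced in 1.3), making the explicit argument redundant. A minimal sketch, assuming scikit-learn >= 1.5:

from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=200, n_features=20, random_state=42)

# With dual="auto" (the assumed default), the solver picks the primal or
# dual formulation from the data shape, loss and penalty, so these two
# estimators should behave identically:
explicit = LinearSVC(C=0.01, random_state=42, dual="auto").fit(X, y)
implicit = LinearSVC(C=0.01, random_state=42).fit(X, y)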

_pst_preview/_downloads/02f111fb3dd79805b161e14c564184fc/plot_sgd_comparison.ipynb

Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/036b9372e2e7802453cbb994da7a6786/plot_linearsvc_support_vectors.py

Lines changed: 1 addition & 1 deletion

@@ -21,7 +21,7 @@
 plt.figure(figsize=(10, 5))
 for i, C in enumerate([1, 100]):
     # "hinge" is the standard SVM loss
-    clf = LinearSVC(C=C, loss="hinge", random_state=42, dual="auto").fit(X, y)
+    clf = LinearSVC(C=C, loss="hinge", random_state=42).fit(X, y)
     # obtain the support vectors through the decision function
     decision_function = clf.decision_function(X)
     # we can also calculate the decision function manually
_pst_preview/_downloads/0486bf9e537e44cedd2a236d034bcd90/plot_pcr_vs_pls.ipynb

Lines changed: 1 addition & 1 deletion

@@ -114,7 +114,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/055e8313e28f2f3b5fd508054dfe5fe0/plot_roc_crossval.ipynb

Lines changed: 1 addition & 1 deletion

@@ -78,7 +78,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/05ca8a4e90b4cc2acd69f9e24b4a1f3a/plot_classifier_chain_yeast.ipynb

Lines changed: 1 addition & 1 deletion

@@ -114,7 +114,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/061854726c268bcdae5cd1c330cf8c75/plot_sgd_penalties.ipynb

Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/067cd5d39b097d2c49dd98f563dac13a/plot_iterative_imputer_variants_comparison.ipynb

Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/06cfc926acb27652fb2aa5bfc583e7cb/plot_hashing_vs_dict_vectorizer.ipynb

Lines changed: 1 addition & 1 deletion

@@ -323,7 +323,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/06ffeb4f0ded6447302acd5a712f8490/plot_nearest_centroid.ipynb

Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/0785ea6d45bde062e5beedda88131215/plot_release_highlights_1_3_0.ipynb

Lines changed: 1 addition & 1 deletion

@@ -139,7 +139,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/07960f9087d379e9d0da6350d6ee3f41/plot_classification_probability.ipynb

Lines changed: 3 additions & 3 deletions

@@ -4,7 +4,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"\n# Plot classification probability\n\nPlot the classification probability for different classifiers. We use a 3 class\ndataset, and we classify it with a Support Vector classifier, L1 and L2\npenalized logistic regression with either a One-Vs-Rest or multinomial setting,\nand Gaussian process classification.\n\nLinear SVC is not a probabilistic classifier by default but it has a built-in\ncalibration option enabled in this example (`probability=True`).\n\nThe logistic regression with One-Vs-Rest is not a multiclass classifier out of\nthe box. As a result it has more trouble in separating class 2 and 3 than the\nother estimators.\n"
+"\n# Plot classification probability\n\nPlot the classification probability for different classifiers. We use a 3 class\ndataset, and we classify it with a Support Vector classifier, L1 and L2\npenalized logistic regression (multinomial multiclass), a One-Vs-Rest version with\nlogistic regression, and Gaussian process classification.\n\nLinear SVC is not a probabilistic classifier by default but it has a built-in\ncalibration option enabled in this example (`probability=True`).\n\nThe logistic regression with One-Vs-Rest is not a multiclass classifier out of\nthe box. As a result it has more trouble in separating class 2 and 3 than the\nother estimators.\n"
 ]
 },
 {
@@ -15,7 +15,7 @@
 },
 "outputs": [],
 "source": [
-"# Author: Alexandre Gramfort <[email protected]>\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import cm\n\nfrom sklearn import datasets\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.inspection import DecisionBoundaryDisplay\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.svm import SVC\n\niris = datasets.load_iris()\nX = iris.data[:, 0:2] # we only take the first two features for visualization\ny = iris.target\n\nn_features = X.shape[1]\n\nC = 10\nkernel = 1.0 * RBF([1.0, 1.0]) # for GPC\n\n# Create different classifiers.\nclassifiers = {\n \"L1 logistic\": LogisticRegression(\n C=C, penalty=\"l1\", solver=\"saga\", multi_class=\"multinomial\", max_iter=10000\n ),\n \"L2 logistic (Multinomial)\": LogisticRegression(\n C=C, penalty=\"l2\", solver=\"saga\", multi_class=\"multinomial\", max_iter=10000\n ),\n \"L2 logistic (OvR)\": LogisticRegression(\n C=C, penalty=\"l2\", solver=\"saga\", multi_class=\"ovr\", max_iter=10000\n ),\n \"Linear SVC\": SVC(kernel=\"linear\", C=C, probability=True, random_state=0),\n \"GPC\": GaussianProcessClassifier(kernel),\n}\n\nn_classifiers = len(classifiers)\n\nfig, axes = plt.subplots(\n nrows=n_classifiers,\n ncols=len(iris.target_names),\n figsize=(3 * 2, n_classifiers * 2),\n)\nfor classifier_idx, (name, classifier) in enumerate(classifiers.items()):\n y_pred = classifier.fit(X, y).predict(X)\n accuracy = accuracy_score(y, y_pred)\n print(f\"Accuracy (train) for {name}: {accuracy:0.1%}\")\n for label in np.unique(y):\n # plot the probability estimate provided by the classifier\n disp = DecisionBoundaryDisplay.from_estimator(\n classifier,\n X,\n response_method=\"predict_proba\",\n class_of_interest=label,\n ax=axes[classifier_idx, label],\n vmin=0,\n vmax=1,\n )\n axes[classifier_idx, label].set_title(f\"Class {label}\")\n # plot data predicted to belong to given class\n mask_y_pred = y_pred == label\n axes[classifier_idx, label].scatter(\n X[mask_y_pred, 0], X[mask_y_pred, 1], marker=\"o\", c=\"w\", edgecolor=\"k\"\n )\n axes[classifier_idx, label].set(xticks=(), yticks=())\n axes[classifier_idx, 0].set_ylabel(name)\n\nax = plt.axes([0.15, 0.04, 0.7, 0.02])\nplt.title(\"Probability\")\n_ = plt.colorbar(\n cm.ScalarMappable(norm=None, cmap=\"viridis\"), cax=ax, orientation=\"horizontal\"\n)\n\nplt.show()"
+"# Author: Alexandre Gramfort <[email protected]>\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import cm\n\nfrom sklearn import datasets\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.inspection import DecisionBoundaryDisplay\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import SVC\n\niris = datasets.load_iris()\nX = iris.data[:, 0:2] # we only take the first two features for visualization\ny = iris.target\n\nn_features = X.shape[1]\n\nC = 10\nkernel = 1.0 * RBF([1.0, 1.0]) # for GPC\n\n# Create different classifiers.\nclassifiers = {\n \"L1 logistic\": LogisticRegression(C=C, penalty=\"l1\", solver=\"saga\", max_iter=10000),\n \"L2 logistic (Multinomial)\": LogisticRegression(\n C=C, penalty=\"l2\", solver=\"saga\", max_iter=10000\n ),\n \"L2 logistic (OvR)\": OneVsRestClassifier(\n LogisticRegression(C=C, penalty=\"l2\", solver=\"saga\", max_iter=10000)\n ),\n \"Linear SVC\": SVC(kernel=\"linear\", C=C, probability=True, random_state=0),\n \"GPC\": GaussianProcessClassifier(kernel),\n}\n\nn_classifiers = len(classifiers)\n\nfig, axes = plt.subplots(\n nrows=n_classifiers,\n ncols=len(iris.target_names),\n figsize=(3 * 2, n_classifiers * 2),\n)\nfor classifier_idx, (name, classifier) in enumerate(classifiers.items()):\n y_pred = classifier.fit(X, y).predict(X)\n accuracy = accuracy_score(y, y_pred)\n print(f\"Accuracy (train) for {name}: {accuracy:0.1%}\")\n for label in np.unique(y):\n # plot the probability estimate provided by the classifier\n disp = DecisionBoundaryDisplay.from_estimator(\n classifier,\n X,\n response_method=\"predict_proba\",\n class_of_interest=label,\n ax=axes[classifier_idx, label],\n vmin=0,\n vmax=1,\n )\n axes[classifier_idx, label].set_title(f\"Class {label}\")\n # plot data predicted to belong to given class\n mask_y_pred = y_pred == label\n axes[classifier_idx, label].scatter(\n X[mask_y_pred, 0], X[mask_y_pred, 1], marker=\"o\", c=\"w\", edgecolor=\"k\"\n )\n axes[classifier_idx, label].set(xticks=(), yticks=())\n axes[classifier_idx, 0].set_ylabel(name)\n\nax = plt.axes([0.15, 0.04, 0.7, 0.02])\nplt.title(\"Probability\")\n_ = plt.colorbar(\n cm.ScalarMappable(norm=None, cmap=\"viridis\"), cax=ax, orientation=\"horizontal\"\n)\n\nplt.show()"
 ]
 }
 ],
@@ -35,7 +35,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,
Binary file not shown.

_pst_preview/_downloads/0837676cf643e44f0684e848d0967551/plot_compare_cross_decomposition.ipynb

Lines changed: 1 addition & 1 deletion

@@ -132,7 +132,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/083d8568c199bebbc1a847fc6c917e9e/plot_kernel_approximation.ipynb

Lines changed: 2 additions & 2 deletions

@@ -40,7 +40,7 @@
 },
 "outputs": [],
 "source": [
-"n_samples = len(digits.data)\ndata = digits.data / 16.0\ndata -= data.mean(axis=0)\n\n# We learn the digits on the first half of the digits\ndata_train, targets_train = (data[: n_samples // 2], digits.target[: n_samples // 2])\n\n\n# Now predict the value of the digit on the second half:\ndata_test, targets_test = (data[n_samples // 2 :], digits.target[n_samples // 2 :])\n# data_test = scaler.transform(data_test)\n\n# Create a classifier: a support vector classifier\nkernel_svm = svm.SVC(gamma=0.2)\nlinear_svm = svm.LinearSVC(dual=\"auto\", random_state=42)\n\n# create pipeline from kernel approximation\n# and linear svm\nfeature_map_fourier = RBFSampler(gamma=0.2, random_state=1)\nfeature_map_nystroem = Nystroem(gamma=0.2, random_state=1)\nfourier_approx_svm = pipeline.Pipeline(\n [\n (\"feature_map\", feature_map_fourier),\n (\"svm\", svm.LinearSVC(dual=\"auto\", random_state=42)),\n ]\n)\n\nnystroem_approx_svm = pipeline.Pipeline(\n [\n (\"feature_map\", feature_map_nystroem),\n (\"svm\", svm.LinearSVC(dual=\"auto\", random_state=42)),\n ]\n)\n\n# fit and predict using linear and kernel svm:\n\nkernel_svm_time = time()\nkernel_svm.fit(data_train, targets_train)\nkernel_svm_score = kernel_svm.score(data_test, targets_test)\nkernel_svm_time = time() - kernel_svm_time\n\nlinear_svm_time = time()\nlinear_svm.fit(data_train, targets_train)\nlinear_svm_score = linear_svm.score(data_test, targets_test)\nlinear_svm_time = time() - linear_svm_time\n\nsample_sizes = 30 * np.arange(1, 10)\nfourier_scores = []\nnystroem_scores = []\nfourier_times = []\nnystroem_times = []\n\nfor D in sample_sizes:\n fourier_approx_svm.set_params(feature_map__n_components=D)\n nystroem_approx_svm.set_params(feature_map__n_components=D)\n start = time()\n nystroem_approx_svm.fit(data_train, targets_train)\n nystroem_times.append(time() - start)\n\n start = time()\n fourier_approx_svm.fit(data_train, targets_train)\n fourier_times.append(time() - start)\n\n fourier_score = fourier_approx_svm.score(data_test, targets_test)\n nystroem_score = nystroem_approx_svm.score(data_test, targets_test)\n nystroem_scores.append(nystroem_score)\n fourier_scores.append(fourier_score)\n\n# plot the results:\nplt.figure(figsize=(16, 4))\naccuracy = plt.subplot(121)\n# second y axis for timings\ntimescale = plt.subplot(122)\n\naccuracy.plot(sample_sizes, nystroem_scores, label=\"Nystroem approx. kernel\")\ntimescale.plot(sample_sizes, nystroem_times, \"--\", label=\"Nystroem approx. kernel\")\n\naccuracy.plot(sample_sizes, fourier_scores, label=\"Fourier approx. kernel\")\ntimescale.plot(sample_sizes, fourier_times, \"--\", label=\"Fourier approx. kernel\")\n\n# horizontal lines for exact rbf and linear kernels:\naccuracy.plot(\n [sample_sizes[0], sample_sizes[-1]],\n [linear_svm_score, linear_svm_score],\n label=\"linear svm\",\n)\ntimescale.plot(\n [sample_sizes[0], sample_sizes[-1]],\n [linear_svm_time, linear_svm_time],\n \"--\",\n label=\"linear svm\",\n)\n\naccuracy.plot(\n [sample_sizes[0], sample_sizes[-1]],\n [kernel_svm_score, kernel_svm_score],\n label=\"rbf svm\",\n)\ntimescale.plot(\n [sample_sizes[0], sample_sizes[-1]],\n [kernel_svm_time, kernel_svm_time],\n \"--\",\n label=\"rbf svm\",\n)\n\n# vertical line for dataset dimensionality = 64\naccuracy.plot([64, 64], [0.7, 1], label=\"n_features\")\n\n# legends and labels\naccuracy.set_title(\"Classification accuracy\")\ntimescale.set_title(\"Training times\")\naccuracy.set_xlim(sample_sizes[0], sample_sizes[-1])\naccuracy.set_xticks(())\naccuracy.set_ylim(np.min(fourier_scores), 1)\ntimescale.set_xlabel(\"Sampling steps = transformed feature dimension\")\naccuracy.set_ylabel(\"Classification accuracy\")\ntimescale.set_ylabel(\"Training time in seconds\")\naccuracy.legend(loc=\"best\")\ntimescale.legend(loc=\"best\")\nplt.tight_layout()\nplt.show()"
+"n_samples = len(digits.data)\ndata = digits.data / 16.0\ndata -= data.mean(axis=0)\n\n# We learn the digits on the first half of the digits\ndata_train, targets_train = (data[: n_samples // 2], digits.target[: n_samples // 2])\n\n\n# Now predict the value of the digit on the second half:\ndata_test, targets_test = (data[n_samples // 2 :], digits.target[n_samples // 2 :])\n# data_test = scaler.transform(data_test)\n\n# Create a classifier: a support vector classifier\nkernel_svm = svm.SVC(gamma=0.2)\nlinear_svm = svm.LinearSVC(random_state=42)\n\n# create pipeline from kernel approximation\n# and linear svm\nfeature_map_fourier = RBFSampler(gamma=0.2, random_state=1)\nfeature_map_nystroem = Nystroem(gamma=0.2, random_state=1)\nfourier_approx_svm = pipeline.Pipeline(\n [\n (\"feature_map\", feature_map_fourier),\n (\"svm\", svm.LinearSVC(random_state=42)),\n ]\n)\n\nnystroem_approx_svm = pipeline.Pipeline(\n [\n (\"feature_map\", feature_map_nystroem),\n (\"svm\", svm.LinearSVC(random_state=42)),\n ]\n)\n\n# fit and predict using linear and kernel svm:\n\nkernel_svm_time = time()\nkernel_svm.fit(data_train, targets_train)\nkernel_svm_score = kernel_svm.score(data_test, targets_test)\nkernel_svm_time = time() - kernel_svm_time\n\nlinear_svm_time = time()\nlinear_svm.fit(data_train, targets_train)\nlinear_svm_score = linear_svm.score(data_test, targets_test)\nlinear_svm_time = time() - linear_svm_time\n\nsample_sizes = 30 * np.arange(1, 10)\nfourier_scores = []\nnystroem_scores = []\nfourier_times = []\nnystroem_times = []\n\nfor D in sample_sizes:\n fourier_approx_svm.set_params(feature_map__n_components=D)\n nystroem_approx_svm.set_params(feature_map__n_components=D)\n start = time()\n nystroem_approx_svm.fit(data_train, targets_train)\n nystroem_times.append(time() - start)\n\n start = time()\n fourier_approx_svm.fit(data_train, targets_train)\n fourier_times.append(time() - start)\n\n fourier_score = fourier_approx_svm.score(data_test, targets_test)\n nystroem_score = nystroem_approx_svm.score(data_test, targets_test)\n nystroem_scores.append(nystroem_score)\n fourier_scores.append(fourier_score)\n\n# plot the results:\nplt.figure(figsize=(16, 4))\naccuracy = plt.subplot(121)\n# second y axis for timings\ntimescale = plt.subplot(122)\n\naccuracy.plot(sample_sizes, nystroem_scores, label=\"Nystroem approx. kernel\")\ntimescale.plot(sample_sizes, nystroem_times, \"--\", label=\"Nystroem approx. kernel\")\n\naccuracy.plot(sample_sizes, fourier_scores, label=\"Fourier approx. kernel\")\ntimescale.plot(sample_sizes, fourier_times, \"--\", label=\"Fourier approx. kernel\")\n\n# horizontal lines for exact rbf and linear kernels:\naccuracy.plot(\n [sample_sizes[0], sample_sizes[-1]],\n [linear_svm_score, linear_svm_score],\n label=\"linear svm\",\n)\ntimescale.plot(\n [sample_sizes[0], sample_sizes[-1]],\n [linear_svm_time, linear_svm_time],\n \"--\",\n label=\"linear svm\",\n)\n\naccuracy.plot(\n [sample_sizes[0], sample_sizes[-1]],\n [kernel_svm_score, kernel_svm_score],\n label=\"rbf svm\",\n)\ntimescale.plot(\n [sample_sizes[0], sample_sizes[-1]],\n [kernel_svm_time, kernel_svm_time],\n \"--\",\n label=\"rbf svm\",\n)\n\n# vertical line for dataset dimensionality = 64\naccuracy.plot([64, 64], [0.7, 1], label=\"n_features\")\n\n# legends and labels\naccuracy.set_title(\"Classification accuracy\")\ntimescale.set_title(\"Training times\")\naccuracy.set_xlim(sample_sizes[0], sample_sizes[-1])\naccuracy.set_xticks(())\naccuracy.set_ylim(np.min(fourier_scores), 1)\ntimescale.set_xlabel(\"Sampling steps = transformed feature dimension\")\naccuracy.set_ylabel(\"Classification accuracy\")\ntimescale.set_ylabel(\"Training time in seconds\")\naccuracy.legend(loc=\"best\")\ntimescale.legend(loc=\"best\")\nplt.tight_layout()\nplt.show()"
 ]
 },
 {
@@ -78,7 +78,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/09c15b8ca914c1951a06a9ce3431460f/plot_ols_ridge_variance.ipynb

Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/0a90f2b8e2dadb7d37ca67b3f7adb656/plot_gradient_boosting_regularization.ipynb

Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/0aadb4e0dc9f402704c8a56152f01083/plot_lasso_dense_vs_sparse_data.ipynb

Lines changed: 1 addition & 1 deletion

@@ -71,7 +71,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/0af0092c704518874f82d38d725bb97f/plot_dict_face_patches.ipynb

Lines changed: 1 addition & 1 deletion

@@ -78,7 +78,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/0b39f715b5e32f01df3d212b6d822b82/plot_calibration.py

Lines changed: 1 addition & 0 deletions

@@ -22,6 +22,7 @@
 Brier score.
 
 """
+
 # Authors:
 # Mathieu Blondel <[email protected]>
 # Alexandre Gramfort <[email protected]>

_pst_preview/_downloads/0b601219a14824c971bbf8bb797e8973/plot_logistic_path.ipynb

Lines changed: 1 addition & 1 deletion

@@ -89,7 +89,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/0c15970ac17183d2bf864a9563081aeb/plot_calibration.ipynb

Lines changed: 1 addition & 1 deletion

@@ -89,7 +89,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,

_pst_preview/_downloads/0c988b0c2bea0040fec13fe1055db95c/plot_pca_vs_fa_model_selection.ipynb

Lines changed: 1 addition & 1 deletion

@@ -71,7 +71,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.18"
+"version": "3.9.19"
 }
 },
 "nbformat": 4,
