Skip to content

Commit aa2b03a

Browse files
authored
[BE] Make coding_ddpg.py runnable outside of Colab env
By setting the start method to fork. Also, add a missing parenthesis.
1 parent 1b011fb commit aa2b03a

File tree

1 file changed

+12
-6
lines changed

1 file changed

+12
-6
lines changed

advanced_source/coding_ddpg.py

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -63,16 +63,22 @@
6363
# %%bash
6464
# pip3 install torchrl mujoco glfw
6565

66-
import torchrl
67-
import torch
68-
import tqdm
69-
from typing import Tuple
70-
7166
# sphinx_gallery_start_ignore
7267
import warnings
7368
warnings.filterwarnings("ignore")
69+
import multiprocessing
70+
# TorchRL prefers spawn method, that restricts creation of ParallelEnv inside
71+
# `__main__` method call, but for the ease of reading the code, switch to fork,
72+
# which is also the default start method in Google's Colaboratory
73+
multiprocessing.set_start_method("fork")
7474
# sphinx_gallery_end_ignore
7575

76+
77+
import torchrl
78+
import torch
79+
import tqdm
80+
from typing import Tuple
81+
7682
###############################################################################
7783
# We will execute the policy on CUDA if available
7884
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -1219,6 +1225,6 @@ def ceil_div(x, y):
12191225
#
12201226
# To iterate further on this loss module we might consider:
12211227
#
1222-
# - Using `@dispatch` (see `[Feature] Distpatch IQL loss module <https://github.com/pytorch/rl/pull/1230>`_.
1228+
# - Using `@dispatch` (see `[Feature] Distpatch IQL loss module <https://github.com/pytorch/rl/pull/1230>`_.)
12231229
# - Allowing flexible TensorDict keys.
12241230
#

0 commit comments

Comments
 (0)