|
24 | 24 | },
25 | 25 | "outputs": [
26 | 26 | {
27 |    | - "output_type": "stream",
28 | 27 | "name": "stdout",
29 |    | - "text": "Note: you may need to restart the kernel to use updated packages.\n"
   | 28 | + "output_type": "stream",
   | 29 | + "text": [
   | 30 | + "Note: you may need to restart the kernel to use updated packages.\n"
   | 31 | + ]
30 | 32 | }
31 | 33 | ],
32 | 34 | "source": [
|
48 | 50 | },
49 | 51 | "outputs": [
50 | 52 | {
51 |    | - "output_type": "stream",
52 | 53 | "name": "stdout",
53 |    | - "text": "MONAI version: 0.2.0\nPython version: 3.7.5 (default, Nov 7 2019, 10:50:52) [GCC 8.3.0]\nNumpy version: 1.19.1\nPytorch version: 1.6.0\n\nOptional dependencies:\nPytorch Ignite version: 0.3.0\nNibabel version: NOT INSTALLED or UNKNOWN VERSION.\nscikit-image version: NOT INSTALLED or UNKNOWN VERSION.\nPillow version: NOT INSTALLED or UNKNOWN VERSION.\nTensorboard version: NOT INSTALLED or UNKNOWN VERSION.\n\nFor details about installing the optional dependencies, please visit:\n https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\n\n"
   | 54 | + "output_type": "stream",
   | 55 | + "text": [
   | 56 | + "MONAI version: 0.2.0\n",
   | 57 | + "Python version: 3.7.5 (default, Nov 7 2019, 10:50:52) [GCC 8.3.0]\n",
   | 58 | + "Numpy version: 1.19.1\n",
   | 59 | + "Pytorch version: 1.6.0\n",
   | 60 | + "\n",
   | 61 | + "Optional dependencies:\n",
   | 62 | + "Pytorch Ignite version: 0.3.0\n",
   | 63 | + "Nibabel version: NOT INSTALLED or UNKNOWN VERSION.\n",
   | 64 | + "scikit-image version: NOT INSTALLED or UNKNOWN VERSION.\n",
   | 65 | + "Pillow version: NOT INSTALLED or UNKNOWN VERSION.\n",
   | 66 | + "Tensorboard version: NOT INSTALLED or UNKNOWN VERSION.\n",
   | 67 | + "\n",
   | 68 | + "For details about installing the optional dependencies, please visit:\n",
   | 69 | + " https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\n",
   | 70 | + "\n"
   | 71 | + ]
54 | 72 | }
55 | 73 | ],
56 | 74 | "source": [
|
|
110 | 128 | },
111 | 129 | {
112 | 130 | "cell_type": "markdown",
    | 131 | + "metadata": {},
113 | 132 | "source": [
114 | 133 | "### 1 GPU"
115 |     | - ],
116 |     | - "metadata": {
117 |     | - "collapsed": false
118 |     | - }
    | 134 | + ]
119 | 135 | },
120 | 136 | {
121 | 137 | "cell_type": "code",
122 |     | - "source": [
123 |     | - "opt = torch.optim.Adam(net.parameters(), lr)\n",
124 |     | - "trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, [torch.device(\"cuda:0\")])\n",
125 |     | - "trainer.run(fake_data_stream(), 2, 2)\n"
126 |     | - ],
    | 138 | + "execution_count": 4,
127 | 139 | "metadata": {
128 |     | - "collapsed": false,
129 | 140 | "pycharm": {
130 | 141 | "name": "#%%\n"
131 | 142 | }
132 | 143 | },
133 |     | - "execution_count": 4,
134 | 144 | "outputs": [
135 | 145 | {
136 |     | - "output_type": "execute_result",
137 | 146 | "data": {
138 |     | - "text/plain": "State:\n\titeration: 4\n\tepoch: 2\n\tepoch_length: 2\n\tmax_epochs: 2\n\toutput: 40707.8984375\n\tbatch: <class 'tuple'>\n\tmetrics: <class 'dict'>\n\tdataloader: <class 'generator'>\n\tseed: 12"
    | 147 | + "text/plain": [
    | 148 | + "State:\n",
    | 149 | + "\titeration: 4\n",
    | 150 | + "\tepoch: 2\n",
    | 151 | + "\tepoch_length: 2\n",
    | 152 | + "\tmax_epochs: 2\n",
    | 153 | + "\toutput: 40707.8984375\n",
    | 154 | + "\tbatch: <class 'tuple'>\n",
    | 155 | + "\tmetrics: <class 'dict'>\n",
    | 156 | + "\tdataloader: <class 'generator'>\n",
    | 157 | + "\tseed: 12"
    | 158 | + ]
139 | 159 | },
    | 160 | + "execution_count": 4,
140 | 161 | "metadata": {},
141 |     | - "execution_count": 4
    | 162 | + "output_type": "execute_result"
142 | 163 | }
    | 164 | + ],
    | 165 | + "source": [
    | 166 | + "opt = torch.optim.Adam(net.parameters(), lr)\n",
    | 167 | + "trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, [torch.device(\"cuda:0\")])\n",
    | 168 | + "trainer.run(fake_data_stream(), 2, 2)\n"
143 | 169 | ]
144 | 170 | },
145 | 171 | {
146 | 172 | "cell_type": "markdown",
    | 173 | + "metadata": {},
147 | 174 | "source": [
148 | 175 | "### all GPUs"
149 |     | - ],
150 |     | - "metadata": {
151 |     | - "collapsed": false
152 |     | - }
    | 176 | + ]
153 | 177 | },
154 | 178 | {
155 | 179 | "cell_type": "code",
156 |     | - "source": [
157 |     | - "opt = torch.optim.Adam(net.parameters(), lr)\n",
158 |     | - "trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, None)\n",
159 |     | - "trainer.run(fake_data_stream(), 2, 2)\n"
160 |     | - ],
    | 180 | + "execution_count": 5,
161 | 181 | "metadata": {
162 |     | - "collapsed": false,
163 | 182 | "pycharm": {
164 | 183 | "name": "#%%\n"
165 | 184 | },
166 | 185 | "tags": []
167 | 186 | },
168 |     | - "execution_count": 5,
169 | 187 | "outputs": [
170 | 188 | {
171 |     | - "output_type": "execute_result",
172 | 189 | "data": {
173 |     | - "text/plain": "State:\n\titeration: 4\n\tepoch: 2\n\tepoch_length: 2\n\tmax_epochs: 2\n\toutput: 35669.37109375\n\tbatch: <class 'tuple'>\n\tmetrics: <class 'dict'>\n\tdataloader: <class 'generator'>\n\tseed: 12"
    | 190 | + "text/plain": [
    | 191 | + "State:\n",
    | 192 | + "\titeration: 4\n",
    | 193 | + "\tepoch: 2\n",
    | 194 | + "\tepoch_length: 2\n",
    | 195 | + "\tmax_epochs: 2\n",
    | 196 | + "\toutput: 35669.37109375\n",
    | 197 | + "\tbatch: <class 'tuple'>\n",
    | 198 | + "\tmetrics: <class 'dict'>\n",
    | 199 | + "\tdataloader: <class 'generator'>\n",
    | 200 | + "\tseed: 12"
    | 201 | + ]
174 | 202 | },
    | 203 | + "execution_count": 5,
175 | 204 | "metadata": {},
176 |     | - "execution_count": 5
    | 205 | + "output_type": "execute_result"
177 | 206 | }
    | 207 | + ],
    | 208 | + "source": [
    | 209 | + "opt = torch.optim.Adam(net.parameters(), lr)\n",
    | 210 | + "trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, None)\n",
    | 211 | + "trainer.run(fake_data_stream(), 2, 2)\n"
178 | 212 | ]
179 | 213 | },
180 | 214 | {
181 | 215 | "cell_type": "markdown",
    | 216 | + "metadata": {},
182 | 217 | "source": [
183 | 218 | "### CPU"
184 |     | - ],
185 |     | - "metadata": {
186 |     | - "collapsed": false
187 |     | - }
    | 219 | + ]
188 | 220 | },
189 | 221 | {
190 | 222 | "cell_type": "code",
191 |     | - "source": [
192 |     | - "opt = torch.optim.Adam(net.parameters(), lr)\n",
193 |     | - "trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, [])\n",
194 |     | - "trainer.run(fake_data_stream(), 2, 2)"
195 |     | - ],
    | 223 | + "execution_count": 6,
196 | 224 | "metadata": {
197 |     | - "collapsed": false,
198 | 225 | "pycharm": {
199 | 226 | "name": "#%%\n"
200 | 227 | }
201 | 228 | },
202 |     | - "execution_count": 6,
203 | 229 | "outputs": [
204 | 230 | {
205 |     | - "output_type": "execute_result",
206 | 231 | "data": {
207 |     | - "text/plain": "State:\n\titeration: 4\n\tepoch: 2\n\tepoch_length: 2\n\tmax_epochs: 2\n\toutput: 29662.359375\n\tbatch: <class 'tuple'>\n\tmetrics: <class 'dict'>\n\tdataloader: <class 'generator'>\n\tseed: 12"
    | 232 | + "text/plain": [
    | 233 | + "State:\n",
    | 234 | + "\titeration: 4\n",
    | 235 | + "\tepoch: 2\n",
    | 236 | + "\tepoch_length: 2\n",
    | 237 | + "\tmax_epochs: 2\n",
    | 238 | + "\toutput: 29662.359375\n",
    | 239 | + "\tbatch: <class 'tuple'>\n",
    | 240 | + "\tmetrics: <class 'dict'>\n",
    | 241 | + "\tdataloader: <class 'generator'>\n",
    | 242 | + "\tseed: 12"
    | 243 | + ]
208 | 244 | },
    | 245 | + "execution_count": 6,
209 | 246 | "metadata": {},
210 |     | - "execution_count": 6
    | 247 | + "output_type": "execute_result"
211 | 248 | }
    | 249 | + ],
    | 250 | + "source": [
    | 251 | + "opt = torch.optim.Adam(net.parameters(), lr)\n",
    | 252 | + "trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, [])\n",
    | 253 | + "trainer.run(fake_data_stream(), 2, 2)"
212 | 254 | ]
213 | 255 | }
214 | 256 | ],
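
The three code cells in this diff differ only in the devices argument given to create_multigpu_supervised_trainer: [torch.device("cuda:0")] for one GPU, None for all visible GPUs, and [] for CPU. A minimal, self-contained sketch of that pattern follows; net, lr, fake_loss and fake_data_stream are defined in earlier notebook cells not shown in this diff, so hypothetical stand-ins are used here.

    # Sketch of the notebook's pattern with stand-in definitions; the real net,
    # fake_loss and fake_data_stream live in cells this diff does not show.
    import torch
    from monai.engines import create_multigpu_supervised_trainer

    lr = 1e-3
    net = torch.nn.Linear(10, 1)  # stand-in network


    def fake_loss(y_pred, y):
        # stand-in loss: any scalar tensor keeps the trainer running
        return (y_pred - y).abs().sum()


    def fake_data_stream():
        # stand-in infinite generator of (input, target) batches
        while True:
            yield torch.rand(4, 10), torch.rand(4, 1)


    device_specs = {
        "1 GPU": [torch.device("cuda:0")],  # explicit single device
        "all GPUs": None,                   # None -> every visible GPU (DataParallel when >1)
        "CPU": [],                          # empty list -> CPU only
    }

    for name, devices in device_specs.items():
        if devices != [] and not torch.cuda.is_available():
            continue  # skip the GPU configurations on a CPU-only machine
        opt = torch.optim.Adam(net.parameters(), lr)
        trainer = create_multigpu_supervised_trainer(net, opt, fake_loss, devices)
        state = trainer.run(fake_data_stream(), 2, 2)  # 2 epochs, 2 iterations each
        print(name, state.output)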
|
|