Skip to content

Commit 0b8f593

Browse files
amirafzali authored and facebook-github-bot committed
include pytest-asyncio and timeout (#11)
Summary: Pull Request resolved: #11 Differential Revision: D75028810
1 parent 6c8a85f commit 0b8f593

File tree

4 files changed

+18
-2
lines changed

4 files changed

+18
-2
lines changed

.github/workflows/test.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ jobs:
5555
pip install pyzmq requests numpy pyre-extensions
5656
5757
# Test dependencies
58-
pip install pytest cloudpickle
58+
pip install pytest cloudpickle pytest-timeout pytest-asyncio
5959
6060
# Build and install monarch
6161
python setup.py install

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ pip install setuptools-rust
3131
# install torch, can use conda or build it yourself or whatever
3232
pip install torch
3333
# install other deps, see pyproject.toml for latest
34-
pip install pyzmq requests numpy pyre-extensions pytest-timeout cloudpickle
34+
pip install pyzmq requests numpy pyre-extensions pytest-timeout cloudpickle pytest-asyncio
3535

3636
# install the package
3737
python setup.py install

python/tests/_monarch/test_hyperactor.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
# pyre-strict
2+
# pyre-ignore-all-errors[56]
23

34
import multiprocessing
45
import os
@@ -69,12 +70,14 @@ def test_fn() -> None:
6970
assert code == signal.SIGTERM, code
7071

7172

73+
@pytest.mark.asyncio
7274
async def test_allocator() -> None:
7375
spec = AllocSpec(AllocConstraints(), replica=2)
7476
allocator = monarch.LocalAllocator()
7577
_ = await allocator.allocate(spec)
7678

7779

80+
@pytest.mark.asyncio
7881
async def test_proc_mesh() -> None:
7982
spec = AllocSpec(AllocConstraints(), replica=2)
8083
allocator = monarch.LocalAllocator()
@@ -83,6 +86,7 @@ async def test_proc_mesh() -> None:
8386
assert str(proc_mesh) == "<ProcMesh { shape: {replica=2} }>"
8487

8588

89+
@pytest.mark.asyncio
8690
async def test_actor_mesh() -> None:
8791
spec = AllocSpec(AllocConstraints(), replica=2)
8892
allocator = monarch.LocalAllocator()

python/tests/test_python_actors.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
import operator
22
from types import ModuleType
33

4+
import pytest
5+
46
import torch
57

68
from monarch.proc_mesh import local_proc_mesh, proc_mesh
@@ -54,6 +56,7 @@ async def get_grad_buffer(self) -> torch.Tensor:
5456
return self.grad_buffer
5557

5658

59+
@pytest.mark.asyncio
5760
async def test_choose():
5861
proc = await local_proc_mesh(gpus=2)
5962
v = await proc.spawn("counter", Counter, 3)
@@ -65,6 +68,7 @@ async def test_choose():
6568
assert result == result2
6669

6770

71+
@pytest.mark.asyncio
6872
async def test_stream():
6973
proc = await local_proc_mesh(gpus=2)
7074
v = await proc.spawn("counter2", Counter, 3)
@@ -94,6 +98,7 @@ async def get_buffer(self):
9498
return self.buffer
9599

96100

101+
@pytest.mark.asyncio
97102
async def test_proc_mesh_rdma():
98103
proc = await proc_mesh(gpus=1)
99104
server = await proc.spawn("server", ParameterServer)
@@ -156,6 +161,7 @@ async def get(self, to: To):
156161
return [x async for x in to.whoami.stream()]
157162

158163

164+
@pytest.mark.asyncio
159165
async def test_mesh_passed_to_mesh():
160166
proc = await local_proc_mesh(gpus=2)
161167
f = await proc.spawn("from", From)
@@ -165,6 +171,7 @@ async def test_mesh_passed_to_mesh():
165171
assert all[0] != all[1]
166172

167173

174+
@pytest.mark.asyncio
168175
async def test_mesh_passed_to_mesh_on_different_proc_mesh():
169176
proc = await local_proc_mesh(gpus=2)
170177
proc2 = await local_proc_mesh(gpus=2)
@@ -175,6 +182,7 @@ async def test_mesh_passed_to_mesh_on_different_proc_mesh():
175182
assert all[0] != all[1]
176183

177184

185+
@pytest.mark.asyncio
178186
async def test_actor_slicing():
179187
proc = await local_proc_mesh(gpus=2)
180188
proc2 = await local_proc_mesh(gpus=2)
@@ -190,6 +198,7 @@ async def test_actor_slicing():
190198
assert result[0] == result[1]
191199

192200

201+
@pytest.mark.asyncio
193202
async def test_aggregate():
194203
proc = await local_proc_mesh(gpus=2)
195204
counter = await proc.spawn("counter", Counter, 1)
@@ -205,6 +214,7 @@ async def run(self, fn):
205214
return fn()
206215

207216

217+
@pytest.mark.asyncio
208218
async def test_rank_size():
209219
proc = await local_proc_mesh(gpus=2)
210220
r = await proc.spawn("runit", RunIt)
@@ -262,6 +272,7 @@ async def update_weights(self):
262272
), f"{torch.sum(self.generator.weight.data)=}, {self.step=}"
263273

264274

275+
@pytest.mark.asyncio
265276
async def test_gpu_trainer_generator():
266277
trainer_proc = await proc_mesh(gpus=1)
267278
gen_proc = await proc_mesh(gpus=1)
@@ -283,6 +294,7 @@ def sync_endpoint(self, a_counter: Counter):
283294
return a_counter.value.choose().get()
284295

285296

297+
@pytest.mark.asyncio
286298
async def test_sync_actor():
287299
proc = await local_proc_mesh(gpus=2)
288300
a = await proc.spawn("actor", SyncActor)

0 commit comments

Comments (0)