@@ -323,7 +323,7 @@ def transform_points_ndc(
323
323
return world_to_ndc_transform .transform_points (points , eps = eps )
324
324
325
325
def transform_points_screen(
    self, points, eps: Optional[float] = None, with_xyflip: bool = True, **kwargs
) -> torch.Tensor:
    """
    Transforms points from PyTorch3D world/camera space to screen space.

    Args:
        points: torch tensor of points to transform.
            (NOTE(review): exact expected shape is not visible in this chunk —
            confirm against `transform_points_ndc`.)
        eps: If not None, stabilizes gradients by avoiding division by
            excessively low numbers for points close to the camera plane.
        with_xyflip: If True, flip x and y directions. In world/camera/ndc
            coords, +x points to the left and +y up. If with_xyflip is true,
            in screen coords +x points right, and +y down, following the
            usual RGB image convention. Warning: do not set to False unless
            you know what you're doing!

    Returns:
        new_points: transformed points with the same shape as the input.
    """
    # Map into NDC space first, then apply the NDC -> screen transform.
    ndc_points = self.transform_points_ndc(points, eps=eps, **kwargs)
    # A caller-supplied image_size in kwargs overrides the camera's own.
    size = kwargs.get("image_size", self.get_image_size())
    to_screen = get_ndc_to_screen_transform(
        self, with_xyflip=with_xyflip, image_size=size
    )
    return to_screen.transform_points(ndc_points, eps=eps)
353
358
354
359
def clone (self ):
0 commit comments