@@ -92,6 +92,7 @@ def window_partition(hidden_states, window_size):
92
92
# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L263
def window_reverse(windows, window_size, height, width):
    """
    Merges windows to produce higher resolution features.

    Args:
        windows (`torch.FloatTensor` of shape `(num_windows * batch_size, window_size, window_size, num_channels)`):
            Input windows
        window_size (`int`):
            Window size
        height (`int`):
            Height of the resized audio
        width (`int`):
            Width of the resized audio
    """
    num_channels = windows.shape[-1]
    # Un-flatten the window axis back into (batch, h_blocks, w_blocks); the
    # leading -1 lets view infer the batch size instead of computing it by hand.
    windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
    # Interleave block indices with intra-window offsets so that (h_block, h_in_window)
    # and (w_block, w_in_window) collapse into contiguous height/width dimensions.
    windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
    return windows
110
110
111
111
112
112
# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
0 commit comments