@@ -128,10 +128,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
 static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 {
 	int cur_cpu, vcpu;
-	struct ipi_arg_non_ex **arg;
-	struct ipi_arg_non_ex *ipi_arg;
+	struct ipi_arg_non_ex ipi_arg;
 	int ret = 1;
-	unsigned long flags;
 
 	if (cpumask_empty(mask))
 		return true;
@@ -145,16 +143,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 	if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
 		return __send_ipi_mask_ex(mask, vector);
 
-	local_irq_save(flags);
-	arg = (struct ipi_arg_non_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
-
-	ipi_arg = *arg;
-	if (unlikely(!ipi_arg))
-		goto ipi_mask_done;
-
-	ipi_arg->vector = vector;
-	ipi_arg->reserved = 0;
-	ipi_arg->cpu_mask = 0;
+	ipi_arg.vector = vector;
+	ipi_arg.cpu_mask = 0;
 
 	for_each_cpu(cur_cpu, mask) {
 		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
@@ -165,13 +155,13 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 		if (vcpu >= 64)
 			goto ipi_mask_done;
 
-		__set_bit(vcpu, (unsigned long *)&ipi_arg->cpu_mask);
+		__set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
 	}
 
-	ret = hv_do_hypercall(HVCALL_SEND_IPI, ipi_arg, NULL);
+	ret = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
+				     ipi_arg.cpu_mask);
 
 ipi_mask_done:
-	local_irq_restore(flags);
 	return ((ret == 0) ? true : false);
 }
 