
Commit 8b6a877

andrea-parri authored and liuw committed
Drivers: hv: vmbus: Replace the per-CPU channel lists with a global array of channels
When Hyper-V sends an interrupt to the guest, the guest has to figure out which channel the interrupt is associated with. Hyper-V sets a bit in a memory page that is shared with the guest, indicating a particular "relid" that the interrupt is associated with. The current Linux code then uses a set of per-CPU linked lists to map a given "relid" to a pointer to a channel structure.

This design introduces a synchronization problem if the CPU that Hyper-V will interrupt for a certain channel is changed. If the interrupt comes on the "old CPU" and the channel was already moved to the per-CPU list of the "new CPU", then the relid -> channel mapping will fail and the interrupt is dropped. Similarly, if the interrupt comes on the new CPU but the channel was not moved to the per-CPU list of the new CPU, then the mapping will fail and the interrupt is dropped.

Relids are integers ranging from 0 to 2047. The mapping from relids to channel structures can be done by setting up an array with 2048 entries, each entry being a pointer to a channel structure (hence total size ~16K bytes, which is not a problem). The array is global, so there are no per-CPU linked lists to update. The array can be searched and updated by loading from/storing to the array at the specified index. With no per-CPU data structures, the above-mentioned synchronization problem is avoided and the relid2channel() function gets simpler.

Suggested-by: Michael Kelley <[email protected]>
Signed-off-by: Andrea Parri (Microsoft) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Reviewed-by: Michael Kelley <[email protected]>
Signed-off-by: Wei Liu <[email protected]>
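For orientation before the full diffs below, here is the new mapping condensed from this patch's hunks (the bounds checks are kept; the long memory-ordering comments in channel_mgmt.c are elided). All names are taken from the patch itself: vmbus_connection.channels is the global array that vmbus_connect() now allocates with MAX_CHANNEL_RELIDS entries.

        /* Map a channel's relid to the channel (condensed from the patch). */
        void vmbus_channel_map_relid(struct vmbus_channel *channel)
        {
                if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
                        return;
                /* Full barrier: the store reaches all CPUs before interrupts can use it. */
                smp_store_mb(vmbus_connection.channels[channel->offermsg.child_relid],
                             channel);
        }

        /* Remove the relid -> channel mapping. */
        void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
        {
                if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
                        return;
                WRITE_ONCE(vmbus_connection.channels[channel->offermsg.child_relid], NULL);
        }

        /* Look up a channel by relid: one array load replaces the old list walk. */
        struct vmbus_channel *relid2channel(u32 relid)
        {
                if (WARN_ON(relid >= MAX_CHANNEL_RELIDS))
                        return NULL;
                return READ_ONCE(vmbus_connection.channels[relid]);
        }

Because a reader only performs a single READ_ONCE() on a fixed-size global array, a channel can be retargeted to a different CPU without touching any per-CPU list, which is exactly the race the commit message describes.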
1 parent b9fa1b8 commit 8b6a877

File tree: 6 files changed, +160 -126 lines changed


drivers/hv/channel_mgmt.c
Lines changed: 112 additions & 67 deletions

@@ -319,7 +319,6 @@ static struct vmbus_channel *alloc_channel(void)
 	init_completion(&channel->rescind_event);
 
 	INIT_LIST_HEAD(&channel->sc_list);
-	INIT_LIST_HEAD(&channel->percpu_list);
 
 	tasklet_init(&channel->callback_event,
 		     vmbus_on_event, (unsigned long)channel);
@@ -340,23 +339,49 @@ static void free_channel(struct vmbus_channel *channel)
 	kobject_put(&channel->kobj);
 }
 
-static void percpu_channel_enq(void *arg)
+void vmbus_channel_map_relid(struct vmbus_channel *channel)
 {
-	struct vmbus_channel *channel = arg;
-	struct hv_per_cpu_context *hv_cpu
-		= this_cpu_ptr(hv_context.cpu_context);
-
-	list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
+	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
+		return;
+	/*
+	 * The mapping of the channel's relid is visible from the CPUs that
+	 * execute vmbus_chan_sched() by the time that vmbus_chan_sched() will
+	 * execute:
+	 *
+	 * (a) In the "normal (i.e., not resuming from hibernation)" path,
+	 *     the full barrier in smp_store_mb() guarantees that the store
+	 *     is propagated to all CPUs before the add_channel_work work
+	 *     is queued.  In turn, add_channel_work is queued before the
+	 *     channel's ring buffer is allocated/initialized and the
+	 *     OPENCHANNEL message for the channel is sent in vmbus_open().
+	 *     Hyper-V won't start sending the interrupts for the channel
+	 *     before the OPENCHANNEL message is acked.  The memory barrier
+	 *     in vmbus_chan_sched() -> sync_test_and_clear_bit() ensures
+	 *     that vmbus_chan_sched() must find the channel's relid in
+	 *     recv_int_page before retrieving the channel pointer from the
+	 *     array of channels.
+	 *
+	 * (b) In the "resuming from hibernation" path, the smp_store_mb()
+	 *     guarantees that the store is propagated to all CPUs before
+	 *     the VMBus connection is marked as ready for the resume event
+	 *     (cf. check_ready_for_resume_event()).  The interrupt handler
+	 *     of the VMBus driver and vmbus_chan_sched() can not run before
+	 *     vmbus_bus_resume() has completed execution (cf. resume_noirq).
+	 */
+	smp_store_mb(
+		vmbus_connection.channels[channel->offermsg.child_relid],
+		channel);
 }
 
-static void percpu_channel_deq(void *arg)
+void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
 {
-	struct vmbus_channel *channel = arg;
-
-	list_del_rcu(&channel->percpu_list);
+	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
+		return;
+	WRITE_ONCE(
+		vmbus_connection.channels[channel->offermsg.child_relid],
+		NULL);
 }
 
-
 static void vmbus_release_relid(u32 relid)
 {
 	struct vmbus_channel_relid_released msg;
@@ -376,17 +401,25 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
 	struct vmbus_channel *primary_channel;
 	unsigned long flags;
 
-	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+	lockdep_assert_held(&vmbus_connection.channel_mutex);
 	BUG_ON(!channel->rescind);
 
-	if (channel->target_cpu != get_cpu()) {
-		put_cpu();
-		smp_call_function_single(channel->target_cpu,
-					 percpu_channel_deq, channel, true);
-	} else {
-		percpu_channel_deq(channel);
-		put_cpu();
-	}
+	/*
+	 * hv_process_channel_removal() could find INVALID_RELID only for
+	 * hv_sock channels.  See the inline comments in vmbus_onoffer().
+	 */
+	WARN_ON(channel->offermsg.child_relid == INVALID_RELID &&
		!is_hvsock_channel(channel));
+
+	/*
+	 * Upon suspend, an in-use hv_sock channel is removed from the array of
+	 * channels and the relid is invalidated.  After hibernation, when the
+	 * user-space appplication destroys the channel, it's unnecessary and
+	 * unsafe to remove the channel from the array of channels.  See also
+	 * the inline comments before the call of vmbus_release_relid() below.
+	 */
+	if (channel->offermsg.child_relid != INVALID_RELID)
+		vmbus_channel_unmap_relid(channel);
 
 	if (channel->primary_channel == NULL) {
 		list_del(&channel->listentry);
@@ -447,16 +480,6 @@ static void vmbus_add_channel_work(struct work_struct *work)
 
 	init_vp_index(newchannel, dev_type);
 
-	if (newchannel->target_cpu != get_cpu()) {
-		put_cpu();
-		smp_call_function_single(newchannel->target_cpu,
-					 percpu_channel_enq,
-					 newchannel, true);
-	} else {
-		percpu_channel_enq(newchannel);
-		put_cpu();
-	}
-
 	/*
 	 * This state is used to indicate a successful open
 	 * so that when we do close the channel normally, we
@@ -523,17 +546,10 @@ static void vmbus_add_channel_work(struct work_struct *work)
 		spin_unlock_irqrestore(&primary_channel->lock, flags);
 	}
 
-	mutex_unlock(&vmbus_connection.channel_mutex);
+	/* vmbus_process_offer() has mapped the channel. */
+	vmbus_channel_unmap_relid(newchannel);
 
-	if (newchannel->target_cpu != get_cpu()) {
-		put_cpu();
-		smp_call_function_single(newchannel->target_cpu,
-					 percpu_channel_deq,
-					 newchannel, true);
-	} else {
-		percpu_channel_deq(newchannel);
-		put_cpu();
-	}
+	mutex_unlock(&vmbus_connection.channel_mutex);
 
 	vmbus_release_relid(newchannel->offermsg.child_relid);
 
@@ -599,6 +615,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 		spin_unlock_irqrestore(&channel->lock, flags);
 	}
 
+	vmbus_channel_map_relid(newchannel);
+
 	mutex_unlock(&vmbus_connection.channel_mutex);
 
 	/*
@@ -940,45 +958,72 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 	oldchannel = find_primary_channel_by_offer(offer);
 
 	if (oldchannel != NULL) {
-		atomic_dec(&vmbus_connection.offer_in_progress);
-
 		/*
 		 * We're resuming from hibernation: all the sub-channel and
 		 * hv_sock channels we had before the hibernation should have
 		 * been cleaned up, and now we must be seeing a re-offered
 		 * primary channel that we had before the hibernation.
 		 */
 
+		/*
+		 * { Initially: channel relid = INVALID_RELID,
+		 *              channels[valid_relid] = NULL }
+		 *
+		 * CPU1                                  CPU2
+		 *
+		 * [vmbus_onoffer()]                     [vmbus_device_release()]
+		 *
+		 * LOCK channel_mutex                    LOCK channel_mutex
+		 * STORE channel relid = valid_relid     LOAD r1 = channel relid
+		 * MAP_RELID channel                     if (r1 != INVALID_RELID)
+		 * UNLOCK channel_mutex                    UNMAP_RELID channel
+		 *                                       UNLOCK channel_mutex
+		 *
+		 * Forbids: r1 == valid_relid &&
+		 *              channels[valid_relid] == channel
+		 *
+		 * Note.  r1 can be INVALID_RELID only for an hv_sock channel.
+		 * None of the hv_sock channels which were present before the
+		 * suspend are re-offered upon the resume.  See the WARN_ON()
+		 * in hv_process_channel_removal().
+		 */
+		mutex_lock(&vmbus_connection.channel_mutex);
+
+		atomic_dec(&vmbus_connection.offer_in_progress);
+
 		WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
 		/* Fix up the relid. */
 		oldchannel->offermsg.child_relid = offer->child_relid;
 
 		offer_sz = sizeof(*offer);
-		if (memcmp(offer, &oldchannel->offermsg, offer_sz) == 0) {
-			check_ready_for_resume_event();
-			return;
+		if (memcmp(offer, &oldchannel->offermsg, offer_sz) != 0) {
+			/*
+			 * This is not an error, since the host can also change
+			 * the other field(s) of the offer, e.g. on WS RS5
+			 * (Build 17763), the offer->connection_id of the
+			 * Mellanox VF vmbus device can change when the host
+			 * reoffers the device upon resume.
+			 */
+			pr_debug("vmbus offer changed: relid=%d\n",
+				 offer->child_relid);
+
+			print_hex_dump_debug("Old vmbus offer: ",
+					     DUMP_PREFIX_OFFSET, 16, 4,
+					     &oldchannel->offermsg, offer_sz,
+					     false);
+			print_hex_dump_debug("New vmbus offer: ",
+					     DUMP_PREFIX_OFFSET, 16, 4,
+					     offer, offer_sz, false);
+
+			/* Fix up the old channel. */
+			vmbus_setup_channel_state(oldchannel, offer);
 		}
 
-		/*
-		 * This is not an error, since the host can also change the
-		 * other field(s) of the offer, e.g. on WS RS5 (Build 17763),
-		 * the offer->connection_id of the Mellanox VF vmbus device
-		 * can change when the host reoffers the device upon resume.
-		 */
-		pr_debug("vmbus offer changed: relid=%d\n",
-			 offer->child_relid);
-
-		print_hex_dump_debug("Old vmbus offer: ", DUMP_PREFIX_OFFSET,
-				     16, 4, &oldchannel->offermsg, offer_sz,
-				     false);
-		print_hex_dump_debug("New vmbus offer: ", DUMP_PREFIX_OFFSET,
-				     16, 4, offer, offer_sz, false);
-
-		/* Fix up the old channel. */
-		vmbus_setup_channel_state(oldchannel, offer);
-
+		/* Add the channel back to the array of channels. */
+		vmbus_channel_map_relid(oldchannel);
 		check_ready_for_resume_event();
 
+		mutex_unlock(&vmbus_connection.channel_mutex);
 		return;
 	}
 
@@ -1036,14 +1081,14 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 	 *
 	 * CPU1                                 CPU2
 	 *
-	 * [vmbus_process_offer()]              [vmbus_onoffer_rescind()]
+	 * [vmbus_onoffer()]                    [vmbus_onoffer_rescind()]
 	 *
 	 * LOCK channel_mutex                   WAIT_ON offer_in_progress == 0
 	 * DECREMENT offer_in_progress          LOCK channel_mutex
-	 * INSERT chn_list                      SEARCH chn_list
+	 * STORE channels[]                     LOAD channels[]
	 * UNLOCK channel_mutex                  UNLOCK channel_mutex
 	 *
-	 * Forbids: CPU2's SEARCH from *not* seeing CPU1's INSERT
+	 * Forbids: CPU2's LOAD from *not* seeing CPU1's STORE
	 */
 
 	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {

drivers/hv/connection.c
Lines changed: 11 additions & 27 deletions

@@ -248,6 +248,14 @@ int vmbus_connect(void)
 	pr_info("Vmbus version:%d.%d\n",
 		version >> 16, version & 0xFFFF);
 
+	vmbus_connection.channels = kcalloc(MAX_CHANNEL_RELIDS,
+					    sizeof(struct vmbus_channel *),
+					    GFP_KERNEL);
+	if (vmbus_connection.channels == NULL) {
+		ret = -ENOMEM;
+		goto cleanup;
+	}
+
 	kfree(msginfo);
 	return 0;
 
@@ -295,33 +303,9 @@ void vmbus_disconnect(void)
  */
 struct vmbus_channel *relid2channel(u32 relid)
 {
-	struct vmbus_channel *channel;
-	struct vmbus_channel *found_channel = NULL;
-	struct list_head *cur, *tmp;
-	struct vmbus_channel *cur_sc;
-
-	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
-
-	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
-		if (channel->offermsg.child_relid == relid) {
-			found_channel = channel;
-			break;
-		} else if (!list_empty(&channel->sc_list)) {
-			/*
-			 * Deal with sub-channels.
-			 */
-			list_for_each_safe(cur, tmp, &channel->sc_list) {
-				cur_sc = list_entry(cur, struct vmbus_channel,
-						    sc_list);
-				if (cur_sc->offermsg.child_relid == relid) {
-					found_channel = cur_sc;
-					break;
-				}
-			}
-		}
-	}
-
-	return found_channel;
+	if (WARN_ON(relid >= MAX_CHANNEL_RELIDS))
+		return NULL;
+	return READ_ONCE(vmbus_connection.channels[relid]);
 }
 
 /*
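To show where the new lookup is consumed, here is a deliberately simplified, hypothetical sketch of the event-dispatch path described in the commit message. The real vmbus_chan_sched() in the VMBus driver is structured differently; chan_sched_sketch(), its recv_int_page argument, and the tasklet dispatch below are illustrative stand-ins, not code from this patch.

	/* Hypothetical sketch only: scan pending relids and resolve each one. */
	static void chan_sched_sketch(unsigned long *recv_int_page)
	{
		unsigned int relid;

		for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
			struct vmbus_channel *channel;

			/* Pairs with the smp_store_mb() in vmbus_channel_map_relid(). */
			if (!sync_test_and_clear_bit(relid, recv_int_page))
				continue;

			/* One bounds-checked array load; no per-CPU list to search. */
			channel = relid2channel(relid);
			if (channel != NULL)
				tasklet_schedule(&channel->callback_event);
		}
	}

In this sketch a relid that does not resolve to a channel (e.g., one that was just unmapped) is simply skipped.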

drivers/hv/hv.c
Lines changed: 0 additions & 2 deletions

@@ -117,8 +117,6 @@ int hv_synic_alloc(void)
 			pr_err("Unable to allocate post msg page\n");
 			goto err;
 		}
-
-		INIT_LIST_HEAD(&hv_cpu->chan_list);
 	}
 
 	return 0;

drivers/hv/hyperv_vmbus.h
Lines changed: 8 additions & 6 deletions

@@ -132,12 +132,6 @@ struct hv_per_cpu_context {
 	 * basis.
 	 */
 	struct tasklet_struct msg_dpc;
-
-	/*
-	 * To optimize the mapping of relid to channel, maintain
-	 * per-cpu list of the channels based on their CPU affinity.
-	 */
-	struct list_head chan_list;
 };
 
 struct hv_context {
@@ -202,6 +196,8 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 /* TODO: Need to make this configurable */
 #define MAX_NUM_CHANNELS_SUPPORTED	256
 
+#define MAX_CHANNEL_RELIDS					\
+	max(MAX_NUM_CHANNELS_SUPPORTED, HV_EVENT_FLAGS_COUNT)
 
 enum vmbus_connect_state {
 	DISCONNECTED,
@@ -251,6 +247,9 @@ struct vmbus_connection {
 	struct list_head chn_list;
 	struct mutex channel_mutex;
 
+	/* Array of channels */
+	struct vmbus_channel **channels;
+
 	/*
 	 * An offer message is handled first on the work_queue, and then
 	 * is further handled on handle_primary_chan_wq or
@@ -338,6 +337,9 @@ int vmbus_add_channel_kobj(struct hv_device *device_obj,
 
 void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);
 
+void vmbus_channel_map_relid(struct vmbus_channel *channel);
+void vmbus_channel_unmap_relid(struct vmbus_channel *channel);
+
 struct vmbus_channel *relid2channel(u32 relid);
 
 void vmbus_free_channels(void);
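Sizing note (worked from the commit message): relids range from 0 to 2047, so HV_EVENT_FLAGS_COUNT (2048) dominates MAX_NUM_CHANNELS_SUPPORTED (256) and MAX_CHANNEL_RELIDS evaluates to 2048 entries. With 8-byte pointers on a 64-bit kernel, the array kcalloc'd in vmbus_connect() is 2048 * 8 = 16384 bytes, matching the "~16K bytes" estimate in the commit message.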
