@@ -439,82 +439,34 @@ static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
 				 end_gfn << PAGE_SHIFT);
 }
 
-static int handle_hva_to_gpa(struct kvm *kvm,
-			     unsigned long start,
-			     unsigned long end,
-			     int (*handler)(struct kvm *kvm, gfn_t gfn,
-					    gpa_t gfn_end,
-					    struct kvm_memory_slot *memslot,
-					    void *data),
-			     void *data)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
-	int ret = 0;
-
-	slots = kvm_memslots(kvm);
-
-	/* we only care about the pages that the guest sees */
-	kvm_for_each_memslot(memslot, slots) {
-		unsigned long hva_start, hva_end;
-		gfn_t gfn, gfn_end;
-
-		hva_start = max(start, memslot->userspace_addr);
-		hva_end = min(end, memslot->userspace_addr +
-					(memslot->npages << PAGE_SHIFT));
-		if (hva_start >= hva_end)
-			continue;
-
-		/*
-		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
-		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-		 */
-		gfn = hva_to_gfn_memslot(hva_start, memslot);
-		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
-		ret |= handler(kvm, gfn, gfn_end, memslot, data);
-	}
-
-	return ret;
-}
-
-
-static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-				 struct kvm_memory_slot *memslot, void *data)
-{
-	kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
+	kvm_mips_flush_gpa_pt(kvm, range->start, range->end);
 	return 1;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
-			unsigned flags)
-{
-	return handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
-}
-
-static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-				struct kvm_memory_slot *memslot, void *data)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	gpa_t gpa = gfn << PAGE_SHIFT;
-	pte_t hva_pte = *(pte_t *)data;
+	gpa_t gpa = range->start << PAGE_SHIFT;
+	pte_t hva_pte = range->pte;
 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
 	pte_t old_pte;
 
 	if (!gpa_pte)
-		return 0;
+		return false;
 
 	/* Mapping may need adjusting depending on memslot flags */
 	old_pte = *gpa_pte;
-	if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
+	if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
 		hva_pte = pte_mkclean(hva_pte);
-	else if (memslot->flags & KVM_MEM_READONLY)
+	else if (range->slot->flags & KVM_MEM_READONLY)
 		hva_pte = pte_wrprotect(hva_pte);
 
 	set_pte(gpa_pte, hva_pte);
 
 	/* Replacing an absent or old page doesn't need flushes */
 	if (!pte_present(old_pte) || !pte_young(old_pte))
-		return 0;
+		return false;
 
 	/* Pages swapped, aged, moved, or cleaned require flushes */
 	return !pte_present(hva_pte) ||
@@ -523,39 +475,21 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
 	       (pte_dirty(old_pte) && !pte_dirty(hva_pte));
 }
 
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	unsigned long end = hva + PAGE_SIZE;
-	return handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
+	return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
 }
 
-static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-			       struct kvm_memory_slot *memslot, void *data)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
-}
-
-static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
-				    struct kvm_memory_slot *memslot, void *data)
-{
-	gpa_t gpa = gfn << PAGE_SHIFT;
+	gpa_t gpa = range->start << PAGE_SHIFT;
 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
 
 	if (!gpa_pte)
 		return 0;
 	return pte_young(*gpa_pte);
 }
 
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
-{
-	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
-	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
-}
-
 /**
  * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
  * @vcpu:	VCPU pointer.
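Note (not part of the patch): the converted hooks receive a struct kvm_gfn_range that KVM's generic MMU-notifier code now builds per memslot, replacing the per-arch HVA-to-GFN walk that handle_hva_to_gpa() used to perform. As a rough reference for how those fields line up with the old handler arguments, here is a minimal sketch of the layout assumed by the diff; field names slot, start, end and pte come straight from the usage above, while may_block is an assumption, and include/linux/kvm_host.h remains the authoritative definition.

/* Sketch only: assumed layout of the range handed to the gfn hooks. */
struct kvm_gfn_range {
	struct kvm_memory_slot *slot;	/* memslot containing the range (was the memslot argument) */
	gfn_t start;			/* first gfn to process (was gfn) */
	gfn_t end;			/* one past the last gfn (was gfn_end) */
	pte_t pte;			/* new host PTE, used only by kvm_set_spte_gfn() (was *(pte_t *)data) */
	bool may_block;			/* assumed: whether the handler is allowed to sleep */
};

With this layout the generic code computes the gfn bounds from the HVA range once, so each architecture hook only has to touch its own GPA page tables, which is exactly what the new bodies above do.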