
Commit ab8e523

Author: Martin Schwidefsky (committed)
s390/mm,gmap: segment mapping race
The gmap_map_segment function creates a special invalid segment table entry with the address of the requested target location in the process address space. The first access will create the connection between the gmap segment table and the target page table of the main process. If two threads do this concurrently, both will walk the page tables and allocate a gmap_rmap structure for the same segment table entry. To avoid the race, recheck the segment table entry after taking the page table lock.

Signed-off-by: Martin Schwidefsky <[email protected]>
1 parent c503494 commit ab8e523
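
The core of the fix is visible in gmap_connect_pgtable() in the diff below: the gmap_rmap structure is allocated before the lock is taken, the segment table entry is re-read under mm->page_table_lock, and only the thread that still sees the invalid entry it observed earlier links its rmap and rewrites the entry; the loser frees its unused allocation. What follows is a minimal userspace sketch of that recheck-under-lock pattern, with a pthread mutex standing in for the page table lock; all names in the sketch are illustrative, not kernel code.

#include <pthread.h>
#include <stdlib.h>

/* Illustrative stand-in for the kernel's gmap_rmap; not kernel code. */
struct rmap {
	struct rmap *next;
	unsigned long *entry;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct rmap *mapper_list;

/*
 * Connect *entry to new_val, but only if it still holds the value this
 * thread observed before taking the lock.  A racing thread that already
 * updated the entry wins; the loser simply frees its allocation.
 */
static int connect_entry(unsigned long seen, unsigned long *entry,
			 unsigned long new_val)
{
	struct rmap *rmap;

	rmap = malloc(sizeof(*rmap));		/* allocate outside the lock */
	if (!rmap)
		return -1;
	rmap->entry = entry;
	pthread_mutex_lock(&table_lock);
	if (*entry == seen) {			/* recheck under the lock */
		rmap->next = mapper_list;	/* link the new rmap */
		mapper_list = rmap;
		*entry = new_val;		/* publish the connection */
		rmap = NULL;			/* ownership moved to the list */
	}
	pthread_mutex_unlock(&table_lock);
	free(rmap);				/* no-op when this thread won */
	return 0;
}

Allocating before taking the lock keeps the critical section free of sleeping allocations, which is why the kernel code calls kmalloc() before spin_lock() and simply frees the unused gmap_rmap afterwards.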

1 file changed, 91 insertions(+), 69 deletions(-)

arch/s390/mm/pgtable.c

@@ -454,12 +454,11 @@ unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
 }
 EXPORT_SYMBOL_GPL(gmap_translate);
 
-/*
- * this function is assumed to be called with mmap_sem held
- */
-unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+static int gmap_connect_pgtable(unsigned long segment,
+				unsigned long *segment_ptr,
+				struct gmap *gmap)
 {
-	unsigned long *segment_ptr, vmaddr, segment;
+	unsigned long vmaddr;
 	struct vm_area_struct *vma;
 	struct gmap_pgtable *mp;
 	struct gmap_rmap *rmap;
@@ -469,48 +468,94 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
 	pud_t *pud;
 	pmd_t *pmd;
 
+	mm = gmap->mm;
+	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+	vma = find_vma(mm, vmaddr);
+	if (!vma || vma->vm_start > vmaddr)
+		return -EFAULT;
+	/* Walk the parent mm page table */
+	pgd = pgd_offset(mm, vmaddr);
+	pud = pud_alloc(mm, pgd, vmaddr);
+	if (!pud)
+		return -ENOMEM;
+	pmd = pmd_alloc(mm, pud, vmaddr);
+	if (!pmd)
+		return -ENOMEM;
+	if (!pmd_present(*pmd) &&
+	    __pte_alloc(mm, vma, pmd, vmaddr))
+		return -ENOMEM;
+	/* pmd now points to a valid segment table entry. */
+	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+	if (!rmap)
+		return -ENOMEM;
+	/* Link gmap segment table entry location to page table. */
+	page = pmd_page(*pmd);
+	mp = (struct gmap_pgtable *) page->index;
+	rmap->entry = segment_ptr;
+	spin_lock(&mm->page_table_lock);
+	if (*segment_ptr == segment) {
+		list_add(&rmap->list, &mp->mapper);
+		/* Set gmap segment table entry to page table. */
+		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
+		rmap = NULL;
+	}
+	spin_unlock(&mm->page_table_lock);
+	kfree(rmap);
+	return 0;
+}
+
+static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
+{
+	struct gmap_rmap *rmap, *next;
+	struct gmap_pgtable *mp;
+	struct page *page;
+	int flush;
+
+	flush = 0;
+	spin_lock(&mm->page_table_lock);
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	mp = (struct gmap_pgtable *) page->index;
+	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+		*rmap->entry =
+			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+		list_del(&rmap->list);
+		kfree(rmap);
+		flush = 1;
+	}
+	spin_unlock(&mm->page_table_lock);
+	if (flush)
+		__tlb_flush_global();
+}
+
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+{
+	unsigned long *segment_ptr, segment;
+	struct gmap_pgtable *mp;
+	struct page *page;
+	int rc;
+
 	current->thread.gmap_addr = address;
 	segment_ptr = gmap_table_walk(address, gmap);
 	if (IS_ERR(segment_ptr))
 		return -EFAULT;
 	/* Convert the gmap address to an mm address. */
-	segment = *segment_ptr;
-	if (!(segment & _SEGMENT_ENTRY_INV)) {
-		page = pfn_to_page(segment >> PAGE_SHIFT);
-		mp = (struct gmap_pgtable *) page->index;
-		return mp->vmaddr | (address & ~PMD_MASK);
-	} else if (segment & _SEGMENT_ENTRY_RO) {
-		mm = gmap->mm;
-		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
-		vma = find_vma(mm, vmaddr);
-		if (!vma || vma->vm_start > vmaddr)
-			return -EFAULT;
-
-		/* Walk the parent mm page table */
-		pgd = pgd_offset(mm, vmaddr);
-		pud = pud_alloc(mm, pgd, vmaddr);
-		if (!pud)
-			return -ENOMEM;
-		pmd = pmd_alloc(mm, pud, vmaddr);
-		if (!pmd)
-			return -ENOMEM;
-		if (!pmd_present(*pmd) &&
-		    __pte_alloc(mm, vma, pmd, vmaddr))
-			return -ENOMEM;
-		/* pmd now points to a valid segment table entry. */
-		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
-		if (!rmap)
-			return -ENOMEM;
-		/* Link gmap segment table entry location to page table. */
-		page = pmd_page(*pmd);
-		mp = (struct gmap_pgtable *) page->index;
-		rmap->entry = segment_ptr;
-		spin_lock(&mm->page_table_lock);
-		list_add(&rmap->list, &mp->mapper);
-		spin_unlock(&mm->page_table_lock);
-		/* Set gmap segment table entry to page table. */
-		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
-		return vmaddr | (address & ~PMD_MASK);
+	while (1) {
+		segment = *segment_ptr;
+		if (!(segment & _SEGMENT_ENTRY_INV)) {
+			/* Page table is present */
+			page = pfn_to_page(segment >> PAGE_SHIFT);
+			mp = (struct gmap_pgtable *) page->index;
+			return mp->vmaddr | (address & ~PMD_MASK);
+		}
+		if (!(segment & _SEGMENT_ENTRY_RO))
+			/* Nothing mapped in the gmap address space. */
+			break;
+		rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
+		if (rc)
+			return rc;
 	}
 	return -EFAULT;
 }
@@ -574,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
 }
 EXPORT_SYMBOL_GPL(gmap_discard);
 
-void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
-{
-	struct gmap_rmap *rmap, *next;
-	struct gmap_pgtable *mp;
-	struct page *page;
-	int flush;
-
-	flush = 0;
-	spin_lock(&mm->page_table_lock);
-	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	mp = (struct gmap_pgtable *) page->index;
-	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
-		*rmap->entry =
-			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
-		list_del(&rmap->list);
-		kfree(rmap);
-		flush = 1;
-	}
-	spin_unlock(&mm->page_table_lock);
-	if (flush)
-		__tlb_flush_global();
-}
-
 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
 						    unsigned long vmaddr)
 {
@@ -649,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table)
 {
 }
 
-static inline void gmap_unmap_notifier(struct mm_struct *mm,
-					unsigned long *table)
+static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
+					   unsigned long *table)
 {
 }
 
@@ -716,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	unsigned int bit, mask;
 
 	if (mm_has_pgste(mm)) {
-		gmap_unmap_notifier(mm, table);
+		gmap_disconnect_pgtable(mm, table);
 		return page_table_free_pgste(table);
 	}
 	/* Free 1K/2K page table fragment of a 4K page */
@@ -759,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
 
 	mm = tlb->mm;
 	if (mm_has_pgste(mm)) {
-		gmap_unmap_notifier(mm, table);
+		gmap_disconnect_pgtable(mm, table);
 		table = (unsigned long *) (__pa(table) | FRAG_MASK);
 		tlb_remove_table(tlb, table);
 		return;
