@@ -256,6 +256,19 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int
 	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
 }
 
+/* Caller must hold vmlist_lock */
+static struct vm_struct *__find_vm_area(void *addr)
+{
+	struct vm_struct *tmp;
+
+	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
+		if (tmp->addr == addr)
+			break;
+	}
+
+	return tmp;
+}
+
 /* Caller must hold vmlist_lock */
 struct vm_struct *__remove_vm_area(void *addr)
 {
@@ -498,10 +511,32 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc);
 
+/**
+ *	vmalloc_user  -  allocate virtually contiguous memory which has
+ *			 been zeroed so it can be mapped to userspace without
+ *			 leaking data.
+ *
+ *	@size:		allocation size
+ */
+void *vmalloc_user(unsigned long size)
+{
+	struct vm_struct *area;
+	void *ret;
+
+	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+	write_lock(&vmlist_lock);
+	area = __find_vm_area(ret);
+	area->flags |= VM_USERMAP;
+	write_unlock(&vmlist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
 /**
  *	vmalloc_node  -  allocate memory on a specific node
  *
@@ -516,7 +551,7 @@ EXPORT_SYMBOL(vmalloc);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
 }
 EXPORT_SYMBOL(vmalloc_node);
 
@@ -556,6 +591,28 @@ void *vmalloc_32(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc_32);
 
+/**
+ *	vmalloc_32_user - allocate virtually contiguous memory (32bit
+ *			  addressable) which is zeroed so it can be
+ *			  mapped to userspace without leaking data.
+ *
+ *	@size:		allocation size
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+	struct vm_struct *area;
+	void *ret;
+
+	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+	write_lock(&vmlist_lock);
+	area = __find_vm_area(ret);
+	area->flags |= VM_USERMAP;
+	write_unlock(&vmlist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(vmalloc_32_user);
+
 long vread(char *buf, char *addr, unsigned long count)
 {
 	struct vm_struct *tmp;
@@ -630,3 +687,64 @@ long vwrite(char *buf, char *addr, unsigned long count)
 	read_unlock(&vmlist_lock);
 	return buf - buf_start;
 }
+
+/**
+ *	remap_vmalloc_range  -  map vmalloc pages to userspace
+ *
+ *	@vma:		vma to cover (map full range of vma)
+ *	@addr:		vmalloc memory
+ *	@pgoff:		number of pages into addr before first page to map
+ *	@returns:	0 for success, -Exxx on failure
+ *
+ *	This function checks that addr is a valid vmalloc'ed area, and
+ *	that it is big enough to cover the vma. Will return failure if
+ *	that criteria isn't met.
+ *
+ *	Similar to remap_pfn_range (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+						unsigned long pgoff)
+{
+	struct vm_struct *area;
+	unsigned long uaddr = vma->vm_start;
+	unsigned long usize = vma->vm_end - vma->vm_start;
+	int ret;
+
+	if ((PAGE_SIZE-1) & (unsigned long)addr)
+		return -EINVAL;
+
+	read_lock(&vmlist_lock);
+	area = __find_vm_area(addr);
+	if (!area)
+		goto out_einval_locked;
+
+	if (!(area->flags & VM_USERMAP))
+		goto out_einval_locked;
+
+	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+		goto out_einval_locked;
+	read_unlock(&vmlist_lock);
+
+	addr += pgoff << PAGE_SHIFT;
+	do {
+		struct page *page = vmalloc_to_page(addr);
+		ret = vm_insert_page(vma, uaddr, page);
+		if (ret)
+			return ret;
+
+		uaddr += PAGE_SIZE;
+		addr += PAGE_SIZE;
+		usize -= PAGE_SIZE;
+	} while (usize > 0);
+
+	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
+	vma->vm_flags |= VM_RESERVED;
+
+	return ret;
+
+out_einval_locked:
+	read_unlock(&vmlist_lock);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
+
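
A minimal usage sketch, not part of the patch: a driver that wants to hand a vmalloc'ed buffer to userspace would allocate it with vmalloc_user() (or vmalloc_32_user()) so the area is zeroed and marked VM_USERMAP, then call remap_vmalloc_range() from its mmap handler. The names MY_BUF_SIZE, my_dev_buf, my_dev_alloc_buf and my_dev_mmap below are hypothetical, for illustration only.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define MY_BUF_SIZE	(64 * PAGE_SIZE)	/* arbitrary example size */

static void *my_dev_buf;			/* buffer shared with userspace */

static int my_dev_alloc_buf(void)
{
	/* Zeroed allocation; vmalloc_user() also sets VM_USERMAP on the
	 * area, which remap_vmalloc_range() insists on. */
	my_dev_buf = vmalloc_user(MY_BUF_SIZE);
	return my_dev_buf ? 0 : -ENOMEM;
}

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Map the whole vma onto the buffer, skipping vm_pgoff pages.
	 * Fails with -EINVAL if the buffer is not VM_USERMAP or the vma
	 * does not fit inside the remaining buffer. */
	return remap_vmalloc_range(vma, my_dev_buf, vma->vm_pgoff);
}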