@@ -40,15 +40,6 @@ struct relocation_handlers {
 				  long buffer);
 };
 
-unsigned int initialize_relocation_hashtable(unsigned int num_relocations);
-void process_accumulated_relocations(struct module *me);
-int add_relocation_to_accumulate(struct module *me, int type, void *location,
-				 unsigned int hashtable_bits, Elf_Addr v);
-
-struct hlist_head *relocation_hashtable;
-
-struct list_head used_buckets_list;
-
 /*
  * The auipc+jalr instruction pair can reach any PC-relative offset
  * in the range [-2^31 - 2^11, 2^31 - 2^11)
@@ -64,7 +55,7 @@ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
 
 static int riscv_insn_rmw(void *location, u32 keep, u32 set)
 {
-	u16 *parcel = location;
+	__le16 *parcel = location;
 	u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
 
 	insn &= keep;
@@ -77,7 +68,7 @@ static int riscv_insn_rmw(void *location, u32 keep, u32 set)
 
 static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
 {
-	u16 *parcel = location;
+	__le16 *parcel = location;
 	u16 insn = le16_to_cpu(*parcel);
 
 	insn &= keep;
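
The two hunks above only change the parcel pointer from u16 * to __le16 * so that sparse can verify the le16_to_cpu() conversions; the read-modify-write contract itself is unchanged: keep is a mask of instruction bits to preserve, and set is ORed in afterwards. As an illustration of that contract, a hypothetical B-type branch handler built on riscv_insn_rmw() could look like the sketch below (not part of this patch; the mask and shifts follow the RISC-V B-type immediate layout):

/*
 * Hypothetical caller of riscv_insn_rmw(), shown only to illustrate the
 * keep/set contract. Not part of this patch.
 */
static int patch_btype_imm(struct module *me, void *location, Elf_Addr v)
{
	ptrdiff_t offset = (void *)v - location;
	u32 imm12   = (offset & 0x1000) << (31 - 12);	/* imm[12]   -> bit 31     */
	u32 imm11   = (offset & 0x800)  >> (11 - 7);	/* imm[11]   -> bit 7      */
	u32 imm10_5 = (offset & 0x7e0)  << (30 - 10);	/* imm[10:5] -> bits 30:25 */
	u32 imm4_1  = (offset & 0x1e)   << (11 - 4);	/* imm[4:1]  -> bits 11:8  */

	/* 0x01fff07f keeps bits 24:12 (rs2/rs1/funct3) and 6:0 (opcode). */
	return riscv_insn_rmw(location, 0x01fff07f,
			      imm12 | imm11 | imm10_5 | imm4_1);
}
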
@@ -604,7 +595,10 @@ static const struct relocation_handlers reloc_handlers[] = {
 	/* 192-255 nonstandard ABI extensions */
 };
 
-void process_accumulated_relocations(struct module *me)
+static void
+process_accumulated_relocations(struct module *me,
+				struct hlist_head **relocation_hashtable,
+				struct list_head *used_buckets_list)
 {
 	/*
 	 * Only ADD/SUB/SET/ULEB128 should end up here.
@@ -624,18 +618,25 @@ void process_accumulated_relocations(struct module *me)
 	 * - Each relocation entry for a location address
 	 */
 	struct used_bucket *bucket_iter;
+	struct used_bucket *bucket_iter_tmp;
 	struct relocation_head *rel_head_iter;
+	struct hlist_node *rel_head_iter_tmp;
 	struct relocation_entry *rel_entry_iter;
+	struct relocation_entry *rel_entry_iter_tmp;
 	int curr_type;
 	void *location;
 	long buffer;
 
-	list_for_each_entry(bucket_iter, &used_buckets_list, head) {
-		hlist_for_each_entry(rel_head_iter, bucket_iter->bucket, node) {
+	list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
+				 used_buckets_list, head) {
+		hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
+					  bucket_iter->bucket, node) {
 			buffer = 0;
 			location = rel_head_iter->location;
-			list_for_each_entry(rel_entry_iter,
-					    rel_head_iter->rel_entry, head) {
+			list_for_each_entry_safe(rel_entry_iter,
+						 rel_entry_iter_tmp,
+						 rel_head_iter->rel_entry,
+						 head) {
 				curr_type = rel_entry_iter->type;
 				reloc_handlers[curr_type].reloc_handler(
 					me, &buffer, rel_entry_iter->value);
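
The three nested loops above walk a small hierarchy: each used bucket of the hash table holds relocation_head entries (one per patched location), and each head holds a list of relocation_entry records that get merged into a single buffer. For orientation, the structures involved look roughly like this (simplified sketch for reference; the real declarations near the top of module.c are not touched by this patch):

struct used_bucket {
	struct list_head head;		/* link in used_buckets_list */
	struct hlist_head *bucket;	/* hash bucket that saw at least one entry */
};

struct relocation_head {
	struct hlist_node node;		/* chained into the hash bucket */
	struct list_head *rel_entry;	/* list of relocation_entry records */
	void *location;			/* instruction address being patched */
};

struct relocation_entry {
	struct list_head head;		/* link in rel_head->rel_entry */
	Elf_Addr value;			/* relocation value (v) */
	int type;			/* ELF relocation type */
};
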
@@ -648,11 +649,14 @@ void process_accumulated_relocations(struct module *me)
 		kfree(bucket_iter);
 	}
 
-	kfree(relocation_hashtable);
+	kfree(*relocation_hashtable);
 }
 
-int add_relocation_to_accumulate(struct module *me, int type, void *location,
-				 unsigned int hashtable_bits, Elf_Addr v)
+static int add_relocation_to_accumulate(struct module *me, int type,
+					void *location,
+					unsigned int hashtable_bits, Elf_Addr v,
+					struct hlist_head *relocation_hashtable,
+					struct list_head *used_buckets_list)
 {
 	struct relocation_entry *entry;
 	struct relocation_head *rel_head;
@@ -661,6 +665,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
 	unsigned long hash;
 
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+	if (!entry)
+		return -ENOMEM;
+
 	INIT_LIST_HEAD(&entry->head);
 	entry->type = type;
 	entry->value = v;
@@ -669,7 +677,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
 
 	current_head = &relocation_hashtable[hash];
 
-	/* Find matching location (if any) */
+	/*
+	 * Search for the relocation_head that tracks the relocations at the
+	 * provided location.
+	 */
 	bool found = false;
 	struct relocation_head *rel_head_iter;
 
@@ -681,19 +692,45 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
 		}
 	}
 
+	/*
+	 * If there have not yet been any relocations at the provided location,
+	 * create a relocation_head for that location and populate it with this
+	 * relocation_entry.
+	 */
 	if (!found) {
 		rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);
+
+		if (!rel_head) {
+			kfree(entry);
+			return -ENOMEM;
+		}
+
 		rel_head->rel_entry =
 			kmalloc(sizeof(struct list_head), GFP_KERNEL);
+
+		if (!rel_head->rel_entry) {
+			kfree(entry);
+			kfree(rel_head);
+			return -ENOMEM;
+		}
+
 		INIT_LIST_HEAD(rel_head->rel_entry);
 		rel_head->location = location;
 		INIT_HLIST_NODE(&rel_head->node);
 		if (!current_head->first) {
 			bucket =
				kmalloc(sizeof(struct used_bucket), GFP_KERNEL);
+
+			if (!bucket) {
+				kfree(entry);
+				kfree(rel_head->rel_entry);
+				kfree(rel_head);
+				return -ENOMEM;
+			}
+
 			INIT_LIST_HEAD(&bucket->head);
 			bucket->bucket = current_head;
-			list_add(&bucket->head, &used_buckets_list);
+			list_add(&bucket->head, used_buckets_list);
 		}
 		hlist_add_head(&rel_head->node, current_head);
 	}
@@ -704,7 +741,9 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
 	return 0;
 }
 
-unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
+static int
+initialize_relocation_hashtable(unsigned int num_relocations,
+				struct hlist_head **relocation_hashtable)
 {
 	/* Can safely assume that bits is not greater than sizeof(long) */
 	unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
@@ -720,12 +759,13 @@ unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
 
 	hashtable_size <<= should_double_size;
 
-	relocation_hashtable = kmalloc_array(hashtable_size,
-					     sizeof(*relocation_hashtable),
-					     GFP_KERNEL);
-	__hash_init(relocation_hashtable, hashtable_size);
+	*relocation_hashtable = kmalloc_array(hashtable_size,
+					      sizeof(*relocation_hashtable),
+					      GFP_KERNEL);
+	if (!*relocation_hashtable)
+		return -ENOMEM;
 
-	INIT_LIST_HEAD(&used_buckets_list);
+	__hash_init(*relocation_hashtable, hashtable_size);
 
 	return hashtable_bits;
 }
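
initialize_relocation_hashtable() rounds the table size up to a power of two so that the returned bit count can drive the hash used to index relocation_hashtable[] in add_relocation_to_accumulate(). A minimal userspace sketch of that size/bits relationship follows (stand-in helpers only; the kernel's roundup_pow_of_two() and the doubling heuristic above are assumed, not reproduced):

#include <stdio.h>

/* Userspace stand-ins for the kernel's roundup_pow_of_two()/ilog2(). */
static unsigned long roundup_pow2(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int log2_ul(unsigned long n)
{
	unsigned int bits = 0;

	while (n > 1) {
		n >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	unsigned int num_relocations = 100;
	unsigned long size = roundup_pow2(num_relocations);	/* 128 */
	unsigned int bits = log2_ul(size);			/* 7 */

	/* A hash reduced to 'bits' bits indexes a table of 'size' buckets. */
	printf("num=%u size=%lu bits=%u\n", num_relocations, size, bits);
	return 0;
}
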
@@ -742,7 +782,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
 	Elf_Addr v;
 	int res;
 	unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
-	unsigned int hashtable_bits = initialize_relocation_hashtable(num_relocations);
+	struct hlist_head *relocation_hashtable;
+	struct list_head used_buckets_list;
+	int hashtable_bits;
+
+	hashtable_bits = initialize_relocation_hashtable(num_relocations,
+							 &relocation_hashtable);
+
+	if (hashtable_bits < 0)
+		return hashtable_bits;
+
+	INIT_LIST_HEAD(&used_buckets_list);
 
 	pr_debug("Applying relocate section %u to %u\n", relsec,
 		 sechdrs[relsec].sh_info);
@@ -823,14 +873,18 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
 		}
 
 		if (reloc_handlers[type].accumulate_handler)
-			res = add_relocation_to_accumulate(me, type, location, hashtable_bits, v);
+			res = add_relocation_to_accumulate(me, type, location,
+							   hashtable_bits, v,
+							   relocation_hashtable,
+							   &used_buckets_list);
 		else
 			res = handler(me, location, v);
 		if (res)
 			return res;
 	}
 
-	process_accumulated_relocations(me);
+	process_accumulated_relocations(me, &relocation_hashtable,
+					&used_buckets_list);
 
 	return 0;
 }
@@ -840,7 +894,8 @@ void *module_alloc(unsigned long size)
 {
 	return __vmalloc_node_range(size, 1, MODULES_VADDR,
 				    MODULES_END, GFP_KERNEL,
-				    PAGE_KERNEL, 0, NUMA_NO_NODE,
+				    PAGE_KERNEL, VM_FLUSH_RESET_PERMS,
+				    NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
 #endif
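
The final hunk passes VM_FLUSH_RESET_PERMS to __vmalloc_node_range() so that freeing module text also resets direct-map permissions and flushes stale TLB entries rather than leaving writable or executable aliases behind. A sketch of the lifecycle this flag protects (illustrative only, kernel context; the set_memory_*() calls mirror the usual module finalization path and are not part of this diff):

/* Hypothetical helper, shown only to illustrate why the flag matters. */
static int example_text_lifecycle(void)
{
	void *text = module_alloc(PAGE_SIZE);

	if (!text)
		return -ENOMEM;

	/* ... copy instructions in, apply relocations ... */

	set_memory_ro((unsigned long)text, 1);
	set_memory_x((unsigned long)text, 1);

	/*
	 * vfree() sees VM_FLUSH_RESET_PERMS on the area, resets the direct-map
	 * permissions and flushes the TLB before the pages can be reused.
	 */
	vfree(text);

	return 0;
}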