@@ -407,15 +407,11 @@ static void __init map_pages(unsigned long start_vaddr,
 	unsigned long vaddr;
 	unsigned long ro_start;
 	unsigned long ro_end;
-	unsigned long fv_addr;
-	unsigned long gw_addr;
-	extern const unsigned long fault_vector_20;
-	extern void * const linux_gateway_page;
+	unsigned long kernel_end;

 	ro_start = __pa((unsigned long)_text);
 	ro_end   = __pa((unsigned long)&data_start);
-	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
-	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+	kernel_end = __pa((unsigned long)&_end);

 	end_paddr = start_paddr + size;

@@ -473,24 +469,25 @@ static void __init map_pages(unsigned long start_vaddr,
 		for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 			pte_t pte;

-			/*
-			 * Map the fault vector writable so we can
-			 * write the HPMC checksum.
-			 */
 			if (force)
 				pte = __mk_pte(address, pgprot);
-			else if (parisc_text_address(vaddr) &&
-				 address != fv_addr)
+			else if (parisc_text_address(vaddr)) {
 				pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+				if (address >= ro_start && address < kernel_end)
+					pte = pte_mkhuge(pte);
+			}
 			else
 #if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-			if (address >= ro_start && address < ro_end
-					&& address != fv_addr
-					&& address != gw_addr)
-				pte = __mk_pte(address, PAGE_KERNEL_RO);
-			else
+			if (address >= ro_start && address < ro_end) {
+				pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+				pte = pte_mkhuge(pte);
+			} else
 #endif
+			{
 				pte = __mk_pte(address, pgprot);
+				if (address >= ro_start && address < kernel_end)
+					pte = pte_mkhuge(pte);
+			}

 			if (address >= end_paddr) {
 				if (force)
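For readability outside the diff, the per-PTE protection choice this hunk installs boils down to roughly the following. This is a condensed sketch, not the literal kernel code: the force path, the CONFIG_PARISC_PAGE_SIZE_4KB special case and the surrounding loops are left out, and the locals are the ones declared in map_pages() above.

	/* Condensed sketch of the new PTE selection in map_pages() */
	if (parisc_text_address(vaddr)) {
		/* kernel text: executable mapping */
		pte = __mk_pte(address, PAGE_KERNEL_EXEC);
		if (address >= ro_start && address < kernel_end)
			pte = pte_mkhuge(pte);	/* huge if inside the kernel image */
	} else {
		/* everything else: the protection passed in by the caller */
		pte = __mk_pte(address, pgprot);
		if (address >= ro_start && address < kernel_end)
			pte = pte_mkhuge(pte);	/* kernel data is mapped huge as well */
	}

The older code additionally special-cased the fault vector and the gateway page by physical address; after this change those per-address exceptions (and the HPMC-checksum comment) are gone, and only the ro_start/kernel_end range check remains.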
@@ -534,15 +531,12 @@ void free_initmem(void)

 	/* force the kernel to see the new TLB entries */
 	__flush_tlb_range(0, init_begin, init_end);
-	/* Attempt to catch anyone trying to execute code here
-	 * by filling the page with BRK insns.
-	 */
-	memset((void *)init_begin, 0x00, init_end - init_begin);
+
 	/* finally dump all the instructions which were cached, since the
 	 * pages are no-longer executable */
 	flush_icache_range(init_begin, init_end);

-	free_initmem_default(-1);
+	free_initmem_default(POISON_FREE_INITMEM);

 	/* set up a new led state on systems shipped LED State panel */
 	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
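The free_initmem() hunk drops the hand-rolled memset() (whose removed comment notes that filling the region with 0x00 plants BRK instructions) and instead passes a poison pattern to free_initmem_default(). Conceptually, the generic helper walks the init area page by page, fills each page with the poison byte when one is given, and hands the page back to the allocator, along the lines of the simplified sketch below. This is an assumed shape, not the actual mm code, and poison_and_free() is a made-up name for illustration.

	/* Simplified idea of "poison, then free" for the init area */
	static unsigned long poison_and_free(void *start, void *end, int poison)
	{
		unsigned long pages = 0;
		void *pos;

		for (pos = start; pos < end; pos += PAGE_SIZE) {
			if (poison >= 0)
				memset(pos, poison, PAGE_SIZE);	/* e.g. POISON_FREE_INITMEM */
			free_reserved_page(virt_to_page(pos));
			pages++;
		}
		return pages;
	}

So free_initmem_default(POISON_FREE_INITMEM) keeps the "catch stray use of freed init memory" behaviour that the explicit memset() used to provide, whereas the old free_initmem_default(-1) call skipped poisoning entirely.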
@@ -712,8 +706,8 @@ static void __init pagetable_init(void)
 		unsigned long size;

 		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
-		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
 		size = pmem_ranges[range].pages << PAGE_SHIFT;
+		end_paddr = start_paddr + size;

 		map_pages((unsigned long)__va(start_paddr), start_paddr,
 			  size, PAGE_KERNEL, 0);