@@ -39,7 +39,7 @@ __naked void simple(void)
 	: __clobber_all);
 }
 
-/* The logic for detecting and verifying nocsr pattern is the same for
+/* The logic for detecting and verifying bpf_fastcall pattern is the same for
  * any arch, however x86 differs from arm64 or riscv64 in a way
  * bpf_get_smp_processor_id is rewritten:
  * - on x86 it is done by verifier
@@ -52,7 +52,7 @@ __naked void simple(void)
  *
  * It is really desirable to check instruction indexes in the xlated
  * patterns, so add this canary test to check that function rewrite by
- * jit is correctly processed by nocsr logic, keep the rest of the
+ * jit is correctly processed by bpf_fastcall logic, keep the rest of the
  * tests as x86.
  */
 SEC("raw_tp")
@@ -463,7 +463,7 @@ __naked static void bad_write_in_subprog_aux(void)
 {
 	asm volatile (
 	"r0 = 1;"
-	"*(u64 *)(r1 - 0) = r0;" /* invalidates nocsr contract for caller: */
+	"*(u64 *)(r1 - 0) = r0;" /* invalidates bpf_fastcall contract for caller: */
 	"exit;" /* caller stack at -8 used outside of the pattern */
 	::: __clobber_all);
 }
@@ -480,15 +480,15 @@ __naked void bad_helper_write(void)
 {
 	asm volatile (
 	"r1 = 1;"
-	/* nocsr pattern with stack offset -8 */
+	/* bpf_fastcall pattern with stack offset -8 */
 	"*(u64 *)(r10 - 8) = r1;"
 	"call %[bpf_get_smp_processor_id];"
 	"r1 = *(u64 *)(r10 - 8);"
 	"r1 = r10;"
 	"r1 += -8;"
 	"r2 = 1;"
 	"r3 = 42;"
-	/* read dst is fp[-8], thus nocsr rewrite not applied */
+	/* read dst is fp[-8], thus bpf_fastcall rewrite not applied */
 	"call %[bpf_probe_read_kernel];"
 	"exit;"
 	:
@@ -598,7 +598,7 @@ __arch_x86_64
 __log_level(4) __msg("stack depth 8")
 __xlated("2: r0 = &(void __percpu *)(r0)")
 __success
-__naked void helper_call_does_not_prevent_nocsr(void)
+__naked void helper_call_does_not_prevent_bpf_fastcall(void)
 {
 	asm volatile (
 	"r1 = 1;"
@@ -689,7 +689,7 @@ __naked int bpf_loop_interaction1(void)
 {
 	asm volatile (
 	"r1 = 1;"
-	/* nocsr stack region at -16, but could be removed */
+	/* bpf_fastcall stack region at -16, but could be removed */
 	"*(u64 *)(r10 - 16) = r1;"
 	"call %[bpf_get_smp_processor_id];"
 	"r1 = *(u64 *)(r10 - 16);"
@@ -729,7 +729,7 @@ __naked int bpf_loop_interaction2(void)
 {
 	asm volatile (
 	"r1 = 42;"
-	/* nocsr stack region at -16, cannot be removed */
+	/* bpf_fastcall stack region at -16, cannot be removed */
 	"*(u64 *)(r10 - 16) = r1;"
 	"call %[bpf_get_smp_processor_id];"
 	"r1 = *(u64 *)(r10 - 16);"
@@ -759,8 +759,8 @@ __msg("stack depth 512+0")
 __xlated("r0 = &(void __percpu *)(r0)")
 __success
 /* cumulative_stack_depth() stack usage is MAX_BPF_STACK,
- * called subprogram uses an additional slot for nocsr spill/fill,
- * since nocsr spill/fill could be removed the program still fits
+ * called subprogram uses an additional slot for bpf_fastcall spill/fill,
+ * since bpf_fastcall spill/fill could be removed the program still fits
  * in MAX_BPF_STACK and should be accepted.
  */
 __naked int cumulative_stack_depth(void)
@@ -798,7 +798,7 @@ __xlated("3: r0 = &(void __percpu *)(r0)")
 __xlated("4: r0 = *(u32 *)(r0 +0)")
 __xlated("5: exit")
 __success
-__naked int nocsr_max_stack_ok(void)
+__naked int bpf_fastcall_max_stack_ok(void)
 {
 	asm volatile (
 	"r1 = 42;"
@@ -820,15 +820,15 @@ __arch_x86_64
 __log_level(4)
 __msg("stack depth 520")
 __failure
-__naked int nocsr_max_stack_fail(void)
+__naked int bpf_fastcall_max_stack_fail(void)
 {
 	asm volatile (
 	"r1 = 42;"
 	"*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
 	"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
 	"call %[bpf_get_smp_processor_id];"
 	"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
-	/* call to prandom blocks nocsr rewrite */
+	/* call to prandom blocks bpf_fastcall rewrite */
 	"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
 	"call %[bpf_get_prandom_u32];"
 	"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"