@@ -39,7 +39,7 @@ __naked void simple(void)
 	: __clobber_all);
 }
 
-/* The logic for detecting and verifying nocsr pattern is the same for
+/* The logic for detecting and verifying bpf_fastcall pattern is the same for
  * any arch, however x86 differs from arm64 or riscv64 in a way
  * bpf_get_smp_processor_id is rewritten:
  * - on x86 it is done by verifier
@@ -52,7 +52,7 @@ __naked void simple(void)
  *
  * It is really desirable to check instruction indexes in the xlated
  * patterns, so add this canary test to check that function rewrite by
- * jit is correctly processed by nocsr logic, keep the rest of the
+ * jit is correctly processed by bpf_fastcall logic, keep the rest of the
  * tests as x86.
  */
 SEC("raw_tp")
@@ -430,7 +430,7 @@ __naked static void bad_write_in_subprog_aux(void)
 {
 	asm volatile (
 	"r0 = 1;"
-	"*(u64 *)(r1 - 0) = r0;"	/* invalidates nocsr contract for caller: */
+	"*(u64 *)(r1 - 0) = r0;"	/* invalidates bpf_fastcall contract for caller: */
 	"exit;"				/* caller stack at -8 used outside of the pattern */
 	::: __clobber_all);
 }
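The aux function only makes sense next to its caller, which this hunk does not show. A sketch of the caller side, reconstructed from the comments above (body and wiring are an assumption, not code from this diff): the caller sets up a bpf_fastcall pattern at fp[-8], then leaks a pointer to that very slot into the subprogram, which writes through it, so the slot is used outside the pattern.

__naked void bad_write_in_subprog(void)
{
	asm volatile (
	"r1 = 1;"
	"*(u64 *)(r10 - 8) = r1;"		/* spill: pattern at -8 */
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 8);"		/* fill */
	"r1 = r10;"
	"r1 += -8;"				/* pointer to the pattern slot... */
	"call bad_write_in_subprog_aux;"	/* ...written through in the callee */
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id)
	: __clobber_all);
}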
@@ -445,15 +445,15 @@ __naked void bad_helper_write(void)
 {
 	asm volatile (
 	"r1 = 1;"
-	/* nocsr pattern with stack offset -8 */
+	/* bpf_fastcall pattern with stack offset -8 */
 	"*(u64 *)(r10 - 8) = r1;"
 	"call %[bpf_get_smp_processor_id];"
 	"r1 = *(u64 *)(r10 - 8);"
 	"r1 = r10;"
 	"r1 += -8;"
 	"r2 = 1;"
 	"r3 = 42;"
-	/* read dst is fp[-8], thus nocsr rewrite not applied */
+	/* read dst is fp[-8], thus bpf_fastcall rewrite not applied */
 	"call %[bpf_probe_read_kernel];"
 	"exit;"
 	:
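A hedged contrast, assuming the contract only forbids non-pattern stack accesses at or below the pattern's offset: if the pattern sits at a deeper slot than the one the helper writes to, fp[-16] is touched only by the spill/fill pair itself and the rewrite should remain applicable. A hypothetical variant, not in the file:

__naked void helper_write_above_pattern(void)	/* hypothetical name */
{
	asm volatile (
	"r1 = 1;"
	/* bpf_fastcall pattern at the deeper offset -16 */
	"*(u64 *)(r10 - 16) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 16);"
	"r1 = r10;"
	"r1 += -8;"				/* read dst is fp[-8], above the pattern */
	"r2 = 1;"
	"r3 = 42;"
	"call %[bpf_probe_read_kernel];"
	"exit;"
	:
	: __imm(bpf_get_smp_processor_id),
	  __imm(bpf_probe_read_kernel)
	: __clobber_all);
}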
@@ -553,7 +553,7 @@ __arch_x86_64
 __log_level(4) __msg("stack depth 8")
 __xlated("2: r0 = &(void __percpu *)(r0)")
 __success
-__naked void helper_call_does_not_prevent_nocsr(void)
+__naked void helper_call_does_not_prevent_bpf_fastcall(void)
 {
 	asm volatile (
 	"r1 = 1;"
@@ -640,7 +640,7 @@ __naked int bpf_loop_interaction1(void)
 {
 	asm volatile (
 	"r1 = 1;"
-	/* nocsr stack region at -16, but could be removed */
+	/* bpf_fastcall stack region at -16, but could be removed */
 	"*(u64 *)(r10 - 16) = r1;"
 	"call %[bpf_get_smp_processor_id];"
 	"r1 = *(u64 *)(r10 - 16);"
@@ -680,7 +680,7 @@ __naked int bpf_loop_interaction2(void)
 {
 	asm volatile (
 	"r1 = 42;"
-	/* nocsr stack region at -16, cannot be removed */
+	/* bpf_fastcall stack region at -16, cannot be removed */
 	"*(u64 *)(r10 - 16) = r1;"
 	"call %[bpf_get_smp_processor_id];"
 	"r1 = *(u64 *)(r10 - 16);"
@@ -710,8 +710,8 @@ __msg("stack depth 512+0")
 __xlated("r0 = &(void __percpu *)(r0)")
 __success
 /* cumulative_stack_depth() stack usage is MAX_BPF_STACK,
- * called subprogram uses an additional slot for nocsr spill/fill,
- * since nocsr spill/fill could be removed the program still fits
+ * called subprogram uses an additional slot for bpf_fastcall spill/fill,
+ * since bpf_fastcall spill/fill could be removed the program still fits
  * in MAX_BPF_STACK and should be accepted.
  */
 __naked int cumulative_stack_depth(void)
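Spelling out the accounting this comment relies on (MAX_BPF_STACK is 512; a worked sketch, not code from the diff):

/* main program:   512 bytes of stack (MAX_BPF_STACK)
 * subprogram:     + 8 bytes, used only by the bpf_fastcall spill/fill
 *
 * The spill/fill pair is removable, so the subprogram contributes 0
 * bytes: 512 + 0 <= MAX_BPF_STACK, the program is accepted, and the
 * verifier log matches __msg("stack depth 512+0").
 */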
@@ -749,7 +749,7 @@ __xlated("3: r0 = &(void __percpu *)(r0)")
 __xlated("4: r0 = *(u32 *)(r0 +0)")
 __xlated("5: exit")
 __success
-__naked int nocsr_max_stack_ok(void)
+__naked int bpf_fastcall_max_stack_ok(void)
 {
 	asm volatile (
 	"r1 = 42;"
@@ -771,15 +771,15 @@ __arch_x86_64
 __log_level(4)
 __msg("stack depth 520")
 __failure
-__naked int nocsr_max_stack_fail(void)
+__naked int bpf_fastcall_max_stack_fail(void)
 {
 	asm volatile (
 	"r1 = 42;"
 	"*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
 	"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
 	"call %[bpf_get_smp_processor_id];"
 	"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
-	/* call to prandom blocks nocsr rewrite */
+	/* call to prandom blocks bpf_fastcall rewrite */
 	"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
 	"call %[bpf_get_prandom_u32];"
 	"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"