@@ -822,36 +822,6 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
 	return err;
 }
 
-static bool analyzer_is_valid_access(struct bpf_verifier_env *env, int off,
-				     struct bpf_insn_access_aux *info)
-{
-	switch (env->prog->type) {
-	case BPF_PROG_TYPE_XDP:
-		switch (off) {
-		case offsetof(struct xdp_buff, data):
-			info->reg_type = PTR_TO_PACKET;
-			return true;
-		case offsetof(struct xdp_buff, data_end):
-			info->reg_type = PTR_TO_PACKET_END;
-			return true;
-		}
-		return false;
-	case BPF_PROG_TYPE_SCHED_CLS:
-		switch (off) {
-		case offsetof(struct sk_buff, data):
-			info->reg_type = PTR_TO_PACKET;
-			return true;
-		case offsetof(struct sk_buff, cb) +
-		     offsetof(struct bpf_skb_data_end, data_end):
-			info->reg_type = PTR_TO_PACKET_END;
-			return true;
-		}
-		return false;
-	default:
-		return false;
-	}
-}
-
 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
 			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
@@ -860,23 +830,21 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
 		.reg_type = *reg_type,
 	};
 
-	if (env->analyzer_ops) {
-		if (analyzer_is_valid_access(env, off, &info)) {
-			*reg_type = info.reg_type;
-			return 0;
-		}
-	} else if (env->ops->is_valid_access &&
-		   env->ops->is_valid_access(off, size, t, &info)) {
+	if (env->ops->is_valid_access &&
+	    env->ops->is_valid_access(off, size, t, &info)) {
 		/* A non zero info.ctx_field_size indicates that this field is a
 		 * candidate for later verifier transformation to load the whole
 		 * field and then apply a mask when accessed with a narrower
 		 * access than actual ctx access size. A zero info.ctx_field_size
 		 * will only allow for whole field access and rejects any other
 		 * type of narrower access.
 		 */
-		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
 		*reg_type = info.reg_type;
 
+		if (env->analyzer_ops)
+			return 0;
+
+		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
 		/* remember the offset of last byte accessed in ctx */
 		if (env->prog->aux->max_ctx_offset < off + size)
 			env->prog->aux->max_ctx_offset = off + size;
@@ -4400,12 +4368,21 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	return ret;
 }
 
+static const struct bpf_verifier_ops * const bpf_analyzer_ops[] = {
+	[BPF_PROG_TYPE_XDP]		= &xdp_analyzer_ops,
+	[BPF_PROG_TYPE_SCHED_CLS]	= &tc_cls_act_analyzer_ops,
+};
+
 int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
 		 void *priv)
 {
 	struct bpf_verifier_env *env;
 	int ret;
 
+	if (prog->type >= ARRAY_SIZE(bpf_analyzer_ops) ||
+	    !bpf_analyzer_ops[prog->type])
+		return -EOPNOTSUPP;
+
 	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
 	if (!env)
 		return -ENOMEM;
@@ -4416,7 +4393,7 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
 	if (!env->insn_aux_data)
 		goto err_free_env;
 	env->prog = prog;
-	env->ops = bpf_verifier_ops[env->prog->type];
+	env->ops = bpf_analyzer_ops[env->prog->type];
 	env->analyzer_ops = ops;
 	env->analyzer_priv = priv;
 
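For reference, the lookup this patch introduces is a designated-initializer array indexed by program type, guarded by a bounds check plus a NULL check before the entry is used. Below is a minimal, self-contained userspace sketch of that pattern; the names used here (prog_type, verifier_ops, pick_ops) are hypothetical stand-ins for illustration, not the kernel's own identifiers.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures, illustration only. */
enum prog_type { PROG_TYPE_UNSPEC, PROG_TYPE_XDP, PROG_TYPE_SCHED_CLS };

struct verifier_ops {
	const char *name;
};

static const struct verifier_ops xdp_ops       = { .name = "xdp" };
static const struct verifier_ops sched_cls_ops = { .name = "sched_cls" };

/* Designated-initializer table indexed by program type; entries not
 * listed default to NULL, which means "not supported".
 */
static const struct verifier_ops *const analyzer_ops[] = {
	[PROG_TYPE_XDP]		= &xdp_ops,
	[PROG_TYPE_SCHED_CLS]	= &sched_cls_ops,
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int pick_ops(enum prog_type type, const struct verifier_ops **out)
{
	/* Same guard shape as the patch: reject out-of-range types and
	 * types without a table entry before dereferencing anything.
	 */
	if ((size_t)type >= ARRAY_SIZE(analyzer_ops) || !analyzer_ops[type])
		return -EOPNOTSUPP;
	*out = analyzer_ops[type];
	return 0;
}

int main(void)
{
	const struct verifier_ops *ops;

	if (!pick_ops(PROG_TYPE_XDP, &ops))
		printf("XDP -> %s ops\n", ops->name);
	if (pick_ops(PROG_TYPE_UNSPEC, &ops) == -EOPNOTSUPP)
		printf("UNSPEC -> unsupported\n");
	return 0;
}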