@@ -797,29 +797,6 @@ const struct bpf_func_proto bpf_task_pt_regs_proto = {
 	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
 };
 
-BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
-{
-	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	struct cgroup *cgrp;
-
-	if (unlikely(idx >= array->map.max_entries))
-		return -E2BIG;
-
-	cgrp = READ_ONCE(array->ptrs[idx]);
-	if (unlikely(!cgrp))
-		return -EAGAIN;
-
-	return task_under_cgroup_hierarchy(current, cgrp);
-}
-
-static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
-	.func		= bpf_current_task_under_cgroup,
-	.gpl_only	= false,
-	.ret_type	= RET_INTEGER,
-	.arg1_type	= ARG_CONST_MAP_PTR,
-	.arg2_type	= ARG_ANYTHING,
-};
-
 struct send_signal_irq_work {
 	struct irq_work irq_work;
 	struct task_struct *task;
@@ -1480,8 +1457,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_numa_node_id_proto;
 	case BPF_FUNC_perf_event_read:
 		return &bpf_perf_event_read_proto;
-	case BPF_FUNC_current_task_under_cgroup:
-		return &bpf_current_task_under_cgroup_proto;
 	case BPF_FUNC_get_prandom_u32:
 		return &bpf_get_prandom_u32_proto;
 	case BPF_FUNC_probe_write_user:
@@ -1510,6 +1485,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_cgrp_storage_get_proto;
 	case BPF_FUNC_cgrp_storage_delete:
 		return &bpf_cgrp_storage_delete_proto;
+	case BPF_FUNC_current_task_under_cgroup:
+		return &bpf_current_task_under_cgroup_proto;
 #endif
 	case BPF_FUNC_send_signal:
 		return &bpf_send_signal_proto;
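For context, a minimal sketch of how a tracing program uses the helper whose proto entry moves above; this is an illustration, not part of the patch. The helper checks whether the current task sits under the cgroup stored at a given index of a BPF_MAP_TYPE_CGROUP_ARRAY, which userspace must populate with a cgroup fd beforehand. The map name, tracepoint target, and message are assumed for the example.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical cgroup array; userspace stores a cgroup fd at index 0. */
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} cgrp_map SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_openat")
int trace_openat(void *ctx)
{
	/* Per the removed implementation above: returns 1 if current is under
	 * the cgroup hierarchy at index 0, 0 if not, -E2BIG for an
	 * out-of-range index, -EAGAIN if the slot is empty. */
	long ret = bpf_current_task_under_cgroup(&cgrp_map, 0);

	if (ret == 1)
		bpf_printk("openat() from a task inside the target cgroup");
	return 0;
}

char LICENSE[] SEC("license") = "GPL";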