 #define noinline __attribute__((noinline))
 #endif

+#ifndef __nocf_check
+#define __nocf_check __attribute__((nocf_check))
+#endif
+
+#ifndef __naked
+#define __naked __attribute__((__naked__))
+#endif
+
 #ifndef PR_SET_NO_NEW_PRIVS
 #define PR_SET_NO_NEW_PRIVS 38
 #define PR_GET_NO_NEW_PRIVS 39
@@ -4896,7 +4904,36 @@ TEST(tsync_vs_dead_thread_leader)
 	EXPECT_EQ(0, status);
 }

-noinline int probed(void)
+#ifdef __x86_64__
+
+/*
+ * We need a naked probed_uprobe function. Use __nocf_check to skip a
+ * possible endbr64 instruction and ignore -Wattributes, otherwise the
+ * compilation might fail.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wattributes"
+
+__naked __nocf_check noinline int probed_uprobe(void)
+{
+	/*
+	 * An optimized uprobe is possible only on top of a nop5 instruction.
+	 */
+	asm volatile ("\n"
+		".byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n"
+		"ret\n"
+	);
+}
+#pragma GCC diagnostic pop
+
+#else
+noinline int probed_uprobe(void)
+{
+	return 1;
+}
+#endif
+
+noinline int probed_uretprobe(void)
 {
 	return 1;
 }
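For reference, the .byte sequence in probed_uprobe() above encodes the canonical 5-byte NOP, nopl 0x0(%rax,%rax,1), which is the instruction the optimized uprobe machinery expects at the probe site. A minimal sketch, not part of this patch, of how that pattern could be sanity-checked at runtime (starts_with_nop5 is a hypothetical helper; reading the code bytes through a converted function pointer assumes the usual readable text mapping on Linux):

#include <stdint.h>
#include <string.h>

/* The 5-byte NOP (nopl 0x0(%rax,%rax,1)) expected at the probe site. */
static const unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

/* Hypothetical check: does the function body start with the nop5 pattern? */
static int starts_with_nop5(int (*fn)(void))
{
	return memcmp((const void *)(uintptr_t)fn, nop5, sizeof(nop5)) == 0;
}

Calling starts_with_nop5(probed_uprobe) before attaching would confirm that the inline asm landed as intended on x86_64.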
@@ -4949,35 +4986,46 @@ static ssize_t get_uprobe_offset(const void *addr)
 	return found ? (uintptr_t)addr - start + base : -1;
 }

-FIXTURE(URETPROBE) {
+FIXTURE(UPROBE) {
 	int fd;
 };

-FIXTURE_VARIANT(URETPROBE) {
+FIXTURE_VARIANT(UPROBE) {
 	/*
-	 * All of the URETPROBE behaviors can be tested with either
-	 * uretprobe attached or not
+	 * All of the U(RET)PROBE behaviors can be tested with either
+	 * u(ret)probe attached or not
 	 */
 	bool attach;
+	/*
+	 * Test both uprobe and uretprobe.
+	 */
+	bool uretprobe;
 };

-FIXTURE_VARIANT_ADD(URETPROBE, attached) {
+FIXTURE_VARIANT_ADD(UPROBE, not_attached) {
+	.attach = false,
+	.uretprobe = false,
+};
+
+FIXTURE_VARIANT_ADD(UPROBE, uprobe_attached) {
 	.attach = true,
+	.uretprobe = false,
 };

-FIXTURE_VARIANT_ADD(URETPROBE, not_attached) {
-	.attach = false,
+FIXTURE_VARIANT_ADD(UPROBE, uretprobe_attached) {
+	.attach = true,
+	.uretprobe = true,
 };

-FIXTURE_SETUP(URETPROBE)
+FIXTURE_SETUP(UPROBE)
 {
 	const size_t attr_sz = sizeof(struct perf_event_attr);
 	struct perf_event_attr attr;
 	ssize_t offset;
 	int type, bit;

-#ifndef __NR_uretprobe
-	SKIP(return, "__NR_uretprobe syscall not defined");
+#if !defined(__NR_uprobe) || !defined(__NR_uretprobe)
+	SKIP(return, "__NR_uprobe or __NR_uretprobe syscalls not defined");
 #endif

 	if (!variant->attach)
@@ -4987,12 +5035,17 @@ FIXTURE_SETUP(URETPROBE)

 	type = determine_uprobe_perf_type();
 	ASSERT_GE(type, 0);
-	bit = determine_uprobe_retprobe_bit();
-	ASSERT_GE(bit, 0);
-	offset = get_uprobe_offset(probed);
+
+	if (variant->uretprobe) {
+		bit = determine_uprobe_retprobe_bit();
+		ASSERT_GE(bit, 0);
+	}
+
+	offset = get_uprobe_offset(variant->uretprobe ? probed_uretprobe : probed_uprobe);
 	ASSERT_GE(offset, 0);

-	attr.config |= 1 << bit;
+	if (variant->uretprobe)
+		attr.config |= 1 << bit;
 	attr.size = attr_sz;
 	attr.type = type;
 	attr.config1 = ptr_to_u64("/proc/self/exe");
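The setup above goes through the perf "uprobe" PMU: attr.type comes from determine_uprobe_perf_type(), attr.config1 and attr.config2 carry the probed binary and the probe offset, and uretprobes additionally set the PMU's retprobe bit in attr.config. A standalone sketch of that same attach path, assuming the standard sysfs locations that the selftest helpers parse (open_uprobe_event is a made-up name and error handling is minimal):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/*
 * Sketch only: open a u(ret)probe perf event on path+offset in this process.
 * The PMU type and the retprobe bit live in sysfs, which is also what the
 * selftest's determine_uprobe_perf_type()/determine_uprobe_retprobe_bit()
 * helpers read.
 */
static int open_uprobe_event(const char *path, uint64_t offset, int retprobe)
{
	struct perf_event_attr attr = {0};
	unsigned int type, bit;
	FILE *f;

	f = fopen("/sys/bus/event_source/devices/uprobe/type", "r");
	if (!f || fscanf(f, "%u", &type) != 1)
		return -1;
	fclose(f);

	if (retprobe) {
		f = fopen("/sys/bus/event_source/devices/uprobe/format/retprobe", "r");
		if (!f || fscanf(f, "config:%u", &bit) != 1)
			return -1;
		fclose(f);
		attr.config |= 1ULL << bit;
	}

	attr.size = sizeof(attr);
	attr.type = type;
	attr.config1 = (uint64_t)(uintptr_t)path;	/* probed binary */
	attr.config2 = offset;				/* offset of the probe */

	/* pid 0: this task, cpu -1: any CPU, no group leader */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1,
		       PERF_FLAG_FD_CLOEXEC);
}

The fixture does the equivalent against /proc/self/exe with the offset returned by get_uprobe_offset().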
@@ -5003,7 +5056,7 @@ FIXTURE_SETUP(URETPROBE)
 			   PERF_FLAG_FD_CLOEXEC);
 }

-FIXTURE_TEARDOWN(URETPROBE)
+FIXTURE_TEARDOWN(UPROBE)
 {
 	/* we could call close(self->fd), but we'd need extra filter for
 	 * that and since we are calling _exit right away..
@@ -5017,11 +5070,17 @@ static int run_probed_with_filter(struct sock_fprog *prog)
 		return -1;
 	}

-	probed();
+	/*
+	 * The uprobe is optimized after the first hit, so let's hit it twice.
+	 */
+	probed_uprobe();
+	probed_uprobe();
+
+	probed_uretprobe();
 	return 0;
 }

-TEST_F(URETPROBE, uretprobe_default_allow)
+TEST_F(UPROBE, uprobe_default_allow)
 {
 	struct sock_filter filter[] = {
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
@@ -5034,7 +5093,7 @@ TEST_F(URETPROBE, uretprobe_default_allow)
 	ASSERT_EQ(0, run_probed_with_filter(&prog));
 }

-TEST_F(URETPROBE, uretprobe_default_block)
+TEST_F(UPROBE, uprobe_default_block)
 {
 	struct sock_filter filter[] = {
 		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
@@ -5051,11 +5110,14 @@ TEST_F(URETPROBE, uretprobe_default_block)
 	ASSERT_EQ(0, run_probed_with_filter(&prog));
 }

-TEST_F(URETPROBE, uretprobe_block_uretprobe_syscall)
+TEST_F(UPROBE, uprobe_block_syscall)
 {
 	struct sock_filter filter[] = {
 		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
 			 offsetof(struct seccomp_data, nr)),
+#ifdef __NR_uprobe
+		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_uprobe, 1, 2),
+#endif
 #ifdef __NR_uretprobe
 		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_uretprobe, 0, 1),
 #endif
@@ -5070,11 +5132,14 @@ TEST_F(URETPROBE, uretprobe_block_uretprobe_syscall)
 	ASSERT_EQ(0, run_probed_with_filter(&prog));
 }

-TEST_F(URETPROBE, uretprobe_default_block_with_uretprobe_syscall)
+TEST_F(UPROBE, uprobe_default_block_with_syscall)
 {
 	struct sock_filter filter[] = {
 		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
 			 offsetof(struct seccomp_data, nr)),
+#ifdef __NR_uprobe
+		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_uprobe, 3, 0),
+#endif
 #ifdef __NR_uretprobe
 		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_uretprobe, 2, 0),
 #endif
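The jt/jf fields in the BPF_JUMP statements above are relative skips: when the loaded syscall number equals k, execution skips jt instructions, otherwise jf, counted from the instruction after the jump. As a worked example (not taken from the patch; __NR_getpid is only a placeholder syscall), the block-one-syscall shape used by uprobe_block_syscall reduces to:

#include <stddef.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

/*
 * Sketch only: kill the task on __NR_getpid, allow everything else.
 * jt/jf count instructions to skip after the jump itself, so jt=0
 * falls through to RET KILL and jf=1 skips it to RET ALLOW.
 */
struct sock_filter block_getpid[] = {
	BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)),
	BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
	BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};

In the patch's filters, the extra #ifdef'd jumps only adjust those skip counts so that both the uprobe and uretprobe syscalls are routed to the intended KILL or ALLOW return.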