Skip to content

Update the adaptation of some locks for the RISC-V architecture #88

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
95 changes: 95 additions & 0 deletions benchmarks/lockhammer/include/atomics.h
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,11 @@ static inline unsigned long fetchadd64_acquire_release (unsigned long *ptr, unsi
: [tmp] "=&r" (tmp), [old] "=&r" (old), [newval] "=&r" (newval), [ptr] "+Q" (*ptr)
: [val] "r" (addend)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN)
asm volatile("amoadd.d.aqrl %[old], %[val], %[ptr]"
: [old] "=&r" (old), [ptr] "+A" (*(ptr))
: [val] "r" (addend)
: "memory");
#else
old = __atomic_fetch_add(ptr, addend, __ATOMIC_ACQ_REL);
#endif
Expand Down Expand Up @@ -162,6 +167,11 @@ static inline unsigned long fetchadd64_acquire (unsigned long *ptr, unsigned lon
: [tmp] "=&r" (tmp), [old] "=&r" (old), [newval] "=&r" (newval), [ptr] "+Q" (*ptr)
: [val] "r" (addend)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN)
asm volatile("amoadd.d.aq %[old], %[val], %[ptr]"
: [old] "=&r" (old), [ptr] "+A" (*(ptr))
: [val] "r" (addend)
: "memory");
#else
old = __atomic_fetch_add(ptr, addend, __ATOMIC_ACQUIRE);
#endif
Expand Down Expand Up @@ -196,6 +206,11 @@ static inline unsigned long fetchadd64_release (unsigned long *ptr, unsigned lon
: [tmp] "=&r" (tmp), [old] "=&r" (old), [newval] "=&r" (newval), [ptr] "+Q" (*ptr)
: [val] "r" (addend)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN)
asm volatile("amoadd.d.rl %[old], %[val], %[ptr]"
: [old] "=&r" (old), [ptr] "+A" (*(ptr))
: [val] "r" (addend)
: "memory");
#else
old = __atomic_fetch_add(ptr, addend, __ATOMIC_RELEASE);
#endif
Expand Down Expand Up @@ -229,6 +244,11 @@ static inline unsigned long fetchadd64 (unsigned long *ptr, unsigned long addend
: [tmp] "=&r" (tmp), [old] "=&r" (old), [newval] "=&r" (newval), [ptr] "+Q" (*ptr)
: [val] "r" (addend)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN)
asm volatile("amoadd.d %[old], %[val], %[ptr]"
: [old] "=&r" (old), [ptr] "+A" (*(ptr))
: [val] "r" (addend)
: "memory");
#else
old = __atomic_fetch_add(ptr, addend, __ATOMIC_RELAXED);
#endif
Expand Down Expand Up @@ -265,6 +285,12 @@ static inline unsigned long fetchsub64 (unsigned long *ptr, unsigned long addend
: [tmp] "=&r" (tmp), [old] "=&r" (old), [newval] "=&r" (newval), [ptr] "+Q" (*ptr)
: [val] "r" (addend)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN)
addend = (unsigned long) (-(long) addend);
asm volatile("amoadd.d %[old], %[val], %[ptr]"
: [old] "=&r" (old), [ptr] "+A" (*(ptr))
: [val] "r" (addend)
: "memory");
#else
old = __atomic_fetch_sub(ptr, addend, __ATOMIC_RELAXED);
#endif
Expand Down Expand Up @@ -296,6 +322,11 @@ static inline unsigned long swap64 (unsigned long *ptr, unsigned long val) {
: [tmp] "=&r" (tmp), [old] "=&r" (old), [ptr] "+Q" (*ptr)
: [val] "r" (val)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN)
asm volatile("amoswap.d.aqrl %[old], %[val], %[ptr]"
: [old] "=&r" (old), [ptr] "+A" (*(ptr))
: [val] "r" (val)
: "memory");
#else
old = __atomic_exchange_n(ptr, val, __ATOMIC_ACQ_REL);
#endif
Expand Down Expand Up @@ -330,6 +361,22 @@ static inline unsigned long cas64 (unsigned long *ptr, unsigned long newval, uns
: [tmp] "=&r" (tmp), [old] "=&r" (old), [ptr] "+Q" (*ptr)
: [exp] "r" (expected), [val] "r" (newval)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN) && !defined(__riscv_zacas)
unsigned long tmp;

asm volatile ( "1: lr.d %[old], %[ptr]\n"
" bne %[old], %[exp], 2f\n"
" sc.d %[tmp], %[val], %[ptr]\n"
" bnez %[tmp], 1b\n"
"2:"
: [old] "=&r" (old), [tmp] "=&r" (tmp), [ptr] "+A" (*(ptr))
: [exp] "r" (expected), [val] "r" (newval)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN) && defined(__riscv_zacas)
asm volatile("amocas.d %[exp], %[val], %[ptr]"
: [exp] "=&r" (old), [ptr] "+A" (*(ptr))
: "r[exp]" (expected), [val] "r" (newval)
: "memory");
#else
old = expected;
__atomic_compare_exchange_n(ptr, &old, expected, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
Expand Down Expand Up @@ -365,6 +412,22 @@ static inline unsigned long cas64_acquire (unsigned long *ptr, unsigned long val
: [tmp] "=&r" (tmp), [old] "=&r" (old), [ptr] "+Q" (*ptr)
: [exp] "r" (exp), [val] "r" (val)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN) && !defined(__riscv_zacas)
unsigned long tmp;

asm volatile ( "1: lr.d.aq %[old], %[ptr]\n"
" bne %[old], %[exp], 2f\n"
" sc.d %[tmp], %[newval], %[ptr]\n"
" bnez %[tmp], 1b\n"
"2:"
: [old] "=&r" (old), [tmp] "=&r" (tmp), [ptr] "+A" (*(ptr))
: [exp] "r" (exp), [newval] "r" (val)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN) && defined(__riscv_zacas)
asm volatile("amocas.d %[exp], %[val], %[ptr]"
: [exp] "=&r" (old), [ptr] "+A" (*(ptr))
: "r[exp]" (exp), [val] "r" (val)
: "memory");
#else
old = exp;
__atomic_compare_exchange_n(ptr, &old, val, true, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
Expand Down Expand Up @@ -400,6 +463,22 @@ static inline unsigned long cas64_release (unsigned long *ptr, unsigned long val
: [tmp] "=&r" (tmp), [old] "=&r" (old), [ptr] "+Q" (*ptr)
: [exp] "r" (exp), [val] "r" (val)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN) && !defined(__riscv_zacas)
unsigned long tmp;

asm volatile ( "1: lr.d %[old], %[ptr]\n"
" bne %[old], %[exp], 2f\n"
" sc.d.rl %[tmp], %[val], %[ptr]\n"
" bnez %[tmp], 1b\n"
"2:"
: [old] "=&r" (old), [tmp] "=&r" (tmp), [ptr] "+A" (*(ptr))
: [exp] "r" (exp), [val] "r" (val)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN) && defined(__riscv_zacas)
asm volatile("amocas.d.rl %[exp], %[val], %[ptr]"
: [exp] "=&r" (old), [ptr] "+A" (*(ptr))
: "r[exp]" (exp), [val] "r" (val)
: "memory");
#else
old = exp;
__atomic_compare_exchange_n(ptr, &old, val, true, __ATOMIC_RELEASE, __ATOMIC_RELAXED); // XXX: is relaxed for failure OK?
Expand Down Expand Up @@ -435,6 +514,22 @@ static inline unsigned long cas64_acquire_release (unsigned long *ptr, unsigned
: [tmp] "=&r" (tmp), [old] "=&r" (old), [ptr] "+Q" (*ptr)
: [exp] "r" (exp), [val] "r" (val)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN) && !defined(__riscv_zacas)
unsigned long tmp;

asm volatile ( "1: lr.d.aq %[old], %[ptr]\n"
" bne %[old], %[exp], 2f\n"
" sc.d.rl %[tmp], %[val], %[ptr]\n"
" bnez %[tmp], 1b\n"
"2:"
: [old] "=&r" (old), [tmp] "=&r" (tmp), [ptr] "+A" (*(ptr))
: [exp] "r" (exp), [val] "r" (val)
: "memory");
#elif defined(__riscv) && !defined(USE_BUILTIN) && defined(__riscv_zacas)
asm volatile("amocas.d.aqrl %[exp], %[val], %[ptr]"
: [exp] "=&r" (old), [ptr] "+A" (*(ptr))
: "r[exp]" (exp), [val] "r" (val)
: "memory");
#else
old = exp;
__atomic_compare_exchange_n(ptr, &old, val, true, __ATOMIC_ACQ_REL,
Expand Down
11 changes: 10 additions & 1 deletion benchmarks/lockhammer/include/cpu_relax.h
Original file line number Diff line number Diff line change
Expand Up @@ -63,9 +63,18 @@ static inline void __cpu_relax(void) {
#endif
#endif // __x86_64__

#ifdef __riscv
#if defined(RELAX_IS_EMPTY)
asm volatile ("" : : : "memory");
#elif defined(RELAX_IS_NOP)
asm volatile ("nop" : : : "memory");
#elif defined(RELAX_IS_NOTHING)

#endif
#endif // __riscv

}
}

#endif // CPU_RELAX_H

/* vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab: */
49 changes: 48 additions & 1 deletion benchmarks/lockhammer/include/perf_timer.h
Original file line number Diff line number Diff line change
Expand Up @@ -200,13 +200,32 @@ get_raw_counter(void) {
#endif


#ifdef __riscv
/* Read the raw RISC-V `time` CSR via rdtime.
 * The fence.i / fence r,r pair keeps the counter read from being
 * reordered with earlier instructions, mirroring the isb-before-mrs
 * pattern used for AArch64 elsewhere in this header.
 * NOTE(review): assumes the Zicntr counters are exposed to user mode —
 * confirm on the target platform. */
static inline uint64_t __attribute__((always_inline))
get_raw_counter(void) {
    uint64_t now;

    asm volatile(
        "fence.i\n"
        "fence r, r\n"
        "rdtime %0"
        : "=r"(now) : :);

    return now;
}
#endif

/* Reset the file-level prev_tsc baseline to the current value of the
 * platform's cycle/time counter. */
static inline void __attribute__((always_inline))
timer_reset_counter()
{
#ifdef __aarch64__
    /* isb serializes instruction execution so cntvct_el0 is not read early. */
    __asm__ __volatile__ ("isb; mrs %0, cntvct_el0" : "=r" (prev_tsc));
#elif __x86_64__
    prev_tsc = rdtscp();
#elif __riscv
    /* BUGFIX: was "fence.i\," — an invalid escape sequence with no newline,
     * which fused the two fence mnemonics into one malformed instruction
     * ("fence.i,fence r, r").  The separator must be "\n". */
    asm volatile(
        "fence.i\n"
        "fence r, r\n"
        "rdtime %0"
        : "=r"(prev_tsc) : :);
#endif
}

Expand All @@ -221,7 +240,14 @@ timer_get_counter()
__asm__ __volatile__ ("isb; mrs %0, cntvct_el0" : "=r" (counter_value));
#elif __x86_64__
uint64_t counter_value = rdtscp(); // assume constant_tsc
#endif
#elif __riscv
uint64_t counter_value;
asm volatile(
"fence.i\n"
"fence r, r\n"
"rdtime %0"
: "=r"(counter_value) : :);
#endif
return counter_value;
}

Expand All @@ -236,6 +262,14 @@ timer_get_counter_start()
__asm__ __volatile__ ("dsb ish; isb; mrs %0, cntvct_el0" : "=r" (counter_value));
#elif __x86_64__
uint64_t counter_value = rdtscp_start(); // assume constant_tsc
#elif __riscv
uint64_t counter_value;
asm volatile(
"fence rw, rw\n"
"fence.i\n"
"fence r,r\n"
"rdtime %0"
: "=r"(counter_value) : :);
#endif
return counter_value;
}
Expand All @@ -252,6 +286,15 @@ timer_get_counter_end()
__asm__ __volatile__ ("isb; mrs %0, cntvct_el0; isb" : "=r" (counter_value));
#elif __x86_64__
uint64_t counter_value = rdtscp_end(); // assume constant_tsc
#elif __riscv
uint64_t counter_value;
asm volatile(
"fence.i\n"
"fence r, r\n"
"rdtime %0\n"
"fence.i\n"
"fence r, r"
: "=r"(counter_value) : :);
#endif
return counter_value;
}
Expand Down Expand Up @@ -286,6 +329,10 @@ timer_get_timer_freq(void)

const struct timeval measurement_duration = { .tv_sec = 0, .tv_usec = 100000 };

hwtimer_frequency = estimate_hwclock_freq(1, 0, measurement_duration);
#elif __riscv
const struct timeval measurement_duration = { .tv_sec = 0, .tv_usec = 100000 };

hwtimer_frequency = estimate_hwclock_freq(1, 0, measurement_duration);
#else
#error "ERROR: timer_get_timer_freq() is not implemented for this system!"
Expand Down
2 changes: 2 additions & 0 deletions benchmarks/lockhammer/src/args.c
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,8 @@ static size_t get_ctr_erg_bytes(void) {
return ERG_words * 4;
#elif defined(__x86_64__)
return 64;
#elif defined(__riscv)
return 64;
#else
#error neither __aarch64__ nor __x86_64__ are defined in get_ctr_erg_bytes()
#endif
Expand Down
3 changes: 3 additions & 0 deletions benchmarks/lockhammer/src/measure.c
Original file line number Diff line number Diff line change
Expand Up @@ -203,6 +203,9 @@ void NOINLINE blackhole(unsigned long iters) {
#endif
#elif __x86_64__
asm volatile (".p2align 4; 1: add $-1, %0; jne 1b" : "+r" (iters) );
#elif __riscv
asm volatile (
".p2align 4; 1: addi %0, %0, -1; bnez %0, 1b" :"+r" (iters) : "0" (iters));
#endif
}

Expand Down
19 changes: 19 additions & 0 deletions ext/jvm/jvm_objectmonitor.h
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,12 @@ inline static void OrderAccess_fence(void) {
}
#endif

#ifdef __riscv
/* Full memory barrier: `fence rw,rw` orders all prior loads and stores
 * before all subsequent loads and stores (includes StoreLoad).
 * The "memory" clobber stops the compiler from caching values across it. */
inline static void OrderAccess_fence(void) {
    __asm__ volatile ("fence rw,rw" : : : "memory");
}
#endif

/* StoreLoad barrier, implemented by delegating to the per-architecture
 * full fence OrderAccess_fence(). */
inline static void storeload(void) {
    OrderAccess_fence();
}
Expand All @@ -301,6 +307,17 @@ inline static int int_xchg(int exchange_value, volatile int* dest) {
FULL_MEM_BARRIER;
return res;
}
#elif defined(__riscv)
/* Atomically swap *dest with exchange_value using a full-ordering AMO
 * (amoswap.w.aqrl); returns the previous value of *dest.
 * Uses named operands and the "+A" address constraint so the compiler
 * sees *dest as read-and-written, matching the AMO style used in
 * lockhammer's atomics.h (previously the pointer was passed as a plain
 * "r" input and only the "memory" clobber made the store visible). */
inline static int int_xchg(int exchange_value, volatile int* dest) {
    int result;
    __asm__ __volatile__ (
        "amoswap.w.aqrl %[old], %[val], %[ptr]"
        : [old] "=r" (result), [ptr] "+A" (*dest)
        : [val] "r" (exchange_value)
        : "memory"
    );
    return result;
}
#endif

/*
Expand Down Expand Up @@ -636,6 +653,8 @@ static inline int SpinPause(void) {
return 0;
#elif __x86_64__
return 1;
#elif __riscv
return 2;
#else
#error "unsupported instruction set architecture"
#endif
Expand Down
Loading