
Commit b0a0594

Authored Oct 9, 2022
v8: update to v10.7.193.13. (#310)
Signed-off-by: Dhi Aurrahman <[email protected]>
1 parent 25d6a99 · commit b0a0594

4 files changed, +10 -294 lines changed
 

bazel/external/v8.patch

-263
@@ -1,7 +1,5 @@
 # 1. Disable pointer compression (limits the maximum number of WasmVMs).
 # 2. Don't expose Wasm C API (only Wasm C++ API).
-# 3. Fix cross-compilation (https://crrev.com/c/3735165).
-# 4. Fix build errors in SIMD IndexOf/includes (https://crrev.com/c/3749192).
 
 diff --git a/BUILD.bazel b/BUILD.bazel
 index 5fb10d3940..a19930d36e 100644
@@ -35,264 +33,3 @@ index ce3f569fd5..dc8a4c4f6a 100644
 } // extern "C"
 +
 +#endif
-diff --git a/src/execution/clobber-registers.cc b/src/execution/clobber-registers.cc
-index 8f7fba765f..a7f5bf80cf 100644
---- a/src/execution/clobber-registers.cc
-+++ b/src/execution/clobber-registers.cc
-@@ -5,19 +5,22 @@
-
-#include "src/base/build_config.h"
-
--#if V8_HOST_ARCH_ARM
-+// Check both {HOST_ARCH} and {TARGET_ARCH} to disable the functionality of this
-+// file for cross-compilation. The reason is that the inline assembly code below
-+// does not work for cross-compilation.
-+#if V8_HOST_ARCH_ARM && V8_TARGET_ARCH_ARM
-#include "src/codegen/arm/register-arm.h"
--#elif V8_HOST_ARCH_ARM64
-+#elif V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64
-#include "src/codegen/arm64/register-arm64.h"
--#elif V8_HOST_ARCH_IA32
-+#elif V8_HOST_ARCH_IA32 && V8_TARGET_ARCH_IA32
-#include "src/codegen/ia32/register-ia32.h"
--#elif V8_HOST_ARCH_X64
-+#elif V8_HOST_ARCH_X64 && V8_TARGET_ARCH_X64
-#include "src/codegen/x64/register-x64.h"
--#elif V8_HOST_ARCH_LOONG64
-+#elif V8_HOST_ARCH_LOONG64 && V8_TARGET_ARCH_LOONG64
-#include "src/codegen/loong64/register-loong64.h"
--#elif V8_HOST_ARCH_MIPS
-+#elif V8_HOST_ARCH_MIPS && V8_TARGET_ARCH_MIPS
-#include "src/codegen/mips/register-mips.h"
--#elif V8_HOST_ARCH_MIPS64
-+#elif V8_HOST_ARCH_MIPS64 && V8_TARGET_ARCH_MIPS64
-#include "src/codegen/mips64/register-mips64.h"
-#endif
-
-@@ -26,14 +29,15 @@ namespace internal {
-
-#if V8_CC_MSVC
-// msvc only support inline assembly on x86
--#if V8_HOST_ARCH_IA32
-+#if V8_HOST_ARCH_IA32 && V8_TARGET_ARCH_IA32
-#define CLOBBER_REGISTER(R) __asm xorps R, R
-
-#endif
-
-#else // !V8_CC_MSVC
-
--#if V8_HOST_ARCH_X64 || V8_HOST_ARCH_IA32
-+#if (V8_HOST_ARCH_X64 && V8_TARGET_ARCH_X64) || \
-+ (V8_HOST_ARCH_IA32 && V8_TARGET_ARCH_IA32)
-#define CLOBBER_REGISTER(R) \
-__asm__ volatile( \
-"xorps " \
-@@ -42,20 +46,19 @@ namespace internal {
-"%%" #R :: \
-:);
-
--#elif V8_HOST_ARCH_ARM64
-+#elif V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64
-#define CLOBBER_REGISTER(R) __asm__ volatile("fmov " #R ",xzr" :::);
-
--#elif V8_HOST_ARCH_LOONG64
-+#elif V8_HOST_ARCH_LOONG64 && V8_TARGET_ARCH_LOONG64
-#define CLOBBER_REGISTER(R) __asm__ volatile("movgr2fr.d $" #R ",$zero" :::);
-
--#elif V8_HOST_ARCH_MIPS
-+#elif V8_HOST_ARCH_MIPS && V8_TARGET_ARCH_MIPS
-#define CLOBBER_USE_REGISTER(R) __asm__ volatile("mtc1 $zero,$" #R :::);
-
--#elif V8_HOST_ARCH_MIPS64
-+#elif V8_HOST_ARCH_MIPS64 && V8_TARGET_ARCH_MIPS64
-#define CLOBBER_USE_REGISTER(R) __asm__ volatile("dmtc1 $zero,$" #R :::);
-
--#endif // V8_HOST_ARCH_X64 || V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM64 ||
-- // V8_HOST_ARCH_LOONG64 || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
-+#endif // V8_HOST_ARCH_XXX && V8_TARGET_ARCH_XXX
-
-#endif // V8_CC_MSVC
-
-diff --git a/src/objects/simd.cc b/src/objects/simd.cc
-index 0a73b9c686..be6b72d157 100644
---- a/src/objects/simd.cc
-+++ b/src/objects/simd.cc
-@@ -354,8 +354,13 @@ Address ArrayIndexOfIncludes(Address array_start, uintptr_t array_len,
-if (reinterpret_cast<uintptr_t>(array) % sizeof(double) != 0) {
-// Slow scalar search for unaligned double array.
-for (; from_index < array_len; from_index++) {
-- if (fixed_array.get_representation(static_cast<int>(from_index)) ==
-- *reinterpret_cast<uint64_t*>(&search_num)) {
-+ if (fixed_array.is_the_hole(static_cast<int>(from_index))) {
-+ // |search_num| cannot be NaN, so there is no need to check against
-+ // holes.
-+ continue;
-+ }
-+ if (fixed_array.get_scalar(static_cast<int>(from_index)) ==
-+ search_num) {
-return from_index;
-}
-}
-diff --git a/src/objects/simd.cc b/src/objects/simd.cc
-index d3cedfe330..0a73b9c686 100644
---- a/src/objects/simd.cc
-+++ b/src/objects/simd.cc
-@@ -95,24 +95,21 @@ inline int extract_first_nonzero_index(T v) {
-}
-
-template <>
--inline int extract_first_nonzero_index(int32x4_t v) {
-- int32x4_t mask = {4, 3, 2, 1};
-+inline int extract_first_nonzero_index(uint32x4_t v) {
-+ uint32x4_t mask = {4, 3, 2, 1};
-mask = vandq_u32(mask, v);
-return 4 - vmaxvq_u32(mask);
-}
-
-template <>
--inline int extract_first_nonzero_index(int64x2_t v) {
-- int32x4_t mask = {2, 0, 1, 0}; // Could also be {2,2,1,1} or {0,2,0,1}
-- mask = vandq_u32(mask, vreinterpretq_s32_s64(v));
-+inline int extract_first_nonzero_index(uint64x2_t v) {
-+ uint32x4_t mask = {2, 0, 1, 0}; // Could also be {2,2,1,1} or {0,2,0,1}
-+ mask = vandq_u32(mask, vreinterpretq_u32_u64(v));
-return 2 - vmaxvq_u32(mask);
-}
-
--template <>
--inline int extract_first_nonzero_index(float64x2_t v) {
-- int32x4_t mask = {2, 0, 1, 0}; // Could also be {2,2,1,1} or {0,2,0,1}
-- mask = vandq_u32(mask, vreinterpretq_s32_f64(v));
-- return 2 - vmaxvq_u32(mask);
-+inline int32_t reinterpret_vmaxvq_u64(uint64x2_t v) {
-+ return vmaxvq_u32(vreinterpretq_u32_u64(v));
-}
-#endif
-
-@@ -204,14 +201,14 @@ inline uintptr_t fast_search_noavx(T* array, uintptr_t array_len,
-}
-#elif defined(NEON64)
-if constexpr (std::is_same<T, uint32_t>::value) {
-- VECTORIZED_LOOP_Neon(int32x4_t, int32x4_t, vdupq_n_u32, vceqq_u32,
-+ VECTORIZED_LOOP_Neon(uint32x4_t, uint32x4_t, vdupq_n_u32, vceqq_u32,
-vmaxvq_u32)
-} else if constexpr (std::is_same<T, uint64_t>::value) {
-- VECTORIZED_LOOP_Neon(int64x2_t, int64x2_t, vdupq_n_u64, vceqq_u64,
-- vmaxvq_u32)
-+ VECTORIZED_LOOP_Neon(uint64x2_t, uint64x2_t, vdupq_n_u64, vceqq_u64,
-+ reinterpret_vmaxvq_u64)
-} else if constexpr (std::is_same<T, double>::value) {
-- VECTORIZED_LOOP_Neon(float64x2_t, float64x2_t, vdupq_n_f64, vceqq_f64,
-- vmaxvq_f64)
-+ VECTORIZED_LOOP_Neon(float64x2_t, uint64x2_t, vdupq_n_f64, vceqq_f64,
-+ reinterpret_vmaxvq_u64)
-}
-#else
-UNREACHABLE();
-diff --git a/src/objects/simd.cc b/src/objects/simd.cc
-index be6b72d157..a71968fd10 100644
---- a/src/objects/simd.cc
-+++ b/src/objects/simd.cc
-@@ -148,9 +148,14 @@ inline int32_t reinterpret_vmaxvq_u64(uint64x2_t v) {
-template <typename T>
-inline uintptr_t fast_search_noavx(T* array, uintptr_t array_len,
-uintptr_t index, T search_element) {
-- static_assert(std::is_same<T, uint32_t>::value ||
-- std::is_same<T, uint64_t>::value ||
-- std::is_same<T, double>::value);
-+ static constexpr bool is_uint32 =
-+ sizeof(T) == sizeof(uint32_t) && std::is_integral<T>::value;
-+ static constexpr bool is_uint64 =
-+ sizeof(T) == sizeof(uint64_t) && std::is_integral<T>::value;
-+ static constexpr bool is_double =
-+ sizeof(T) == sizeof(double) && std::is_floating_point<T>::value;
-+
-+ static_assert(is_uint32 || is_uint64 || is_double);
-
-#if !(defined(__SSE3__) || defined(NEON64))
-// No SIMD available.
-@@ -178,14 +183,14 @@ inline uintptr_t fast_search_noavx(T* array, uintptr_t array_len,
-
-// Inserting one of the vectorized loop
-#ifdef __SSE3__
-- if constexpr (std::is_same<T, uint32_t>::value) {
-+ if constexpr (is_uint32) {
-#define MOVEMASK(x) _mm_movemask_ps(_mm_castsi128_ps(x))
-#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
-VECTORIZED_LOOP_x86(__m128i, __m128i, _mm_set1_epi32, _mm_cmpeq_epi32,
-MOVEMASK, EXTRACT)
-#undef MOVEMASK
-#undef EXTRACT
-- } else if constexpr (std::is_same<T, uint64_t>::value) {
-+ } else if constexpr (is_uint64) {
-#define SET1(x) _mm_castsi128_ps(_mm_set1_epi64x(x))
-#define CMP(a, b) _mm_cmpeq_pd(_mm_castps_pd(a), _mm_castps_pd(b))
-#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
-@@ -193,20 +198,20 @@ inline uintptr_t fast_search_noavx(T* array, uintptr_t array_len,
-#undef SET1
-#undef CMP
-#undef EXTRACT
-- } else if constexpr (std::is_same<T, double>::value) {
-+ } else if constexpr (is_double) {
-#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
-VECTORIZED_LOOP_x86(__m128d, __m128d, _mm_set1_pd, _mm_cmpeq_pd,
-_mm_movemask_pd, EXTRACT)
-#undef EXTRACT
-}
-#elif defined(NEON64)
-- if constexpr (std::is_same<T, uint32_t>::value) {
-+ if constexpr (is_uint32) {
-VECTORIZED_LOOP_Neon(uint32x4_t, uint32x4_t, vdupq_n_u32, vceqq_u32,
-vmaxvq_u32)
-- } else if constexpr (std::is_same<T, uint64_t>::value) {
-+ } else if constexpr (is_uint64) {
-VECTORIZED_LOOP_Neon(uint64x2_t, uint64x2_t, vdupq_n_u64, vceqq_u64,
-reinterpret_vmaxvq_u64)
-- } else if constexpr (std::is_same<T, double>::value) {
-+ } else if constexpr (is_double) {
-VECTORIZED_LOOP_Neon(float64x2_t, uint64x2_t, vdupq_n_f64, vceqq_f64,
-reinterpret_vmaxvq_u64)
-}
-@@ -240,9 +245,14 @@ template <typename T>
-TARGET_AVX2 inline uintptr_t fast_search_avx(T* array, uintptr_t array_len,
-uintptr_t index,
-T search_element) {
-- static_assert(std::is_same<T, uint32_t>::value ||
-- std::is_same<T, uint64_t>::value ||
-- std::is_same<T, double>::value);
-+ static constexpr bool is_uint32 =
-+ sizeof(T) == sizeof(uint32_t) && std::is_integral<T>::value;
-+ static constexpr bool is_uint64 =
-+ sizeof(T) == sizeof(uint64_t) && std::is_integral<T>::value;
-+ static constexpr bool is_double =
-+ sizeof(T) == sizeof(double) && std::is_floating_point<T>::value;
-+
-+ static_assert(is_uint32 || is_uint64 || is_double);
-
-const int target_align = 32;
-// Scalar loop to reach desired alignment
-@@ -256,21 +266,21 @@ TARGET_AVX2 inline uintptr_t fast_search_avx(T* array, uintptr_t array_len,
-}
-
-// Generating vectorized loop
-- if constexpr (std::is_same<T, uint32_t>::value) {
-+ if constexpr (is_uint32) {
-#define MOVEMASK(x) _mm256_movemask_ps(_mm256_castsi256_ps(x))
-#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
-VECTORIZED_LOOP_x86(__m256i, __m256i, _mm256_set1_epi32, _mm256_cmpeq_epi32,
-MOVEMASK, EXTRACT)
-#undef MOVEMASK
-#undef EXTRACT
-- } else if constexpr (std::is_same<T, uint64_t>::value) {
-+ } else if constexpr (is_uint64) {
-#define MOVEMASK(x) _mm256_movemask_pd(_mm256_castsi256_pd(x))
-#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
-VECTORIZED_LOOP_x86(__m256i, __m256i, _mm256_set1_epi64x,
-_mm256_cmpeq_epi64, MOVEMASK, EXTRACT)
-#undef MOVEMASK
-#undef EXTRACT
-- } else if constexpr (std::is_same<T, double>::value) {
-+ } else if constexpr (is_double) {
-#define CMP(a, b) _mm256_cmp_pd(a, b, _CMP_EQ_OQ)
-#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
-VECTORIZED_LOOP_x86(__m256d, __m256d, _mm256_set1_pd, CMP,
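
Note: the cross-compilation hunk removed above is no longer needed because newer V8 already carries the fix the patch header attributed to https://crrev.com/c/3735165. Its core idea is to emit the register-clobbering inline assembly only when host and target architectures match. A minimal, self-contained sketch of that guard pattern follows; it assumes a GCC/Clang toolchain, and the SKETCH_* macros are illustrative stand-ins for V8's V8_HOST_ARCH_X64 / V8_TARGET_ARCH_X64 from src/base/build_config.h, not real V8 names.

// Stand-ins for V8's host/target architecture macros, so this sketch builds
// outside the V8 tree. A native x86-64 GCC/Clang build sets both to 1; a
// cross build would set the target macro differently.
#if defined(__x86_64__) && !defined(_MSC_VER)
#define SKETCH_HOST_ARCH_X64 1
#define SKETCH_TARGET_ARCH_X64 1  // assume a native (non-cross) build here
#else
#define SKETCH_HOST_ARCH_X64 0
#define SKETCH_TARGET_ARCH_X64 0
#endif

// Requiring BOTH macros means a cross-compile (e.g. x64 host, arm64 target)
// never emits host-only inline assembly, which is what broke the build.
#if SKETCH_HOST_ARCH_X64 && SKETCH_TARGET_ARCH_X64
#define CLOBBER_REGISTER(R) __asm__ volatile("xorps %%" #R ", %%" #R :::)
#else
#define CLOBBER_REGISTER(R) ((void)0)  // compiles to nothing when cross-compiling
#endif

int main() {
  CLOBBER_REGISTER(xmm0);  // zeroes xmm0 on a native x64 build, no-op otherwise
  return 0;
}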

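Note: the src/objects/simd.cc hunks removed above correspond to the SIMD IndexOf/includes fix the patch header attributed to https://crrev.com/c/3749192; one of them makes the unaligned slow path skip holes and compare element values via get_scalar instead of comparing raw 64-bit representations. The C++ point behind that change, illustrated standalone without any V8 types (hole_like below is just an illustrative NaN, not V8's actual hole encoding):

// Why comparing double *values* differs from comparing bit patterns:
// equal doubles need not share a representation, and NaN never equals itself.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <limits>

// Well-defined way to read a double's bit pattern (avoids strict-aliasing UB).
uint64_t bits_of(double d) {
  uint64_t b;
  std::memcpy(&b, &d, sizeof(b));
  return b;
}

int main() {
  double pos_zero = 0.0, neg_zero = -0.0;
  std::cout << (pos_zero == neg_zero) << "\n";                    // 1: equal values
  std::cout << (bits_of(pos_zero) == bits_of(neg_zero)) << "\n";  // 0: sign bit differs

  // A sentinel stored as a special NaN can never match anything by value,
  // which is why the patched loop skips holes explicitly before comparing with ==.
  double hole_like = std::numeric_limits<double>::quiet_NaN();
  std::cout << (hole_like == hole_like) << "\n";  // 0: NaN != NaN
  return 0;
}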
bazel/repositories.bzl

+5 -24
@@ -126,10 +126,10 @@ def proxy_wasm_cpp_host_repositories():
     maybe(
         git_repository,
         name = "v8",
-        # 10.4.132.18
-        commit = "ce33dd2c08521fbe7f616bcd5941f2f388338030",
+        # 10.7.193.13
+        commit = "6c8b357a84847a479cd329478522feefc1c3195a",
         remote = "https://chromium.googlesource.com/v8/v8",
-        shallow_since = "1657561920 +0000",
+        shallow_since = "1664374400 +0000",
         patches = ["@proxy_wasm_cpp_host//bazel/external:v8.patch"],
         patch_args = ["-p1"],
     )
@@ -143,35 +143,16 @@ def proxy_wasm_cpp_host_repositories():
         new_git_repository,
         name = "com_googlesource_chromium_base_trace_event_common",
         build_file = "@v8//:bazel/BUILD.trace_event_common",
-        commit = "d115b033c4e53666b535cbd1985ffe60badad082",
+        commit = "521ac34ebd795939c7e16b37d9d3ddb40e8ed556",
         remote = "https://chromium.googlesource.com/chromium/src/base/trace_event/common.git",
-        shallow_since = "1642576054 -0800",
+        shallow_since = "1662508800 +0000",
     )
 
     native.bind(
         name = "base_trace_event_common",
        actual = "@com_googlesource_chromium_base_trace_event_common//:trace_event_common",
     )
 
-    maybe(
-        new_git_repository,
-        name = "com_googlesource_chromium_zlib",
-        build_file = "@v8//:bazel/BUILD.zlib",
-        commit = "64bbf988543996eb8df9a86877b32917187eba8f",
-        remote = "https://chromium.googlesource.com/chromium/src/third_party/zlib.git",
-        shallow_since = "1653988038 -0700",
-    )
-
-    native.bind(
-        name = "zlib",
-        actual = "@com_googlesource_chromium_zlib//:zlib",
-    )
-
-    native.bind(
-        name = "zlib_compression_utils",
-        actual = "@com_googlesource_chromium_zlib//:zlib_compression_utils",
-    )
-
     # WAMR with dependencies.
 
     maybe(

src/v8/v8.cc

+3 -7
@@ -30,14 +30,10 @@
 
 #include "include/v8-version.h"
 #include "include/v8.h"
+#include "src/flags/flags.h"
 #include "src/wasm/c-api.h"
 #include "wasm-api/wasm.hh"
 
-namespace v8::internal {
-extern bool FLAG_liftoff;
-extern unsigned int FLAG_wasm_max_mem_pages;
-} // namespace v8::internal
-
 namespace proxy_wasm {
 namespace v8 {
 
@@ -46,8 +42,8 @@ wasm::Engine *engine() {
   static wasm::own<wasm::Engine> engine;
 
   std::call_once(init, []() {
-    ::v8::internal::FLAG_liftoff = false;
-    ::v8::internal::FLAG_wasm_max_mem_pages =
+    ::v8::internal::v8_flags.liftoff = false;
+    ::v8::internal::v8_flags.wasm_max_mem_pages =
        PROXY_WASM_HOST_MAX_WASM_MEMORY_SIZE_BYTES / PROXY_WASM_HOST_WASM_MEMORY_PAGE_SIZE_BYTES;
     ::v8::V8::EnableWebAssemblyTrapHandler(true);
     engine = wasm::Engine::make();
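
Note: the src/v8/v8.cc change tracks a V8 10.7 refactor in which per-flag globals such as FLAG_liftoff moved into a single v8_flags struct defined in src/flags/flags.h, so the host now includes that header and assigns struct members instead of forward-declaring extern globals. A rough, self-contained sketch of the same shape; FlagValues and the values below are stand-ins for illustration, not V8's real definitions.

#include <cstdio>

// Stand-in for V8's FlagValues: all flags live as members of one object
// instead of per-flag globals like FLAG_liftoff and FLAG_wasm_max_mem_pages.
struct FlagValues {
  bool liftoff = true;
  unsigned int wasm_max_mem_pages = 65536;
};

FlagValues v8_flags;  // single definition replaces the old extern declarations

int main() {
  // Mirrors the call site in src/v8/v8.cc after this commit: flags are set
  // once, before the first wasm::Engine is created.
  v8_flags.liftoff = false;
  v8_flags.wasm_max_mem_pages = 1024;  // illustrative value, not the real one
  std::printf("liftoff=%d, wasm_max_mem_pages=%u\n",
              v8_flags.liftoff ? 1 : 0, v8_flags.wasm_max_mem_pages);
  return 0;
}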

test/BUILD

+2
@@ -52,6 +52,7 @@ cc_test(
 
 cc_test(
     name = "runtime_test",
+    timeout = "long",
     srcs = ["runtime_test.cc"],
     data = [
         "//test/test_data:callback.wasm",
@@ -152,6 +153,7 @@ cc_test(
 
 cc_test(
     name = "wasm_vm_test",
+    timeout = "long",
     srcs = ["wasm_vm_test.cc"],
     data = [
         "//test/test_data:abi_export.wasm",

0 commit comments

Comments
 (0)
Please sign in to comment.