Stream: git-wasmtime

Topic: wasmtime / issue #6415 cranelift-fuzzgen fuzzbug: Timeout...


view this post on Zulip Wasmtime GitHub notifications bot (May 19 2023 at 14:51):

afonso360 opened issue #6415:

:wave: Hey,

This testcase is running with testcase.compare_against_host = false so, we essentially run the original CLIF in the interpreter, run it through the optimizer and then run it again.

It looks like pre-optimization we execute fewer instructions and the testcase successfully passes, but after optimizations we run into the instruction limit for the interpreter and run into a timeout.

I think we execute more instructions because we expand all stack_stores in u1:1 into stack_addr+store which count as two instructions instead of 1. And that function gets called a bunch of times.

I suspect this issue was introduced back in #5998, and I'm not quite sure why OSS-Fuzz hasn't caught it in the past 2 months. It took around an hour to find on my machine.

<details>
<summary>Test case input</summary>

GAEAERAC//88////8QAB//j////////4APEB//8AERD/////cHBwcHBwcHBwcHAAAAAAAAAAAHBw
cHBw4ODg4AAAAAAsYgAZAAAA////ABEQ/////3BwcHBwcHBwcHBwAAAAAAAAAAAAAAAAAP//

</details>

<details>
<summary>cargo +nightly fuzz fmt output</summary>

<!-- If you can, please paste the output of cargo +nightly fuzz fmt <target> <input> in the code-block below. This will help reviewers more quickly triage this report. -->

;; Testing against optimized version
;; Run test case

test interpret
test run
set opt_level=speed
set enable_alias_analysis=false
set enable_simd=true
set enable_safepoints=true
set enable_llvm_abi_extensions=true
set unwind_info=false
set machine_code_cfg_info=true
set enable_table_access_spectre_mitigation=false
set enable_incremental_compilation_cache_checks=true
target x86_64 has_sse42 has_avx has_avx2 has_fma has_popcnt has_bmi1 has_bmi2 has_lzcnt

function u1:1(i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v {
    ss0 = explicit_slot 112
    ss1 = explicit_slot 95
    ss2 = explicit_slot 95
    ss3 = explicit_slot 95
    sig0 = (f32) -> f32 system_v
    sig1 = (f64) -> f64 system_v
    sig2 = (f32) -> f32 system_v
    sig3 = (f64) -> f64 system_v
    sig4 = (f32) -> f32 system_v
    sig5 = (f64) -> f64 system_v
    fn0 = %CeilF32 sig0
    fn1 = %CeilF64 sig1
    fn2 = %FloorF32 sig2
    fn3 = %FloorF64 sig3
    fn4 = %TruncF32 sig4
    fn5 = %TruncF64 sig5

block0(v0: i16x8, v1: i16x8, v2: i16x8, v3: i16x8, v4: i16x8, v5: i16x8, v6: i16x8, v7: i16x8, v8: i16x8, v9: i16x8, v10: i16x8, v11: i16x8, v12: i16x8, v13: i16x8, v14: i8, v15: i8):
    v16 = iconst.i8 0
    v17 = iconst.i8 0
    v18 = f32const 0x0.3200c4p-126
    v19 = iconst.i8 0
    v20 = iconst.i16 0
    v21 = iconst.i32 0
    v22 = iconst.i64 0
    v23 = uextend.i128 v22  ; v22 = 0
    stack_store v23, ss1
    stack_store v23, ss1+16
    stack_store v23, ss1+32
    stack_store v23, ss1+48
    stack_store v23, ss1+64
    stack_store v22, ss1+80  ; v22 = 0
    stack_store v21, ss1+88  ; v21 = 0
    stack_store v20, ss1+92  ; v20 = 0
    stack_store v19, ss1+94  ; v19 = 0
    stack_store v23, ss2
    stack_store v23, ss2+16
    stack_store v23, ss2+32
    stack_store v23, ss2+48
    stack_store v23, ss2+64
    stack_store v22, ss2+80  ; v22 = 0
    stack_store v21, ss2+88  ; v21 = 0
    stack_store v20, ss2+92  ; v20 = 0
    stack_store v19, ss2+94  ; v19 = 0
    stack_store v23, ss3
    stack_store v23, ss3+16
    stack_store v23, ss3+32
    stack_store v23, ss3+48
    stack_store v23, ss3+64
    stack_store v22, ss3+80  ; v22 = 0
    stack_store v21, ss3+88  ; v21 = 0
    stack_store v20, ss3+92  ; v20 = 0
    stack_store v19, ss3+94  ; v19 = 0
    stack_store v23, ss0
    stack_store v23, ss0+16
    stack_store v23, ss0+32
    stack_store v23, ss0+48
    stack_store v23, ss0+64
    stack_store v23, ss0+80
    stack_store v23, ss0+96
    return
}


function u1:0(i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v {
    sig0 = (i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v
    sig1 = (f32) -> f32 system_v
    sig2 = (f64) -> f64 system_v
    sig3 = (f32) -> f32 system_v
    sig4 = (f64) -> f64 system_v
    sig5 = (f32) -> f32 system_v
    sig6 = (f64) -> f64 system_v
    fn0 = u1:1 sig0
    fn1 = %CeilF32 sig1
    fn2 = %CeilF64 sig2
    fn3 = %FloorF32 sig3
    fn4 = %FloorF64 sig4
    fn5 = %TruncF32 sig5
    fn6 = %TruncF64 sig6

block0(v0: i16x8, v1: i16x8, v2: i16x8, v3: i16x8, v4: i16x8, v5: i16x8, v6: i16x8, v7: i16x8, v8: i16x8, v9: i16x8, v10: i16x8, v11: i16x8, v12: i16x8, v13: i16x8, v14: i8, v15: i8):
    v16 = iconst.i8 0
    v17 = iconst.i16 0
    v18 = iconst.i32 0
    v19 = iconst.i64 0
    v20 = uextend.i128 v19  ; v19 = 0
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    return
}


; Note: the results in the below test cases are simply a placeholder and probably will be wrong

; run: u1:0(0x00000000000000000000000000000000, 0x00000000000000000000000000000000, 0x00000000000000000000
[message truncated]

view this post on Zulip Wasmtime GitHub notifications bot (May 19 2023 at 14:51):

afonso360 labeled issue #6415:

:wave: Hey,

This testcase is running with testcase.compare_against_host = false so, we essentially run the original CLIF in the interpreter, run it through the optimizer and then run it again.

It looks like pre-optimization we execute fewer instructions and the testcase successfully passes, but after optimizations we run into the instruction limit for the interpreter and run into a timeout.

I think we execute more instructions because we expand all stack_stores in u1:1 into stack_addr+store which count as two instructions instead of 1. And that function gets called a bunch of times.

I suspect this issue was introduced back in #5998, and I'm not quite sure why OSS-Fuzz hasn't caught it in the past 2 months. It took around an hour to find on my machine.

<details>
<summary>Test case input</summary>

GAEAERAC//88////8QAB//j////////4APEB//8AERD/////cHBwcHBwcHBwcHAAAAAAAAAAAHBw
cHBw4ODg4AAAAAAsYgAZAAAA////ABEQ/////3BwcHBwcHBwcHBwAAAAAAAAAAAAAAAAAP//

</details>

<details>
<summary>cargo +nightly fuzz fmt output</summary>

<!-- If you can, please paste the output of cargo +nightly fuzz fmt <target> <input> in the code-block below. This will help reviewers more quickly triage this report. -->

;; Testing against optimized version
;; Run test case

test interpret
test run
set opt_level=speed
set enable_alias_analysis=false
set enable_simd=true
set enable_safepoints=true
set enable_llvm_abi_extensions=true
set unwind_info=false
set machine_code_cfg_info=true
set enable_table_access_spectre_mitigation=false
set enable_incremental_compilation_cache_checks=true
target x86_64 has_sse42 has_avx has_avx2 has_fma has_popcnt has_bmi1 has_bmi2 has_lzcnt

function u1:1(i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v {
    ss0 = explicit_slot 112
    ss1 = explicit_slot 95
    ss2 = explicit_slot 95
    ss3 = explicit_slot 95
    sig0 = (f32) -> f32 system_v
    sig1 = (f64) -> f64 system_v
    sig2 = (f32) -> f32 system_v
    sig3 = (f64) -> f64 system_v
    sig4 = (f32) -> f32 system_v
    sig5 = (f64) -> f64 system_v
    fn0 = %CeilF32 sig0
    fn1 = %CeilF64 sig1
    fn2 = %FloorF32 sig2
    fn3 = %FloorF64 sig3
    fn4 = %TruncF32 sig4
    fn5 = %TruncF64 sig5

block0(v0: i16x8, v1: i16x8, v2: i16x8, v3: i16x8, v4: i16x8, v5: i16x8, v6: i16x8, v7: i16x8, v8: i16x8, v9: i16x8, v10: i16x8, v11: i16x8, v12: i16x8, v13: i16x8, v14: i8, v15: i8):
    v16 = iconst.i8 0
    v17 = iconst.i8 0
    v18 = f32const 0x0.3200c4p-126
    v19 = iconst.i8 0
    v20 = iconst.i16 0
    v21 = iconst.i32 0
    v22 = iconst.i64 0
    v23 = uextend.i128 v22  ; v22 = 0
    stack_store v23, ss1
    stack_store v23, ss1+16
    stack_store v23, ss1+32
    stack_store v23, ss1+48
    stack_store v23, ss1+64
    stack_store v22, ss1+80  ; v22 = 0
    stack_store v21, ss1+88  ; v21 = 0
    stack_store v20, ss1+92  ; v20 = 0
    stack_store v19, ss1+94  ; v19 = 0
    stack_store v23, ss2
    stack_store v23, ss2+16
    stack_store v23, ss2+32
    stack_store v23, ss2+48
    stack_store v23, ss2+64
    stack_store v22, ss2+80  ; v22 = 0
    stack_store v21, ss2+88  ; v21 = 0
    stack_store v20, ss2+92  ; v20 = 0
    stack_store v19, ss2+94  ; v19 = 0
    stack_store v23, ss3
    stack_store v23, ss3+16
    stack_store v23, ss3+32
    stack_store v23, ss3+48
    stack_store v23, ss3+64
    stack_store v22, ss3+80  ; v22 = 0
    stack_store v21, ss3+88  ; v21 = 0
    stack_store v20, ss3+92  ; v20 = 0
    stack_store v19, ss3+94  ; v19 = 0
    stack_store v23, ss0
    stack_store v23, ss0+16
    stack_store v23, ss0+32
    stack_store v23, ss0+48
    stack_store v23, ss0+64
    stack_store v23, ss0+80
    stack_store v23, ss0+96
    return
}


function u1:0(i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v {
    sig0 = (i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v
    sig1 = (f32) -> f32 system_v
    sig2 = (f64) -> f64 system_v
    sig3 = (f32) -> f32 system_v
    sig4 = (f64) -> f64 system_v
    sig5 = (f32) -> f32 system_v
    sig6 = (f64) -> f64 system_v
    fn0 = u1:1 sig0
    fn1 = %CeilF32 sig1
    fn2 = %CeilF64 sig2
    fn3 = %FloorF32 sig3
    fn4 = %FloorF64 sig4
    fn5 = %TruncF32 sig5
    fn6 = %TruncF64 sig6

block0(v0: i16x8, v1: i16x8, v2: i16x8, v3: i16x8, v4: i16x8, v5: i16x8, v6: i16x8, v7: i16x8, v8: i16x8, v9: i16x8, v10: i16x8, v11: i16x8, v12: i16x8, v13: i16x8, v14: i8, v15: i8):
    v16 = iconst.i8 0
    v17 = iconst.i16 0
    v18 = iconst.i32 0
    v19 = iconst.i64 0
    v20 = uextend.i128 v19  ; v19 = 0
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    return
}


; Note: the results in the below test cases are simply a placeholder and probably will be wrong

; run: u1:0(0x00000000000000000000000000000000, 0x00000000000000000000000000000000, 0x0000000000000000000
[message truncated]

view this post on Zulip Wasmtime GitHub notifications bot (May 19 2023 at 14:51):

afonso360 labeled issue #6415:

:wave: Hey,

This testcase is running with testcase.compare_against_host = false so, we essentially run the original CLIF in the interpreter, run it through the optimizer and then run it again.

It looks like pre-optimization we execute fewer instructions and the testcase successfully passes, but after optimizations we run into the instruction limit for the interpreter and run into a timeout.

I think we execute more instructions because we expand all stack_stores in u1:1 into stack_addr+store which count as two instructions instead of 1. And that function gets called a bunch of times.

I suspect this issue was introduced back in #5998, and I'm not quite sure why OSS-Fuzz hasn't caught it in the past 2 months. It took around an hour to find on my machine.

<details>
<summary>Test case input</summary>

GAEAERAC//88////8QAB//j////////4APEB//8AERD/////cHBwcHBwcHBwcHAAAAAAAAAAAHBw
cHBw4ODg4AAAAAAsYgAZAAAA////ABEQ/////3BwcHBwcHBwcHBwAAAAAAAAAAAAAAAAAP//

</details>

<details>
<summary>cargo +nightly fuzz fmt output</summary>

<!-- If you can, please paste the output of cargo +nightly fuzz fmt <target> <input> in the code-block below. This will help reviewers more quickly triage this report. -->

;; Testing against optimized version
;; Run test case

test interpret
test run
set opt_level=speed
set enable_alias_analysis=false
set enable_simd=true
set enable_safepoints=true
set enable_llvm_abi_extensions=true
set unwind_info=false
set machine_code_cfg_info=true
set enable_table_access_spectre_mitigation=false
set enable_incremental_compilation_cache_checks=true
target x86_64 has_sse42 has_avx has_avx2 has_fma has_popcnt has_bmi1 has_bmi2 has_lzcnt

function u1:1(i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v {
    ss0 = explicit_slot 112
    ss1 = explicit_slot 95
    ss2 = explicit_slot 95
    ss3 = explicit_slot 95
    sig0 = (f32) -> f32 system_v
    sig1 = (f64) -> f64 system_v
    sig2 = (f32) -> f32 system_v
    sig3 = (f64) -> f64 system_v
    sig4 = (f32) -> f32 system_v
    sig5 = (f64) -> f64 system_v
    fn0 = %CeilF32 sig0
    fn1 = %CeilF64 sig1
    fn2 = %FloorF32 sig2
    fn3 = %FloorF64 sig3
    fn4 = %TruncF32 sig4
    fn5 = %TruncF64 sig5

block0(v0: i16x8, v1: i16x8, v2: i16x8, v3: i16x8, v4: i16x8, v5: i16x8, v6: i16x8, v7: i16x8, v8: i16x8, v9: i16x8, v10: i16x8, v11: i16x8, v12: i16x8, v13: i16x8, v14: i8, v15: i8):
    v16 = iconst.i8 0
    v17 = iconst.i8 0
    v18 = f32const 0x0.3200c4p-126
    v19 = iconst.i8 0
    v20 = iconst.i16 0
    v21 = iconst.i32 0
    v22 = iconst.i64 0
    v23 = uextend.i128 v22  ; v22 = 0
    stack_store v23, ss1
    stack_store v23, ss1+16
    stack_store v23, ss1+32
    stack_store v23, ss1+48
    stack_store v23, ss1+64
    stack_store v22, ss1+80  ; v22 = 0
    stack_store v21, ss1+88  ; v21 = 0
    stack_store v20, ss1+92  ; v20 = 0
    stack_store v19, ss1+94  ; v19 = 0
    stack_store v23, ss2
    stack_store v23, ss2+16
    stack_store v23, ss2+32
    stack_store v23, ss2+48
    stack_store v23, ss2+64
    stack_store v22, ss2+80  ; v22 = 0
    stack_store v21, ss2+88  ; v21 = 0
    stack_store v20, ss2+92  ; v20 = 0
    stack_store v19, ss2+94  ; v19 = 0
    stack_store v23, ss3
    stack_store v23, ss3+16
    stack_store v23, ss3+32
    stack_store v23, ss3+48
    stack_store v23, ss3+64
    stack_store v22, ss3+80  ; v22 = 0
    stack_store v21, ss3+88  ; v21 = 0
    stack_store v20, ss3+92  ; v20 = 0
    stack_store v19, ss3+94  ; v19 = 0
    stack_store v23, ss0
    stack_store v23, ss0+16
    stack_store v23, ss0+32
    stack_store v23, ss0+48
    stack_store v23, ss0+64
    stack_store v23, ss0+80
    stack_store v23, ss0+96
    return
}


function u1:0(i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v {
    sig0 = (i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v
    sig1 = (f32) -> f32 system_v
    sig2 = (f64) -> f64 system_v
    sig3 = (f32) -> f32 system_v
    sig4 = (f64) -> f64 system_v
    sig5 = (f32) -> f32 system_v
    sig6 = (f64) -> f64 system_v
    fn0 = u1:1 sig0
    fn1 = %CeilF32 sig1
    fn2 = %CeilF64 sig2
    fn3 = %FloorF32 sig3
    fn4 = %FloorF64 sig4
    fn5 = %TruncF32 sig5
    fn6 = %TruncF64 sig6

block0(v0: i16x8, v1: i16x8, v2: i16x8, v3: i16x8, v4: i16x8, v5: i16x8, v6: i16x8, v7: i16x8, v8: i16x8, v9: i16x8, v10: i16x8, v11: i16x8, v12: i16x8, v13: i16x8, v14: i8, v15: i8):
    v16 = iconst.i8 0
    v17 = iconst.i16 0
    v18 = iconst.i32 0
    v19 = iconst.i64 0
    v20 = uextend.i128 v19  ; v19 = 0
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    return
}


; Note: the results in the below test cases are simply a placeholder and probably will be wrong

; run: u1:0(0x00000000000000000000000000000000, 0x00000000000000000000000000000000, 0x0000000000000000000
[message truncated]

view this post on Zulip Wasmtime GitHub notifications bot (May 19 2023 at 14:56):

afonso360 edited issue #6415:

:wave: Hey,

This testcase is running with testcase.compare_against_host = false so, we essentially run the original CLIF in the interpreter, run it through the optimizer and then run it again.

It looks like pre-optimization we execute fewer instructions and the testcase successfully passes, but after optimizations we run into the instruction limit for the interpreter and run into a timeout.

I think we execute more instructions because we expand all stack_stores in u1:1 into stack_addr+store which count as two instructions instead of 1. And that function gets called a bunch of times.

I suspect this issue was introduced back in #5998, and I'm not quite sure why OSS-Fuzz hasn't caught it in the past 2 months. It took around an hour to find on my machine.

Edit: This test case reproduces on commit 28931a4ae64f32dfd4d825df526429c05b3d4945

<details>
<summary>Test case input</summary>

GAEAERAC//88////8QAB//j////////4APEB//8AERD/////cHBwcHBwcHBwcHAAAAAAAAAAAHBw
cHBw4ODg4AAAAAAsYgAZAAAA////ABEQ/////3BwcHBwcHBwcHBwAAAAAAAAAAAAAAAAAP//

</details>

<details>
<summary>cargo +nightly fuzz fmt output</summary>

<!-- If you can, please paste the output of cargo +nightly fuzz fmt <target> <input> in the code-block below. This will help reviewers more quickly triage this report. -->

;; Testing against optimized version
;; Run test case

test interpret
test run
set opt_level=speed
set enable_alias_analysis=false
set enable_simd=true
set enable_safepoints=true
set enable_llvm_abi_extensions=true
set unwind_info=false
set machine_code_cfg_info=true
set enable_table_access_spectre_mitigation=false
set enable_incremental_compilation_cache_checks=true
target x86_64 has_sse42 has_avx has_avx2 has_fma has_popcnt has_bmi1 has_bmi2 has_lzcnt

function u1:1(i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v {
    ss0 = explicit_slot 112
    ss1 = explicit_slot 95
    ss2 = explicit_slot 95
    ss3 = explicit_slot 95
    sig0 = (f32) -> f32 system_v
    sig1 = (f64) -> f64 system_v
    sig2 = (f32) -> f32 system_v
    sig3 = (f64) -> f64 system_v
    sig4 = (f32) -> f32 system_v
    sig5 = (f64) -> f64 system_v
    fn0 = %CeilF32 sig0
    fn1 = %CeilF64 sig1
    fn2 = %FloorF32 sig2
    fn3 = %FloorF64 sig3
    fn4 = %TruncF32 sig4
    fn5 = %TruncF64 sig5

block0(v0: i16x8, v1: i16x8, v2: i16x8, v3: i16x8, v4: i16x8, v5: i16x8, v6: i16x8, v7: i16x8, v8: i16x8, v9: i16x8, v10: i16x8, v11: i16x8, v12: i16x8, v13: i16x8, v14: i8, v15: i8):
    v16 = iconst.i8 0
    v17 = iconst.i8 0
    v18 = f32const 0x0.3200c4p-126
    v19 = iconst.i8 0
    v20 = iconst.i16 0
    v21 = iconst.i32 0
    v22 = iconst.i64 0
    v23 = uextend.i128 v22  ; v22 = 0
    stack_store v23, ss1
    stack_store v23, ss1+16
    stack_store v23, ss1+32
    stack_store v23, ss1+48
    stack_store v23, ss1+64
    stack_store v22, ss1+80  ; v22 = 0
    stack_store v21, ss1+88  ; v21 = 0
    stack_store v20, ss1+92  ; v20 = 0
    stack_store v19, ss1+94  ; v19 = 0
    stack_store v23, ss2
    stack_store v23, ss2+16
    stack_store v23, ss2+32
    stack_store v23, ss2+48
    stack_store v23, ss2+64
    stack_store v22, ss2+80  ; v22 = 0
    stack_store v21, ss2+88  ; v21 = 0
    stack_store v20, ss2+92  ; v20 = 0
    stack_store v19, ss2+94  ; v19 = 0
    stack_store v23, ss3
    stack_store v23, ss3+16
    stack_store v23, ss3+32
    stack_store v23, ss3+48
    stack_store v23, ss3+64
    stack_store v22, ss3+80  ; v22 = 0
    stack_store v21, ss3+88  ; v21 = 0
    stack_store v20, ss3+92  ; v20 = 0
    stack_store v19, ss3+94  ; v19 = 0
    stack_store v23, ss0
    stack_store v23, ss0+16
    stack_store v23, ss0+32
    stack_store v23, ss0+48
    stack_store v23, ss0+64
    stack_store v23, ss0+80
    stack_store v23, ss0+96
    return
}


function u1:0(i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v {
    sig0 = (i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v
    sig1 = (f32) -> f32 system_v
    sig2 = (f64) -> f64 system_v
    sig3 = (f32) -> f32 system_v
    sig4 = (f64) -> f64 system_v
    sig5 = (f32) -> f32 system_v
    sig6 = (f64) -> f64 system_v
    fn0 = u1:1 sig0
    fn1 = %CeilF32 sig1
    fn2 = %CeilF64 sig2
    fn3 = %FloorF32 sig3
    fn4 = %FloorF64 sig4
    fn5 = %TruncF32 sig5
    fn6 = %TruncF64 sig6

block0(v0: i16x8, v1: i16x8, v2: i16x8, v3: i16x8, v4: i16x8, v5: i16x8, v6: i16x8, v7: i16x8, v8: i16x8, v9: i16x8, v10: i16x8, v11: i16x8, v12: i16x8, v13: i16x8, v14: i8, v15: i8):
    v16 = iconst.i8 0
    v17 = iconst.i16 0
    v18 = iconst.i32 0
    v19 = iconst.i64 0
    v20 = uextend.i128 v19  ; v19 = 0
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    return
}


; Note: the results in the below test cases are simply a placeholder and probably will be wrong

; run: u1:0(0x000000
[message truncated]

view this post on Zulip Wasmtime GitHub notifications bot (May 19 2023 at 14:56):

afonso360 edited issue #6415:

:wave: Hey,

This testcase is running with testcase.compare_against_host = false, so we essentially run the original CLIF in the interpreter, run it through the optimizer and then run it again.

It looks like pre-optimization we execute fewer instructions and the testcase successfully passes, but after optimizations we run into the instruction limit for the interpreter and run into a timeout.

I think we execute more instructions because we expand all stack_store's in u1:1 into stack_addr+store which count as two instructions instead of 1. And that function gets called a bunch of times.

I suspect this issue was introduced back in #5998, and I'm not quite sure why OSS-Fuzz hasn't caught it in the past 2 months. It took around an hour to find on my machine.

Edit: This test case is reproducible on commit 28931a4ae64f32dfd4d825df526429c05b3d4945

<details>
<summary>Test case input</summary>

GAEAERAC//88////8QAB//j////////4APEB//8AERD/////cHBwcHBwcHBwcHAAAAAAAAAAAHBw
cHBw4ODg4AAAAAAsYgAZAAAA////ABEQ/////3BwcHBwcHBwcHBwAAAAAAAAAAAAAAAAAP//

</details>

<details>
<summary>cargo +nightly fuzz fmt output</summary>

<!-- If you can, please paste the output of cargo +nightly fuzz fmt <target> <input> in the code-block below. This will help reviewers more quickly triage this report. -->

;; Testing against optimized version
;; Run test case

test interpret
test run
set opt_level=speed
set enable_alias_analysis=false
set enable_simd=true
set enable_safepoints=true
set enable_llvm_abi_extensions=true
set unwind_info=false
set machine_code_cfg_info=true
set enable_table_access_spectre_mitigation=false
set enable_incremental_compilation_cache_checks=true
target x86_64 has_sse42 has_avx has_avx2 has_fma has_popcnt has_bmi1 has_bmi2 has_lzcnt

function u1:1(i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v {
    ss0 = explicit_slot 112
    ss1 = explicit_slot 95
    ss2 = explicit_slot 95
    ss3 = explicit_slot 95
    sig0 = (f32) -> f32 system_v
    sig1 = (f64) -> f64 system_v
    sig2 = (f32) -> f32 system_v
    sig3 = (f64) -> f64 system_v
    sig4 = (f32) -> f32 system_v
    sig5 = (f64) -> f64 system_v
    fn0 = %CeilF32 sig0
    fn1 = %CeilF64 sig1
    fn2 = %FloorF32 sig2
    fn3 = %FloorF64 sig3
    fn4 = %TruncF32 sig4
    fn5 = %TruncF64 sig5

block0(v0: i16x8, v1: i16x8, v2: i16x8, v3: i16x8, v4: i16x8, v5: i16x8, v6: i16x8, v7: i16x8, v8: i16x8, v9: i16x8, v10: i16x8, v11: i16x8, v12: i16x8, v13: i16x8, v14: i8, v15: i8):
    v16 = iconst.i8 0
    v17 = iconst.i8 0
    v18 = f32const 0x0.3200c4p-126
    v19 = iconst.i8 0
    v20 = iconst.i16 0
    v21 = iconst.i32 0
    v22 = iconst.i64 0
    v23 = uextend.i128 v22  ; v22 = 0
    stack_store v23, ss1
    stack_store v23, ss1+16
    stack_store v23, ss1+32
    stack_store v23, ss1+48
    stack_store v23, ss1+64
    stack_store v22, ss1+80  ; v22 = 0
    stack_store v21, ss1+88  ; v21 = 0
    stack_store v20, ss1+92  ; v20 = 0
    stack_store v19, ss1+94  ; v19 = 0
    stack_store v23, ss2
    stack_store v23, ss2+16
    stack_store v23, ss2+32
    stack_store v23, ss2+48
    stack_store v23, ss2+64
    stack_store v22, ss2+80  ; v22 = 0
    stack_store v21, ss2+88  ; v21 = 0
    stack_store v20, ss2+92  ; v20 = 0
    stack_store v19, ss2+94  ; v19 = 0
    stack_store v23, ss3
    stack_store v23, ss3+16
    stack_store v23, ss3+32
    stack_store v23, ss3+48
    stack_store v23, ss3+64
    stack_store v22, ss3+80  ; v22 = 0
    stack_store v21, ss3+88  ; v21 = 0
    stack_store v20, ss3+92  ; v20 = 0
    stack_store v19, ss3+94  ; v19 = 0
    stack_store v23, ss0
    stack_store v23, ss0+16
    stack_store v23, ss0+32
    stack_store v23, ss0+48
    stack_store v23, ss0+64
    stack_store v23, ss0+80
    stack_store v23, ss0+96
    return
}


function u1:0(i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v {
    sig0 = (i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v
    sig1 = (f32) -> f32 system_v
    sig2 = (f64) -> f64 system_v
    sig3 = (f32) -> f32 system_v
    sig4 = (f64) -> f64 system_v
    sig5 = (f32) -> f32 system_v
    sig6 = (f64) -> f64 system_v
    fn0 = u1:1 sig0
    fn1 = %CeilF32 sig1
    fn2 = %CeilF64 sig2
    fn3 = %FloorF32 sig3
    fn4 = %FloorF64 sig4
    fn5 = %TruncF32 sig5
    fn6 = %TruncF64 sig6

block0(v0: i16x8, v1: i16x8, v2: i16x8, v3: i16x8, v4: i16x8, v5: i16x8, v6: i16x8, v7: i16x8, v8: i16x8, v9: i16x8, v10: i16x8, v11: i16x8, v12: i16x8, v13: i16x8, v14: i8, v15: i8):
    v16 = iconst.i8 0
    v17 = iconst.i16 0
    v18 = iconst.i32 0
    v19 = iconst.i64 0
    v20 = uextend.i128 v19  ; v19 = 0
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    return
}


; Note: the results in the below test cases are simply a placeholder and probably will be wrong

; run: u1:0(0x0
[message truncated]

view this post on Zulip Wasmtime GitHub notifications bot (Jun 06 2023 at 18:22):

afonso360 closed issue #6415:

:wave: Hey,

This testcase is running with testcase.compare_against_host = false, so we essentially run the original CLIF in the interpreter, run it through the optimizer and then run it again.

It looks like pre-optimization we execute fewer instructions and the testcase successfully passes, but after optimizations we run into the instruction limit for the interpreter and run into a timeout.

I think we execute more instructions because we expand all stack_store's in u1:1 into stack_addr+store which count as two instructions instead of 1. And that function gets called a bunch of times.

I suspect this issue was introduced back in #5998, and I'm not quite sure why OSS-Fuzz hasn't caught it in the past 2 months. It took around an hour to find on my machine.

Edit: This test case is reproducible on commit 28931a4ae64f32dfd4d825df526429c05b3d4945

<details>
<summary>Test case input</summary>

GAEAERAC//88////8QAB//j////////4APEB//8AERD/////cHBwcHBwcHBwcHAAAAAAAAAAAHBw
cHBw4ODg4AAAAAAsYgAZAAAA////ABEQ/////3BwcHBwcHBwcHBwAAAAAAAAAAAAAAAAAP//

</details>

<details>
<summary>cargo +nightly fuzz fmt output</summary>

<!-- If you can, please paste the output of cargo +nightly fuzz fmt <target> <input> in the code-block below. This will help reviewers more quickly triage this report. -->

;; Testing against optimized version
;; Run test case

test interpret
test run
set opt_level=speed
set enable_alias_analysis=false
set enable_simd=true
set enable_safepoints=true
set enable_llvm_abi_extensions=true
set unwind_info=false
set machine_code_cfg_info=true
set enable_table_access_spectre_mitigation=false
set enable_incremental_compilation_cache_checks=true
target x86_64 has_sse42 has_avx has_avx2 has_fma has_popcnt has_bmi1 has_bmi2 has_lzcnt

function u1:1(i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v {
    ss0 = explicit_slot 112
    ss1 = explicit_slot 95
    ss2 = explicit_slot 95
    ss3 = explicit_slot 95
    sig0 = (f32) -> f32 system_v
    sig1 = (f64) -> f64 system_v
    sig2 = (f32) -> f32 system_v
    sig3 = (f64) -> f64 system_v
    sig4 = (f32) -> f32 system_v
    sig5 = (f64) -> f64 system_v
    fn0 = %CeilF32 sig0
    fn1 = %CeilF64 sig1
    fn2 = %FloorF32 sig2
    fn3 = %FloorF64 sig3
    fn4 = %TruncF32 sig4
    fn5 = %TruncF64 sig5

block0(v0: i16x8, v1: i16x8, v2: i16x8, v3: i16x8, v4: i16x8, v5: i16x8, v6: i16x8, v7: i16x8, v8: i16x8, v9: i16x8, v10: i16x8, v11: i16x8, v12: i16x8, v13: i16x8, v14: i8, v15: i8):
    v16 = iconst.i8 0
    v17 = iconst.i8 0
    v18 = f32const 0x0.3200c4p-126
    v19 = iconst.i8 0
    v20 = iconst.i16 0
    v21 = iconst.i32 0
    v22 = iconst.i64 0
    v23 = uextend.i128 v22  ; v22 = 0
    stack_store v23, ss1
    stack_store v23, ss1+16
    stack_store v23, ss1+32
    stack_store v23, ss1+48
    stack_store v23, ss1+64
    stack_store v22, ss1+80  ; v22 = 0
    stack_store v21, ss1+88  ; v21 = 0
    stack_store v20, ss1+92  ; v20 = 0
    stack_store v19, ss1+94  ; v19 = 0
    stack_store v23, ss2
    stack_store v23, ss2+16
    stack_store v23, ss2+32
    stack_store v23, ss2+48
    stack_store v23, ss2+64
    stack_store v22, ss2+80  ; v22 = 0
    stack_store v21, ss2+88  ; v21 = 0
    stack_store v20, ss2+92  ; v20 = 0
    stack_store v19, ss2+94  ; v19 = 0
    stack_store v23, ss3
    stack_store v23, ss3+16
    stack_store v23, ss3+32
    stack_store v23, ss3+48
    stack_store v23, ss3+64
    stack_store v22, ss3+80  ; v22 = 0
    stack_store v21, ss3+88  ; v21 = 0
    stack_store v20, ss3+92  ; v20 = 0
    stack_store v19, ss3+94  ; v19 = 0
    stack_store v23, ss0
    stack_store v23, ss0+16
    stack_store v23, ss0+32
    stack_store v23, ss0+48
    stack_store v23, ss0+64
    stack_store v23, ss0+80
    stack_store v23, ss0+96
    return
}


function u1:0(i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v {
    sig0 = (i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i16x8, i8 sext, i8 sext) system_v
    sig1 = (f32) -> f32 system_v
    sig2 = (f64) -> f64 system_v
    sig3 = (f32) -> f32 system_v
    sig4 = (f64) -> f64 system_v
    sig5 = (f32) -> f32 system_v
    sig6 = (f64) -> f64 system_v
    fn0 = u1:1 sig0
    fn1 = %CeilF32 sig1
    fn2 = %CeilF64 sig2
    fn3 = %FloorF32 sig3
    fn4 = %FloorF64 sig4
    fn5 = %TruncF32 sig5
    fn6 = %TruncF64 sig6

block0(v0: i16x8, v1: i16x8, v2: i16x8, v3: i16x8, v4: i16x8, v5: i16x8, v6: i16x8, v7: i16x8, v8: i16x8, v9: i16x8, v10: i16x8, v11: i16x8, v12: i16x8, v13: i16x8, v14: i8, v15: i8):
    v16 = iconst.i8 0
    v17 = iconst.i16 0
    v18 = iconst.i32 0
    v19 = iconst.i64 0
    v20 = uextend.i128 v19  ; v19 = 0
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    call fn0(v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v0, v14, v14)
    return
}


; Note: the results in the below test cases are simply a placeholder and probably will be wrong

; run: u1:0(0x0
[message truncated]


Last updated: Dec 23 2024 at 12:05 UTC