alexcrichton opened issue #6155:
Given this clif (fuzz-generated):
;; Run test case
test interpret
test run
set opt_level=speed_and_size
set use_egraphs=false
set enable_simd=true
set enable_llvm_abi_extensions=true
set unwind_info=false
set machine_code_cfg_info=true
set enable_jump_tables=false
set enable_table_access_spectre_mitigation=false
set enable_incremental_compilation_cache_checks=true
target x86_64 has_avx has_avx2 has_fma has_avx512dq has_avx512vl has_avx512f has_popcnt has_bmi1 has_bmi2 has_lzcnt

function u1:1(f32x4, i32x4, i16 sext, i32 sext, i64 sext, f32, i16 uext, i64x2, i8 sext, f64) -> i8 uext, i8 sext, i16x8, i8 sext, i64 sext, i8 sext, i8 sext, i8 sext, i8, i128 sext, i128 sext, i16x8, i16x8, i32 sext, i128 sext, i128 sext system_v {
    ss0 = explicit_slot 69
    ss1 = explicit_slot 69
    ss2 = explicit_slot 69
    ss3 = explicit_slot 69
    ss4 = explicit_slot 69
    ss5 = explicit_slot 34
    sig0 = (f32) -> f32 system_v
    sig1 = (f64) -> f64 system_v
    sig2 = (f32) -> f32 system_v
    sig3 = (f64) -> f64 system_v
    sig4 = (f32) -> f32 system_v
    sig5 = (f64) -> f64 system_v
    fn0 = %CeilF32 sig0
    fn1 = %CeilF64 sig1
    fn2 = %FloorF32 sig2
    fn3 = colocated %FloorF64 sig3
    fn4 = colocated %TruncF32 sig4
    fn5 = colocated %TruncF64 sig5
    const0 = 0x87001b008000000000ffffffffffffff

block0(v0: f32x4, v1: i32x4, v2: i16, v3: i32, v4: i64, v5: f32, v6: i16, v7: i64x2, v8: i8, v9: f64):
    v10 = f32const -0x1.434342p-60
    v11 = f32const -0x1.434342p-60
    v12 = f32const -0x1.434342p-60
    v13 = f32const -0x1.434342p-60
    v14 = iconst.i64 0x4d4d_0008_0000_3000
    v15 = iconst.i64 0x004d_4d6d
    v16 = iconcat v15, v14 ; v15 = 0x004d_4d6d, v14 = 0x4d4d_0008_0000_3000
    v17 = iconst.i64 -1
    v18 = iconst.i64 -1
    v19 = iconcat v18, v17 ; v18 = -1, v17 = -1
    v20 = vconst.i16x8 const0
    v21 = iconst.i32 0xffff_ffff_e281_be2a
    v22 = iconst.i8 0
    v23 = iconst.i16 0
    v24 = iconst.i32 0
    v25 = iconst.i64 0
    v26 = uextend.i128 v25 ; v25 = 0
    stack_store v26, ss5
    stack_store v26, ss5+16
    stack_store v23, ss5+32 ; v23 = 0
    stack_store v26, ss0
    stack_store v26, ss0+16
    stack_store v26, ss0+32
    stack_store v26, ss0+48
    stack_store v24, ss0+64 ; v24 = 0
    stack_store v22, ss0+68 ; v22 = 0
    stack_store v26, ss1
    stack_store v26, ss1+16
    stack_store v26, ss1+32
    stack_store v26, ss1+48
    stack_store v24, ss1+64 ; v24 = 0
    stack_store v22, ss1+68 ; v22 = 0
    stack_store v26, ss2
    stack_store v26, ss2+16
    stack_store v26, ss2+32
    stack_store v26, ss2+48
    stack_store v24, ss2+64 ; v24 = 0
    stack_store v22, ss2+68 ; v22 = 0
    stack_store v26, ss3
    stack_store v26, ss3+16
    stack_store v26, ss3+32
    stack_store v26, ss3+48
    stack_store v24, ss3+64 ; v24 = 0
    stack_store v22, ss3+68 ; v22 = 0
    stack_store v26, ss4
    stack_store v26, ss4+16
    stack_store v26, ss4+32
    stack_store v26, ss4+48
    stack_store v24, ss4+64 ; v24 = 0
    stack_store v22, ss4+68 ; v22 = 0
    v27 = sshr v4, v2
    v28 = isub v19, v19
    v91 = fsub v9, v9
    v92 = fcmp ne v91, v91
    v93 = f64const +NaN
    v29 = select v92, v93, v91 ; v93 = +NaN
    v94 = fsub v29, v29
    v95 = fcmp ne v94, v94
    v96 = f64const +NaN
    v30 = select v95, v96, v94 ; v96 = +NaN
    v31 = rotr v16, v2
    v32 = ishl v27, v2
    v33 = stack_addr.i64 ss4+38
    store notrap aligned v8, v33
    v34 = stack_addr.i64 ss2+24
    store notrap aligned v30, v34
    v35 = rotr v31, v8
    v36 = bmask.i16 v32
    v97 = fsub v30, v30
    v98 = fcmp ne v97, v97
    v99 = f64const +NaN
    v37 = select v98, v99, v97 ; v99 = +NaN
    v100 = fsub v37, v37
    v101 = fcmp ne v100, v100
    v102 = f64const +NaN
    v38 = select v101, v102, v100 ; v102 = +NaN
    v103 = fsub v38, v38
    v104 = fcmp ne v103, v103
    v105 = f64const +NaN
    v39 = select v104, v105, v103 ; v105 = +NaN
    v106 = fsub v39, v39
    v107 = fcmp ne v106, v106
    v108 = f64const +NaN
    v40 = select v107, v108, v106 ; v108 = +NaN
    v109 = fsub v40, v40
    v110 = fcmp ne v109, v109
    v111 = f64const +NaN
    v41 = select v110, v111, v109 ; v111 = +NaN
    v112 = fsub v41, v41
    v113 = fcmp ne v112, v112
    v114 = f64const +NaN
    v42 = select v113, v114, v112 ; v114 = +NaN
    v115 = fsub v42, v42
    v116 = fcmp ne v115, v115
    v117 = f64const +NaN
    v43 = select v116, v117, v115 ; v117 = +NaN
    v118 = fsub v43, v43
    v119 = fcmp ne v118, v118
    v120 = f64const +NaN
    v44 = select v119, v120, v118 ; v120 = +NaN
    v121 = fsub v44, v44
    v122 = fcmp ne v121, v121
    v123 = f64const +NaN
    v45 = select v122, v123, v121 ; v123 = +NaN
    v124 = fsub v45, v45
    v125 = fcmp ne v124, v124
    v126 = f64const +NaN
    v46 = select v125, v126, v124 ; v126 = +NaN
    v127 = fsub v46, v46
    v128 = fcmp ne v127, v127
    v129 = f64const +NaN
    v47 = select v128, v129, v127 ; v129 = +NaN
    v130 = fsub v47, v47
    v131 = fcmp ne v130, v130
    v132 = f64const +NaN
    v48 = select v131, v132, v130 ; v132 = +NaN
    v133 = fsub v48, v48
    v134 = fcmp ne v133, v133
    v135 = f64const +NaN
    v49 = select v134, v135, v133 ; v135 = +NaN
    v136 = fsub v49, v49
    v137 = fcmp ne v136, v136
    v138 = f64const +NaN
    v50 = select v137, v138, v136 ; v138 = +NaN
    v139 = fsub v50, v50
    v140 = fcmp ne v139, v139
    v141 = f64const +NaN
    v51 = select v140, v141, v139 ; v141 = +NaN
    v142 = fsub v51, v51
    v143 = fcmp ne v142, v142
    v144 = f64const +NaN
    v52 = select v143, v144, v142 ; v144 = +NaN
    v145 = fsub v52, v52
    v146 = fcmp ne v145, v145
    v147 = f64const +NaN
    v53 = select v146, v147, v145 ; v147 = +NaN
    v148 = fsub v53, v53
    v149 = fcmp ne v148, v148
    v150 = f64const +NaN
    v54 = select v149, v150, v148 ; v150 = +NaN
    v151 = fsub v54, v54
    v152 = fcmp ne v151, v151
    v153 = f64const +NaN
    v55 = select v152, v153, v151 ; v153 = +NaN
    v154 = fsub v55, v55
    v155 = fcmp ne v154, v154
    v156 = f64const +NaN
    v56 = select v155, v156, v154 ; v156 = +NaN
    v157 = fsub v56, v56
    v158 = fcmp ne v157, v157
    v159 = f64const +NaN
    v57 = select v158, v159, v157 ; v159 = +NaN
    v160 = fsub v57, v57
    v161 = fcmp ne v160, v160
    v162 = f64const +NaN
    v58 = select v161, v162, v160 ; v162 = +NaN
    v163 = fsub v58, v58
    v164 = fcmp ne v163, v163
    v165 = f64const +NaN
    v59 = select v164, v165, v163 ; v165 = +NaN
    v166 = fsub v59, v59
    v167 = fcmp ne v166, v166
    v168 = f64const +NaN
    v60 = select v167, v168, v166 ; v168 = +NaN
    v169 = fsub v60, v60
    v170 = fcmp ne v169, v169
    v171 = f64const +NaN
    v61 = select v170, v171, v169 ; v171 = +NaN
    v172 = fsub v61, v61
    v173 = fcmp ne v172, v172
    v174 = f64const +NaN
    v62 = select v173, v174, v172 ; v174 = +NaN
    v175 = fsub v62, v62
    v176 = fcmp ne v175, v175
    v177 = f64const +NaN
    v63 = select v176, v177, v175 ; v177 = +NaN
    v178 = fsub v63, v63
    v179 = fcmp ne v178, v178
    v180 = f64const +NaN
    v64 = select v179, v180, v178 ; v180 = +NaN
    v181 = fsub v64, v64
    v182 = fcmp ne v181, v181
    v183 = f64const +NaN
    v65 = select v182, v183, v181 ; v183 = +NaN
    v184 = fsub v65, v65
    v185 = fcmp ne v184, v184
    v186 = f64const +NaN
    v66 = select v185, v186, v184 ; v186 = +NaN
    v187 = fsub v66, v66
    v188 = fcmp ne v187, v187
    v189 = f64const +NaN
    v67 = select v188, v189, v187 ; v189 = +NaN
    v190 = fsub v67, v67
    v191 = fcmp ne v190, v190
    v192 = f64const +NaN
    v68 = select v191, v192, v190 ; v192 = +NaN
    v193 = fsub v68, v68
    v194 = fcmp ne v193, v193
    v195 = f64const +NaN
    v69 = select v194, v195, v193 ; v195 = +NaN
    v70 = ctz v21 ; v21 = 0xffff_ffff_e281_be2a
    v196 = fsub v69, v69
    v197 = fcmp ne v196, v196
    v198 = f64const +NaN
    v71 = select v197, v198, v196 ; v198 = +NaN
    v199 = fsub v71, v71
    v200 = fcmp ne v199, v199
    v201 = f64const +NaN
    v72 = select v200, v201, v199 ; v201 = +NaN
    v202 = fsub v72, v72
    v203 = fcmp ne v202, v202
    v204 = f64const +NaN
    v73 = select v203, v204, v202 ; v204 = +NaN
    v205 = fsub v73, v73
    v206 = fcmp ne v205, v205
    v207 = f64const +NaN
    v74 = select v206, v207, v205 ; v207 = +NaN
    v208 = fsub v74, v74
    v209 = fcmp ne v208, v208
    v210 = f64const +NaN
    v75 = select v209, v210, v208 ; v210 = +NaN
    v211 = fsub v75, v75
    v212 = fcmp ne v211, v211
    v213 = f64const +NaN
    v76 = select v212, v213, v211 ; v213 = +NaN
    v214 = fsub v76, v76
    v215 = fcmp ne v214, v214
    v216 = f64const +NaN
    v77 = select v215, v216, v214 ; v216 = +NaN
    v217 = fsub v77, v77
    v218 = fcmp ne v217, v217
    v219 = f64const +NaN
    v78 = select v218, v219, v217 ; v219 = +NaN
    v220 = fsub v78, v78
    v221 = fcmp ne v220, v220
    v222 = f64const +NaN
    v79 = select v221, v222, v220 ; v222 = +NaN
    v223 = fsub v79, v79
    v224 = fcmp ne v223, v223
    v225 = f64const +NaN
    v80 = select v224, v225, v223 ; v225 = +NaN
    v226 = fsub v80, v80
    v227 = fcmp ne v226, v226
    v228 = f64const +NaN
    v81 = select v227, v228, v226 ; v228 = +NaN
    v229 = fsub v81, v81
    v230 = fcmp ne v229, v229
    v231 = f64const +NaN
    v82 = select v230, v231, v229 ; v231 = +NaN
    v83 = popcnt v36
    v84 = bmask.i8 v83
    v85 = stack_load.i32 ss1+52
    v86 = func_addr.i64 fn0
    v87 = call_indirect sig0, v86(v5)
    v88 = call fn3(v82)
    v89 = sel [message truncated]
alexcrichton commented on issue #6155:
According to oss-fuzz, the regression range for this is https://github.com/bytecodealliance/wasmtime/compare/1ed7c89e3d07981bc821a8e12f531ddaa188a88c...2fde25311e823ad8b2c3446df63779089402ad63, and I suspect it's https://github.com/bytecodealliance/wasmtime/pull/6077 (but I am not certain).
bjorn3 commented on issue #6155:
While working on the interpreter a couple of days ago, I noticed that if a called function traps, you get this exact panic: the call site expects the resulting control flow to be a return rather than a trap. Maybe that is what is happening here?
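For illustration, a minimal Rust sketch of that failure mode, assuming a drastically simplified interpreter; the ControlFlow enum and the function names below are invented for this sketch and are not the actual cranelift-interpreter API:

#[derive(Debug)]
enum ControlFlow {
    // The callee returned normally with its results.
    Return(Vec<f32>),
    // The callee hit a trap (e.g. a misaligned access).
    Trap(&'static str),
}

// Stand-in for interpreting a callee that happens to trap.
fn run_callee() -> ControlFlow {
    ControlFlow::Trap("HeapMisaligned")
}

// Buggy call handling: it assumes the only control flow coming back from
// a call is a return, so a trap in the callee panics the interpreter
// instead of being reported to the top-level driver.
fn interpret_call() -> Vec<f32> {
    match run_callee() {
        ControlFlow::Return(values) => values,
        other => panic!("unexpected control flow: {other:?}"),
    }
}

fn main() {
    // Panics with `unexpected control flow: Trap("HeapMisaligned")`.
    interpret_call();
}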
afonso360 commented on issue #6155:
Here's a minimal reproducer:
test interpret

function %u2() -> f32 system_v {
    ss0 = explicit_slot 69
    ss1 = explicit_slot 69
    ss2 = explicit_slot 69

block0:
    v0 = f32const -0x1.434342p-60
    v1 = stack_addr.i64 ss2+24
    store notrap aligned v0, v1
    return v0
}

function %u1() -> f32 system_v {
    sig0 = () -> f32 system_v
    fn0 = colocated %u2 sig0

block0:
    v57 = call fn0()
    return v57
}
; run: %u1() == -0x1.434342p-60
; r-un: %u2() == -0x1.434342p-60
Running the %u2 test traps as expected with Trap(User(HeapMisaligned)), but when running only %u1 we get the error. So yeah, it looks like it's what @bjorn3 was saying: we don't properly trap across calls. I'll post a PR fixing this soon.
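A minimal sketch of the shape such a fix could take, using the same invented ControlFlow type as the sketch above (redefined here so the snippet stands alone; again, this is not the actual cranelift-interpreter code): the call handler forwards a callee trap to its own caller instead of treating anything other than a return as unreachable.

#[derive(Debug)]
enum ControlFlow {
    Return(Vec<f32>),
    Trap(&'static str),
}

// Fixed call handling: a trap in the callee is propagated upward, so the
// top-level driver can surface it as Trap(User(HeapMisaligned)) instead
// of the interpreter panicking mid-call.
fn interpret_call(callee_result: ControlFlow) -> ControlFlow {
    match callee_result {
        ControlFlow::Return(values) => ControlFlow::Return(values),
        trap @ ControlFlow::Trap(_) => trap,
    }
}

fn main() {
    let result = interpret_call(ControlFlow::Trap("HeapMisaligned"));
    println!("{result:?}"); // Trap("HeapMisaligned"): reported, not a panic
}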
cfallin closed issue #6155.