alexcrichton added the fuzz-bug label to Issue #9090.
alexcrichton opened issue #9090:
This input:
<details>
<summary><code>input.clif</code></summary>

```
;;Output of `std::fmt::Debug`:
;; Compile test case
test compile
set opt_level=speed_and_size
set regalloc_checker=true
set preserve_frame_pointers=true
set machine_code_cfg_info=true
set enable_heap_access_spectre_mitigation=false
set enable_table_access_spectre_mitigation=false
target s390x has_vxrs_ext2

function u1:0(i64x2, i128 uext, i16x8, f64, i64, i16, i32x4, f32, f32x4, i8x16, f64x2, i32, i8 sext) -> i8x16, i8, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2 system_v {
    ss0 = explicit_slot 17, align = 64
    ss1 = explicit_slot 17, align = 64
    ss2 = explicit_slot 17, align = 4
    ss3 = explicit_slot 1
    ss4 = explicit_slot 67, align = 512
    ss5 = explicit_slot 67, align = 512
    ss6 = explicit_slot 67, align = 512
    ss7 = explicit_slot 67, align = 512
    ss8 = explicit_slot 16, align = 16
    ss9 = explicit_slot 16, align = 16
    ss10 = explicit_slot 4, align = 4
    ss11 = explicit_slot 2, align = 2
    ss12 = explicit_slot 8, align = 8
    ss13 = explicit_slot 16, align = 16
    ss14 = explicit_slot 2, align = 2
    ss15 = explicit_slot 16, align = 16
    ss16 = explicit_slot 2, align = 2
    ss17 = explicit_slot 1
    ss18 = explicit_slot 16, align = 16
    ss19 = explicit_slot 16, align = 16
    ss20 = explicit_slot 2, align = 2
    sig0 = (i8 uext, i64x2, f32, i16 uext, i16, i8, i64x2, f64, f64, f64, f64x2, i64 uext, i16x8, i16, i64x2, i16x8) -> i8, f32x4, i128 uext, i16x8, i8 sext, i32x4, f64, f32, i32x4 tail
    sig1 = (i16 sext) -> i16x8, f32, f32 system_v
    sig2 = (f32, f32, i8x16, i16x8, i128 sext, i8 sext, f32, f64, i8 sext, i8 sext) -> i8 sext, f64x2, i64 sext, i8 sext, i8 sext, i8 sext, f64x2, i128 sext, i128, i128 fast
    sig3 = () cold
    sig4 = () cold
    sig5 = () cold
    sig6 = () -> i16 sext, f32x4, i8 sext, i8, i64x2, i16 uext, f64, f64x2, i64 uext, i16x8, i16, i64x2 cold
    sig7 = (f32) -> f32 system_v
    sig8 = (f64) -> f64 system_v
    sig9 = (f32) -> f32 system_v
    sig10 = (f64) -> f64 system_v
    sig11 = (f32) -> f32 system_v
    sig12 = (f64) -> f64 system_v
    sig13 = (f32) -> f32 system_v
    sig14 = (f64) -> f64 system_v
    sig15 = (f32, f32, f32) -> f32 system_v
    sig16 = (f64, f64, f64) -> f64 system_v
    fn0 = u2:0 sig0
    fn1 = u2:1 sig1
    fn2 = u2:2 sig2
    fn3 = u2:3 sig3
    fn4 = u2:4 sig4
    fn5 = u2:5 sig5
    fn6 = u2:6 sig6
    fn7 = %CeilF32 sig7
    fn8 = %CeilF64 sig8
    fn9 = %FloorF32 sig9
    fn10 = %FloorF64 sig10
    fn11 = %TruncF32 sig11
    fn12 = %TruncF64 sig12
    fn13 = %NearestF32 sig13
    fn14 = %NearestF64 sig14
    fn15 = %FmaF32 sig15
    fn16 = %FmaF64 sig16
    const0 = 0x019331766373c02c0e63736b26fdd628
    const1 = 0x00e40000cb5d198973aecc564f382afd
    const2 = 0xe4e48c8c8c8c8c8c8c8c8c8c8c8c8c8c
    const3 = 0xd1d1d1e4e4e4e48c8c8c8c8c8c8c8ce4

block0(v0: i64x2, v1: i128, v2: i16x8, v3: f64, v4: i64, v5: i16, v6: i32x4, v7: f32, v8: f32x4, v9: i8x16, v10: f64x2, v11: i32, v12: i8):
    stack_store v0, ss13
    stack_store v4, ss12
    stack_store v5, ss20
    stack_store v9, ss18
    stack_store v10, ss9
    v47 = iconst.i16 -15164
    v48 = iconst.i16 -15164
    stack_store v48, ss16 ; v48 = -15164
    v49 = iconst.i16 -15164
    stack_store v49, ss14 ; v49 = -15164
    v50 = iconst.i16 0
    stack_store v50, ss11 ; v50 = 0
    v51 = iconst.i8 44
    stack_store v51, ss17 ; v51 = 44
    v52 = vconst.i64x2 const0
    v53 = vconst.i16x8 const1
    v54 = vconst.i32x4 const2
    v55 = vconst.i8x16 const3
    v56 = iconst.i8 0
    v57 = iconst.i16 0
    v58 = iconst.i32 0
    v59 = iconst.i64 0
    v60 = uextend.i128 v59 ; v59 = 0
    v61 = stack_addr.i64 ss3
    store notrap v56, v61 ; v56 = 0
    v62 = stack_addr.i64 ss0
    store notrap heap v60, v62
    v63 = stack_addr.i64 ss0+16
    store notrap heap v56, v63 ; v56 = 0
    v64 = stack_addr.i64 ss1
    store notrap heap v60, v64
    v65 = stack_addr.i64 ss1+16
    store notrap heap v56, v65 ; v56 = 0
    v66 = stack_addr.i64 ss2
    store notrap heap v60, v66
    v67 = stack_addr.i64 ss2+16
    store notrap heap v56, v67 ; v56 = 0
    v68 = stack_addr.i64 ss4
    store notrap v60, v68
    v69 = stack_addr.i64 ss4+16
    store notrap v60, v69
    v70 = stack_addr.i64 ss4+32
    store notrap v60, v70
    v71 = stack_addr.i64 ss4+48
    store notrap v60, v71
    v72 = stack_addr.i64 ss4+64
    store notrap v57, v72 ; v57 = 0
    v73 = stack_addr.i64 ss4+66
    store notrap v56, v73 ; v56 = 0
    v74 = stack_addr.i64 ss5
    store notrap v60, v74
    v75 = stack_addr.i64 ss5+16
    store notrap v60, v75
    v76 = stack_addr.i64 ss5+32
    store notrap v60, v76
    v77 = stack_addr.i64 ss5+48
    store notrap v60, v77
    v78 = stack_addr.i64 ss5+64
    store notrap v57, v78 ; v57 = 0
    v79 = stack_addr.i64 ss5+66
    store notrap v56, v79 ; v56 = 0
    v80 = stack_addr.i64 ss6
    store notrap v60, v80
    v81 = stack_addr.i64 ss6+16
    store notrap v60, v81
    v82 = stack_addr.i64 ss6+32
    store notrap v60, v82
    v83 = stack_addr.i64 ss6+48
    store notrap v60, v83
    v84 = stack_addr.i64 ss6+64
    store notrap v57, v84 ; v57 = 0
    v85 = stack_addr.i64 ss6+66
    store notrap v56, v85 ; v56 = 0
    v86 = stack_addr.i64 ss7
    store notrap v60, v86
    v87 = stack_addr.i64 ss7+16
    store notrap v60, v87
    v88 = stack_addr.i64 ss7+32
    store notrap v60, v88
    v89 = stack_addr.i64 ss7+48
    store notrap v60, v89
    v90 = stack_addr.i64 ss7+64
    store notrap v57, v90 ; v57 = 0
    v91 = stack_addr.i64 ss7+66
    store notrap v56, v91 ; v56 = 0
    v92 = select v1, v52, v52 ; v52 = const0, v52 = const0
    v93 = select v1, v92, v92
    v94 = select v1, v93, v93
    v95 = select v1, v94, v94
    v96 = iadd v11, v11
    v97 = ineg v96
    v495 = stack_load.i8 ss17
    v98 = ishl v53, v495 ; v53 = const1
    v494 = stack_load.i8 ss17
    v99 = ishl v98, v494
    v493 = stack_load.i8 ss17
    v100 = ishl v99, v493
    v101 = select v1, v95, v95
    v102 = select v1, v101, v101
    v103 = select v1, v102, v102
    v104 = select v1, v103, v103
    v105 = select v1, v104, v104
    v106 = select v1, v105, v105
    v107 = select v1, v106, v106
    v108 = select v1, v107, v107
    stack_store v108, ss15
    v492 = stack_load.i8 ss17
    v109 = ishl v100, v492
    stack_store v109, ss8
    v491 = stack_load.i16x8 ss8
    v110 = ishl v491, v12
    v482 = stack_load.i8 ss17
    v483 = stack_load.i64x2 ss15
    v484 = stack_load.i16 ss20
    v485 = stack_load.i16 ss20
    v486 = stack_load.i64x2 ss13
    v487 = stack_load.f64x2 ss9
    v488 = stack_load.i64 ss12
    v489 = stack_load.i16 ss14
    v490 = stack_load.i64x2 ss13
    v111, v112, v113, v114, v115, v116, v117, v118, v119 = call fn0(v482, v483, v7, v484, v485, v12, v486, v3, v3, v3, v487, v488, v110, v489, v490, v110), stack_map=[i64x2 @ ss13+0, i64 @ ss12+0, i16 @ ss20+0, i8x16 @ ss18+0, f64x2 @ ss9+0, i16 @ ss16+0, i16 @ ss14+0, i16 @ ss11+0, i8 @ ss17+0, i64x2 @ ss15+0, i16x8 @ ss8+0]
    v481 = stack_load.i8x16 ss18
    v120 = bnot v481
    v121 = bnot v120
    v122 = bnot v121
    v123 = bnot v122
    v124 = vhigh_bits.i8 v123
    v125 = bnot v123
    stack_store v125, ss18
    v480 = stack_load.i8x16 ss18
    v126 = bnot v480
    stack_store v126, ss19
    v478 = stack_load.i16x8 ss8
    v479 = stack_load.i8 ss17
    v127 = ishl v478, v479
    v128 = vhigh_bits.i8 v119
    v129 = fcvt_to_sint.i32 v118
    v130 = fcvt_to_sint.i32 v118
    stack_store v130, ss10
    v477 = stack_load.i16x8 ss8
    v131 = uwiden_low v477
    v476 = stack_load.i16x8 ss8
    v132 = ishl v476, v128
    v468 = stack_load.i64x2 ss13
    v469 = stack_load.i16 ss20
    v470 = stack_load.i16 ss20
    v471 = stack_load.i64x2 ss15
    v472 = stack_load.f64x2 ss9
    v473 = stack_load.i64 ss12
    v474 = stack_load.i16 ss16
    v475 = stack_load.i64x2 ss15
    v133, v134, v135, v136, v137, v138, v139, v140, v141 = call fn0(v124, v468, v118, v469, v470, v128, v471, v117, v117, v117, v472, v473, v132, v474, v475, v132), stack_map=[i64x2 @ ss13+0, i64 @ ss12+0, i16 @ ss20+0, f64x2 @ ss9+0, i16 @ ss16+0, i16 @ ss14+0, i16 @ ss11+0, i64x2 @ ss15+0, i8x16 @ ss18+0, i8x16 @ ss19+0, i32 @ ss10+0]
    stack_store v136, ss8
    v466 = stack_load.i8x16 ss19
    v467 = stack_load.i8x16 ss18
    v142 = bxor_not v466, v467
    stack_store v142, ss19
    v456 = stack_load.i64x2 ss13
    v457 = stack_load.i16 ss16
    v458 = stack_load.i16 ss16
    v459 = stack_load.i64x2 ss15
    v460 = stack_load.f64x2 ss9
    v461 = stack_load.i64 ss12
    v462 = stack_load.i16x8 ss8
    v463 = stack_load.i16 ss16
    v464 = stack_load.i64x2 ss15
    v465 = stack_load.i16x8 ss8
    v143, v144, v145, v146, v147, v148, v149, v150, v151 = call fn0(v137, v456, v140, v457, v458, v133, v459, v139, v139, v139, v460, v461, v462, v463, v464, v465), stack_map=[i64x2 @ ss13+0, i64 @ ss12+0, i16 @ ss20+0, f64x2 @ ss9+0, i16 @ ss16+0, i16 @ ss14+0, i16 @ ss11+0, i64x2 @ ss15+0, i32 @ ss10+0, i16x8 @ ss8+0, i8x16 @ ss19+0]
    stack_store v147, ss17
    v454 = stack_load.i8x16 ss19
    v455 = stack_load.i8 ss17
    v152 = ushr v454, v455
    stack_store v152, ss18
    v452 = stack_load.i64x2 ss15
    v453 = stack_load.i64x2 ss15
    v153 = select v145, v452, v453
    v154 = select v145, v153, v153
    v155 = select v145, v154, v154
    v156 = select v145, v155, v155
    v157 = select v145, v156, v156
    v158 = select v145, v157, v157
    v159 = select v145, v158, v158
    stack_store v159, ss15
    v450 = stack_load.i16x8 ss8
    v451 = stack_lo
```

[message truncated]
</details>
afonso360 commented on issue #9090:
Here's a minimized version of the above:
```
test compile
target s390x

function u1:0() -> i8x16, i8, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2 {
    sig0 = (i8, i64x2, f32, i16, i16, i8, i64x2, f64, f64, f64, f64x2, i64, i16x8, i16, i64x2, i16x8) -> i8, f32x4, i128, i16x8, i8, i32x4, f64, f32, i32x4 tail
    fn0 = u2:0 sig0
    const0 = 0x00000000000000000000000000000000

block0:
    v0 = iconst.i8 0
    v1 = iconst.i16 0
    v2 = iconst.i64 0
    v3 = f32const 0.0
    v4 = f64const 0.0
    v5 = vconst.i16x8 const0
    v6 = vconst.i64x2 const0
    v7 = vconst.f64x2 const0
    v8, v9, v10, v11, v12, v13, v14, v15, v16 = call fn0(v0, v6, v3, v1, v1, v0, v6, v4, v4, v4, v7, v2, v5, v1, v6, v5)
    trap user0
}
```
Having the `tail` CC on the call seems to make a difference.
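Not from the issue itself, but for readers who want to poke at the reduced case outside the filetest harness, the sketch below shows one way it might be fed through `cranelift-reader` and `cranelift-codegen` for the s390x backend. The exact API surface (`isa::lookup_by_name`, the control-plane argument to `Context::compile`) is an assumption based on recent Cranelift and may need adjusting to the checkout being tested; running `clif-util test` on the file inside a Cranelift checkout is the usual route.

```rust
// Hypothetical reproduction sketch (not from the issue): parse the reduced CLIF
// above and compile it with the s390x backend. Crate versions must match the
// Cranelift checkout being investigated; API details are assumptions.
use cranelift_codegen::{isa, settings, Context};
use cranelift_reader::parse_functions;

fn main() {
    // The reduced test case from the comment above, saved to a file.
    let clif = std::fs::read_to_string("input.clif").expect("read input.clif");

    // Default flags; the reduced case does not carry any `set ...` options.
    let flags = settings::Flags::new(settings::builder());
    let isa = isa::lookup_by_name("s390x-unknown-linux-gnu")
        .expect("s390x backend enabled")
        .finish(flags)
        .expect("build TargetIsa");

    for func in parse_functions(&clif).expect("parse CLIF") {
        let mut ctx = Context::for_function(func);
        // The second argument is the chaos-mode control plane; the default is
        // fine here. The failure reported in this issue is expected to surface
        // during this call.
        ctx.compile(isa.as_ref(), &mut Default::default())
            .expect("compile function for s390x");
    }
}
```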
cfallin closed issue #9090.