From a4f5472e94c77a33bafb28926af6ccc7fd1d2953 Mon Sep 17 00:00:00 2001 From: caheckman <48068198+caheckman@users.noreply.github.com> Date: Wed, 14 Oct 2020 16:21:37 -0400 Subject: [PATCH 1/5] Refactor AARCH64 neon --- .../data/languages/AARCH64instructions.sinc | 265 +- .../AARCH64/data/languages/AARCH64neon.sinc | 38875 +++------------- ...ARCH64EmulateInstructionStateModifier.java | 274 +- 3 files changed, 6688 insertions(+), 32726 deletions(-) diff --git a/Ghidra/Processors/AARCH64/data/languages/AARCH64instructions.sinc b/Ghidra/Processors/AARCH64/data/languages/AARCH64instructions.sinc index a6ea8c2288..85531dc222 100644 --- a/Ghidra/Processors/AARCH64/data/languages/AARCH64instructions.sinc +++ b/Ghidra/Processors/AARCH64/data/languages/AARCH64instructions.sinc @@ -2644,27 +2644,86 @@ vIndexHLM: val is b_2223=0 & b_2121 & b_1111 & b_2020 [ val = b_1111 << 2 | b_21 vIndexHL: val is b_2223=0b01 & b_21 & b_11 [ val = b_11 << 1 | b_21; ] { export *[const]:8 val; } vIndexHL: b_11 is b_2223=0b10 & b_11 { export *[const]:8 b_11; } -Re_VPR128.B.vIndex: Re_VPR128.B^"["^vIndex^"]" is Re_VPR128.B & vIndex { } -Re_VPR128.S.vIndex: Re_VPR128.S^"["^vIndex^"]" is Re_VPR128.S & vIndex { } -Re_VPR128.D.vIndex: Re_VPR128.D^"["^vIndex^"]" is Re_VPR128.D & vIndex { } +@if DATA_ENDIAN == "little" +Re_VPR128.B.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + b_1111 * 2 + b_2121; ] { export *[register]:1 val; } +Re_VPR128.B.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + b_1111; ] { export *[register]:1 val; } +Re_VPR128.S.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + (b_1111 * 2 + b_2121) * 4; ] { export *[register]:4 val; } +Re_VPR128.S.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + b_1111 * 4; ] { export *[register]:4 val; } +Re_VPR128.D.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + (b_1111 * 2 + b_2121) * 8; ] { export *[register]:8 val; } +Re_VPR128.D.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + b_1111 * 8; ] { export *[register]:8 val; } +@else +Re_VPR128.B.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x501f + 32*Re_VPR128 - b_1111 * 2 - b_2121; ] { export *[register]:1 val; } +Re_VPR128.B.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x501f + 32*Re_VPR128 - b_1111; ] { export *[register]:1 val; } +Re_VPR128.S.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x501c + 32*Re_VPR128 - (b_1111 * 2 + b_2121) * 4; ] { export *[register]:4 val; } +Re_VPR128.S.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x501c + 32*Re_VPR128 - b_1111 * 4; ] { export *[register]:4 val; } +Re_VPR128.D.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x5018 + 32*Re_VPR128 - (b_1111 * 2 + b_2121) * 8; ] { export *[register]:8 val; } +Re_VPR128.D.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x5018 + 32*Re_VPR128 - b_1111 * 8; ] { export *[register]:8 val; } +@endif -Rd_VPR128.B.imm_neon_uimm4: Rd_VPR128.B^"["^imm_neon_uimm4^"]" is Rd_VPR128.B & imm_neon_uimm4 { export Rd_VPR128.B; } -Rd_VPR128.H.imm_neon_uimm3: Rd_VPR128.H^"["^imm_neon_uimm3^"]" is Rd_VPR128.H & imm_neon_uimm3 { export Rd_VPR128.H; } -Rd_VPR128.S.imm_neon_uimm2: Rd_VPR128.S^"["^imm_neon_uimm2^"]" is Rd_VPR128.S & imm_neon_uimm2 { 
export Rd_VPR128.S; } -Rd_VPR128.D.imm_neon_uimm1: Rd_VPR128.D^"["^imm_neon_uimm1^"]" is Rd_VPR128.D & imm_neon_uimm1 { export Rd_VPR128.D; } +Re_VPR128.B.vIndex: Re_VPR128.B^"["^vIndex^"]" is Re_VPR128.B & vIndex & Re_VPR128.B.sel { export Re_VPR128.B.sel; } +Re_VPR128.S.vIndex: Re_VPR128.S^"["^vIndex^"]" is Re_VPR128.S & vIndex & Re_VPR128.S.sel { export Re_VPR128.S.sel; } +Re_VPR128.D.vIndex: Re_VPR128.D^"["^vIndex^"]" is Re_VPR128.D & vIndex & Re_VPR128.D.sel { export Re_VPR128.D.sel; } -Rn_VPR128.B.immN_neon_uimm4: Rn_VPR128.B^"["^immN_neon_uimm4^"]" is Rn_VPR128.B & immN_neon_uimm4 { export Rn_VPR128.B; } -Rn_VPR128.H.immN_neon_uimm3: Rn_VPR128.H^"["^immN_neon_uimm3^"]" is Rn_VPR128.H & immN_neon_uimm3 { export Rn_VPR128.H; } -Rn_VPR128.S.immN_neon_uimm2: Rn_VPR128.S^"["^immN_neon_uimm2^"]" is Rn_VPR128.S & immN_neon_uimm2 { export Rn_VPR128.S; } -Rn_VPR128.D.immN_neon_uimm1: Rn_VPR128.D^"["^immN_neon_uimm1^"]" is Rn_VPR128.D & immN_neon_uimm1 { export Rn_VPR128.D; } +@if DATA_ENDIAN == "little" +Rd_VPR128.B.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm4 [ val = 0x5000 + 32*Rd_VPR128 + imm_neon_uimm4; ] { export *[register]:1 val; } +Rd_VPR128.H.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm3 [ val = 0x5000 + 32*Rd_VPR128 + 2*imm_neon_uimm3; ] { export *[register]:2 val; } +Rd_VPR128.S.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm2 [ val = 0x5000 + 32*Rd_VPR128 + 4*imm_neon_uimm2; ] { export *[register]:4 val; } +Rd_VPR128.D.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm1 [ val = 0x5000 + 32*Rd_VPR128 + 8*imm_neon_uimm1; ] { export *[register]:8 val; } +@else +Rd_VPR128.B.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm4 [ val = 0x501f + 32*Rd_VPR128 - imm_neon_uimm4; ] { export *[register]:1 val; } +Rd_VPR128.H.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm3 [ val = 0x501e + 32*Rd_VPR128 - 2*imm_neon_uimm3; ] { export *[register]:2 val; } +Rd_VPR128.S.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm2 [ val = 0x501c + 32*Rd_VPR128 - 4*imm_neon_uimm2; ] { export *[register]:4 val; } +Rd_VPR128.D.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm1 [ val = 0x5018 + 32*Rd_VPR128 - 8*imm_neon_uimm1; ] { export *[register]:8 val; } +@endif +Rd_VPR128.B.imm_neon_uimm4: Rd_VPR128.B^"["^imm_neon_uimm4^"]" is Rd_VPR128.B & imm_neon_uimm4 & Rd_VPR128.B.sel { export Rd_VPR128.B.sel; } +Rd_VPR128.H.imm_neon_uimm3: Rd_VPR128.H^"["^imm_neon_uimm3^"]" is Rd_VPR128.H & imm_neon_uimm3 & Rd_VPR128.H.sel { export Rd_VPR128.H.sel; } +Rd_VPR128.S.imm_neon_uimm2: Rd_VPR128.S^"["^imm_neon_uimm2^"]" is Rd_VPR128.S & imm_neon_uimm2 & Rd_VPR128.S.sel { export Rd_VPR128.S.sel; } +Rd_VPR128.D.imm_neon_uimm1: Rd_VPR128.D^"["^imm_neon_uimm1^"]" is Rd_VPR128.D & imm_neon_uimm1 & Rd_VPR128.D.sel { export Rd_VPR128.D.sel; } -Rn_VPR128.B.imm_neon_uimm4: Rn_VPR128.B^"["^imm_neon_uimm4^"]" is Rn_VPR128.B & imm_neon_uimm4 { export Rn_VPR128.B; } -Rn_VPR128.H.imm_neon_uimm3: Rn_VPR128.H^"["^imm_neon_uimm3^"]" is Rn_VPR128.H & imm_neon_uimm3 { export Rn_VPR128.H; } -Rn_VPR128.S.imm_neon_uimm2: Rn_VPR128.S^"["^imm_neon_uimm2^"]" is Rn_VPR128.S & imm_neon_uimm2 { export Rn_VPR128.S; } -Rn_VPR128.D.imm_neon_uimm1: Rn_VPR128.D^"["^imm_neon_uimm1^"]" is Rn_VPR128.D & imm_neon_uimm1 { export Rn_VPR128.D; } +@if DATA_ENDIAN == "little" +Rn_VPR128.B.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm4 [ val = 0x5000 + 32*Rn_VPR128 + immN_neon_uimm4; ] { export *[register]:1 val; } +Rn_VPR128.H.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm3 [ val = 0x5000 + 32*Rn_VPR128 + 2*immN_neon_uimm3; ] { export *[register]:2 
val; } +Rn_VPR128.S.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm2 [ val = 0x5000 + 32*Rn_VPR128 + 4*immN_neon_uimm2; ] { export *[register]:4 val; } +Rn_VPR128.D.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm1 [ val = 0x5000 + 32*Rn_VPR128 + 8*immN_neon_uimm1; ] { export *[register]:8 val; } +@else +Rn_VPR128.B.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm4 [ val = 0x501f + 32*Rn_VPR128 - immN_neon_uimm4; ] { export *[register]:1 val; } +Rn_VPR128.H.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm3 [ val = 0x501e + 32*Rn_VPR128 - 2*immN_neon_uimm3; ] { export *[register]:2 val; } +Rn_VPR128.S.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm2 [ val = 0x501c + 32*Rn_VPR128 - 4*immN_neon_uimm2; ] { export *[register]:4 val; } +Rn_VPR128.D.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm1 [ val = 0x5018 + 32*Rn_VPR128 - 8*immN_neon_uimm1; ] { export *[register]:8 val; } +@endif +Rn_VPR128.B.immN_neon_uimm4: Rn_VPR128.B^"["^immN_neon_uimm4^"]" is Rn_VPR128.B & immN_neon_uimm4 & Rn_VPR128.B.selN { export Rn_VPR128.B.selN; } +Rn_VPR128.H.immN_neon_uimm3: Rn_VPR128.H^"["^immN_neon_uimm3^"]" is Rn_VPR128.H & immN_neon_uimm3 & Rn_VPR128.H.selN { export Rn_VPR128.H.selN; } +Rn_VPR128.S.immN_neon_uimm2: Rn_VPR128.S^"["^immN_neon_uimm2^"]" is Rn_VPR128.S & immN_neon_uimm2 & Rn_VPR128.S.selN { export Rn_VPR128.S.selN; } +Rn_VPR128.D.immN_neon_uimm1: Rn_VPR128.D^"["^immN_neon_uimm1^"]" is Rn_VPR128.D & immN_neon_uimm1 & Rn_VPR128.D.selN { export Rn_VPR128.D.selN; } + +@if DATA_ENDIAN == "little" +Rn_VPR128.B.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm4 [ val = 0x5000 + 32*Rn_VPR128 + imm_neon_uimm4; ] { export *[register]:1 val; } +Rn_VPR128.H.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm3 [ val = 0x5000 + 32*Rn_VPR128 + 2*imm_neon_uimm3; ] { export *[register]:2 val; } +Rn_VPR128.S.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm2 [ val = 0x5000 + 32*Rn_VPR128 + 4*imm_neon_uimm2; ] { export *[register]:4 val; } +Rn_VPR128.D.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm1 [ val = 0x5000 + 32*Rn_VPR128 + 8*imm_neon_uimm1; ] { export *[register]:8 val; } +@else +Rn_VPR128.B.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm4 [ val = 0x501f + 32*Rn_VPR128 - imm_neon_uimm4; ] { export *[register]:1 val; } +Rn_VPR128.H.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm3 [ val = 0x501e + 32*Rn_VPR128 - 2*imm_neon_uimm3; ] { export *[register]:2 val; } +Rn_VPR128.S.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm2 [ val = 0x501c + 32*Rn_VPR128 - 4*imm_neon_uimm2; ] { export *[register]:4 val; } +Rn_VPR128.D.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm1 [ val = 0x5018 + 32*Rn_VPR128 - 8*imm_neon_uimm1; ] { export *[register]:8 val; } +@endif +Rn_VPR128.B.imm_neon_uimm4: Rn_VPR128.B^"["^imm_neon_uimm4^"]" is Rn_VPR128.B & imm_neon_uimm4 & Rn_VPR128.B.sel { export Rn_VPR128.B.sel; } +Rn_VPR128.H.imm_neon_uimm3: Rn_VPR128.H^"["^imm_neon_uimm3^"]" is Rn_VPR128.H & imm_neon_uimm3 & Rn_VPR128.H.sel { export Rn_VPR128.H.sel; } +Rn_VPR128.S.imm_neon_uimm2: Rn_VPR128.S^"["^imm_neon_uimm2^"]" is Rn_VPR128.S & imm_neon_uimm2 & Rn_VPR128.S.sel { export Rn_VPR128.S.sel; } +Rn_VPR128.D.imm_neon_uimm1: Rn_VPR128.D^"["^imm_neon_uimm1^"]" is Rn_VPR128.D & imm_neon_uimm1 & Rn_VPR128.D.sel { export Rn_VPR128.D.sel; } Re_VPR128.H.vIndexHL: Re_VPR128.H^"["^vIndexHL^"]" is Re_VPR128.H & vIndexHL { } -Re_VPR128Lo.H.vIndexHLM: Re_VPR128Lo.H^"["^vIndexHLM^"]" is Re_VPR128Lo.H & vIndexHLM { } + +@if DATA_ENDIAN == "little" +Re_VPR128Lo.H.sel: Re_VPR128, val is Re_VPR128 & b_2223=2 & b_2121 & b_1111 [ val = 
0x5000 + 32*Re_VPR128 + (b_1111 * 2 + b_2121)*2; ] { export *[register]:2 val; } +Re_VPR128Lo.H.sel: Re_VPR128, val is Re_VPR128 & b_2223=1 & b_2121 & b_1111 & b_2020 [ val = 0x5000 + 32*Re_VPR128 + (b_1111*4 + b_2121*2 + b_2020)*2; ] { export *[register]:2 val; } +Re_VPR128Lo.H.sel: Re_VPR128, val is Re_VPR128 & b_2223=0 & b_2121 & b_1111 & b_2020 [ val = 0x5000 + 32*Re_VPR128 + (b_1111*4 + b_2121*2 + b_2020)*2; ] { export *[register]:2 val; } +@else +Re_VPR128Lo.H.sel: Re_VPR128, val is Re_VPR128 & b_2223=2 & b_2121 & b_1111 [ val = 0x501e + 32*Re_VPR128 - (b_1111 * 2 + b_2121)*2; ] { export *[register]:2 val; } +Re_VPR128Lo.H.sel: Re_VPR128, val is Re_VPR128 & b_2223=1 & b_2121 & b_1111 & b_2020 [ val = 0x501e + 32*Re_VPR128 - (b_1111*4 + b_2121*2 + b_2020)*2; ] { export *[register]:2 val; } +Re_VPR128Lo.H.sel: Re_VPR128, val is Re_VPR128 & b_2223=0 & b_2121 & b_1111 & b_2020 [ val = 0x501e + 32*Re_VPR128 - (b_1111*4 + b_2121*2 + b_2020)*2; ] { export *[register]:2 val; } +@endif +Re_VPR128Lo.H.vIndexHLM: Re_VPR128Lo.H^"["^vIndexHLM^"]" is Re_VPR128Lo.H & vIndexHLM & Re_VPR128Lo.H.sel { export Re_VPR128Lo.H.sel; } FBitsOp: "#"^fbits is Scale [ fbits = 64 - Scale; ] { export *[const]:2 fbits; } @@ -3193,55 +3252,17 @@ PACIXSP_BTITARGETS: is ShowBTI=0 { } # These pseudo ops are used in neon -define pcodeop SIMD_COPY; -define pcodeop SIMD_FLOAT; -define pcodeop SIMD_FLOAT2FLOAT; -define pcodeop SIMD_FLOAT_ABS; -define pcodeop SIMD_FLOAT_ADD; -define pcodeop SIMD_FLOAT_DIV; -define pcodeop SIMD_FLOAT_MULT; -define pcodeop SIMD_FLOAT_NEG; -define pcodeop SIMD_FLOAT_SUB; -define pcodeop SIMD_INT; -define pcodeop SIMD_INT_2COMP; -define pcodeop SIMD_INT_ABS; -define pcodeop SIMD_INT_ADD; -define pcodeop SIMD_INT_AND; -define pcodeop SIMD_INT_LEFT; -define pcodeop SIMD_INT_LESS; -define pcodeop SIMD_INT_MULT; -define pcodeop SIMD_INT_NEGATE; -define pcodeop SIMD_INT_OR; -define pcodeop SIMD_INT_RIGHT; -define pcodeop SIMD_INT_SEXT; -define pcodeop SIMD_INT_SLESS; -define pcodeop SIMD_INT_SRIGHT; -define pcodeop SIMD_INT_SUB; -define pcodeop SIMD_INT_XOR; -define pcodeop SIMD_INT_ZEXT; define pcodeop SIMD_PIECE; -define pcodeop SIMD_TRUNC; -define pcodeop NEON_abs; -define pcodeop NEON_add; -define pcodeop NEON_addhn; -define pcodeop NEON_addhn2; -define pcodeop NEON_addp; define pcodeop NEON_addv; define pcodeop NEON_aesd; define pcodeop NEON_aese; define pcodeop NEON_aesimc; define pcodeop NEON_aesmc; -define pcodeop NEON_and; -define pcodeop NEON_bcax; -define pcodeop NEON_bfcvt; -define pcodeop NEON_bfcvtn; -define pcodeop NEON_bfcvtn2; define pcodeop NEON_bfdot; define pcodeop NEON_bfmlalb; define pcodeop NEON_bfmlalt; define pcodeop NEON_bfmmla; -define pcodeop NEON_bic; define pcodeop NEON_bif; define pcodeop NEON_bit; define pcodeop NEON_bsl; @@ -3256,40 +3277,18 @@ define pcodeop NEON_cmle; define pcodeop NEON_cmlt; define pcodeop NEON_cmtst; define pcodeop NEON_cnt; -define pcodeop NEON_dup; -define pcodeop NEON_eor; -define pcodeop NEON_eor3; define pcodeop NEON_ext; -define pcodeop NEON_fabd; -define pcodeop NEON_fabs; define pcodeop NEON_facge; define pcodeop NEON_facgt; -define pcodeop NEON_fadd; -define pcodeop NEON_faddp; define pcodeop NEON_fcadd; -define pcodeop NEON_fccmp; -define pcodeop NEON_fccmpe; define pcodeop NEON_fcmeq; define pcodeop NEON_fcmge; define pcodeop NEON_fcmgt; define pcodeop NEON_fcmla; define pcodeop NEON_fcmle; define pcodeop NEON_fcmlt; -define pcodeop NEON_fcmp; -define pcodeop NEON_fcmpe; -define pcodeop NEON_fcsel; -define pcodeop NEON_fcvt; -define 
pcodeop NEON_fcvt_amnpz_su; -define pcodeop NEON_fcvtl; -define pcodeop NEON_fcvtl2; -define pcodeop NEON_fcvtn; -define pcodeop NEON_fcvtn2; -define pcodeop NEON_fcvtxn; -define pcodeop NEON_fcvtxn2; define pcodeop NEON_fcvtzs; define pcodeop NEON_fcvtzu; -define pcodeop NEON_fdiv; -define pcodeop NEON_fjcvtzs; define pcodeop NEON_fmadd; define pcodeop NEON_fmax; define pcodeop NEON_fmaxnm; @@ -3303,50 +3302,22 @@ define pcodeop NEON_fminnmp; define pcodeop NEON_fminnmv; define pcodeop NEON_fminp; define pcodeop NEON_fminv; -define pcodeop NEON_fmla; -define pcodeop NEON_fmlal; -define pcodeop NEON_fmlal2; -define pcodeop NEON_fmls; -define pcodeop NEON_fmlsl; -define pcodeop NEON_fmlsl2; define pcodeop NEON_fmov; define pcodeop NEON_fmsub; -define pcodeop NEON_fmul; define pcodeop NEON_fmulx; -define pcodeop NEON_fneg; define pcodeop NEON_fnmadd; define pcodeop NEON_fnmsub; -define pcodeop NEON_fnmul; define pcodeop NEON_frecpe; define pcodeop NEON_frecps; define pcodeop NEON_frecpx; -define pcodeop NEON_frint_aimnpxz; define pcodeop NEON_frsqrte; define pcodeop NEON_frsqrts; define pcodeop NEON_fsqrt; -define pcodeop NEON_fsub; -define pcodeop NEON_ldnp1; -define pcodeop NEON_ldnp2; -define pcodeop NEON_ldp1; -define pcodeop NEON_ldp2; -define pcodeop NEON_ldr; -define pcodeop NEON_ldur; -define pcodeop NEON_mla; -define pcodeop NEON_mls; -define pcodeop NEON_mov; -define pcodeop NEON_movi; -define pcodeop NEON_mul; -define pcodeop NEON_mvn; -define pcodeop NEON_mvni; define pcodeop NEON_neg; -define pcodeop NEON_orn; -define pcodeop NEON_orr; define pcodeop NEON_pmul; define pcodeop NEON_pmull; define pcodeop NEON_pmull2; define pcodeop NEON_raddhn; -define pcodeop NEON_raddhn2; -define pcodeop NEON_rax1; define pcodeop NEON_rbit; define pcodeop NEON_rev16; define pcodeop NEON_rev32; @@ -3356,22 +3327,11 @@ define pcodeop NEON_rshrn2; define pcodeop NEON_rsubhn; define pcodeop NEON_rsubhn2; define pcodeop NEON_saba; -define pcodeop NEON_sabal; -define pcodeop NEON_sabal2; define pcodeop NEON_sabd; -define pcodeop NEON_sabdl; -define pcodeop NEON_sabdl2; -define pcodeop NEON_sadalp; -define pcodeop NEON_saddl; -define pcodeop NEON_saddl2; -define pcodeop NEON_saddlp; define pcodeop NEON_saddlv; -define pcodeop NEON_saddw; -define pcodeop NEON_saddw2; define pcodeop NEON_scvtf; define pcodeop NEON_sdot; define pcodeop NEON_sha1c; -define pcodeop NEON_sha1h; define pcodeop NEON_sha1m; define pcodeop NEON_sha1p; define pcodeop NEON_sha1su0; @@ -3386,10 +3346,6 @@ define pcodeop NEON_sha512su0; define pcodeop NEON_sha512su1; define pcodeop NEON_shadd; define pcodeop NEON_shl; -define pcodeop NEON_shll; -define pcodeop NEON_shll2; -define pcodeop NEON_shrn; -define pcodeop NEON_shrn2; define pcodeop NEON_shsub; define pcodeop NEON_sli; define pcodeop NEON_sm3partw1; @@ -3407,24 +3363,10 @@ define pcodeop NEON_smaxv; define pcodeop NEON_smin; define pcodeop NEON_sminp; define pcodeop NEON_sminv; -define pcodeop NEON_smlal; -define pcodeop NEON_smlal2; -define pcodeop NEON_smlsl; -define pcodeop NEON_smlsl2; -define pcodeop NEON_smov; define pcodeop NEON_smmla; -define pcodeop NEON_smull; -define pcodeop NEON_smull2; -define pcodeop NEON_sqabs; define pcodeop NEON_sqadd; -define pcodeop NEON_sqdmlal; -define pcodeop NEON_sqdmlal2; -define pcodeop NEON_sqdmlsl; -define pcodeop NEON_sqdmlsl2; define pcodeop NEON_sqdmulh; define pcodeop NEON_sqdmull; -define pcodeop NEON_sqdmull2; -define pcodeop NEON_sqneg; define pcodeop NEON_sqrdml_as_h; define pcodeop NEON_sqrdmulh; define pcodeop 
NEON_sqrshl; @@ -3447,45 +3389,12 @@ define pcodeop NEON_srhadd; define pcodeop NEON_sri; define pcodeop NEON_srshl; define pcodeop NEON_srshr; -define pcodeop NEON_srsra; define pcodeop NEON_sshl; -define pcodeop NEON_sshll; -define pcodeop NEON_sshll2; define pcodeop NEON_sshr; -define pcodeop NEON_ssra; -define pcodeop NEON_ssubl; -define pcodeop NEON_ssubl2; -define pcodeop NEON_ssubw; -define pcodeop NEON_ssubw2; -define pcodeop NEON_stnp1; -define pcodeop NEON_stnp2; -define pcodeop NEON_stp1; -define pcodeop NEON_stp2; -define pcodeop NEON_str; -define pcodeop NEON_stur; -define pcodeop NEON_sub; -define pcodeop NEON_subhn; -define pcodeop NEON_subhn2; define pcodeop NEON_sudot; -define pcodeop NEON_suqadd; -define pcodeop NEON_sxtl; -define pcodeop NEON_sxtl2; -define pcodeop NEON_tblx; -define pcodeop NEON_trn1; -define pcodeop NEON_trn2; define pcodeop NEON_uaba; -define pcodeop NEON_uabal; -define pcodeop NEON_uabal2; define pcodeop NEON_uabd; -define pcodeop NEON_uabdl; -define pcodeop NEON_uabdl2; -define pcodeop NEON_uadalp; -define pcodeop NEON_uaddl; -define pcodeop NEON_uaddl2; -define pcodeop NEON_uaddlp; define pcodeop NEON_uaddlv; -define pcodeop NEON_uaddw; -define pcodeop NEON_uaddw2; define pcodeop NEON_ucvtf; define pcodeop NEON_udot; define pcodeop NEON_uhadd; @@ -3496,14 +3405,8 @@ define pcodeop NEON_umaxv; define pcodeop NEON_umin; define pcodeop NEON_uminp; define pcodeop NEON_uminv; -define pcodeop NEON_umlal; -define pcodeop NEON_umlal2; -define pcodeop NEON_umlsl; -define pcodeop NEON_umlsl2; define pcodeop NEON_ummla; -define pcodeop NEON_umov; define pcodeop NEON_umull; -define pcodeop NEON_umull2; define pcodeop NEON_uqadd; define pcodeop NEON_uqrshl; define pcodeop NEON_uqrshrn; @@ -3519,28 +3422,10 @@ define pcodeop NEON_urhadd; define pcodeop NEON_urshl; define pcodeop NEON_urshr; define pcodeop NEON_ursqrte; -define pcodeop NEON_ursra; define pcodeop NEON_usdot; define pcodeop NEON_ushl; -define pcodeop NEON_ushll; -define pcodeop NEON_ushll2; -define pcodeop NEON_ushr; define pcodeop NEON_usmmla; define pcodeop NEON_usqadd; -define pcodeop NEON_usra; -define pcodeop NEON_usubl; -define pcodeop NEON_usubl2; -define pcodeop NEON_usubw; -define pcodeop NEON_usubw2; -define pcodeop NEON_uxtl; -define pcodeop NEON_uxtl2; -define pcodeop NEON_uzp1; -define pcodeop NEON_uzp2; -define pcodeop NEON_xar; -define pcodeop NEON_xtn; -define pcodeop NEON_xtn2; -define pcodeop NEON_zip1; -define pcodeop NEON_zip2; # These pseudo ops are automatically generated diff --git a/Ghidra/Processors/AARCH64/data/languages/AARCH64neon.sinc b/Ghidra/Processors/AARCH64/data/languages/AARCH64neon.sinc index 577544ad39..a6c78a047b 100644 --- a/Ghidra/Processors/AARCH64/data/languages/AARCH64neon.sinc +++ b/Ghidra/Processors/AARCH64/data/languages/AARCH64neon.sinc @@ -12,13 +12,6 @@ # # (aunit.py may require a local copy of a current andre exhaust). 
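A note on the Re_/Rd_/Rn_*.sel subconstructors added to AARCH64instructions.sinc above: each one folds a (vector register, lane index) pair into a byte offset in the register space, so a single lane can be exported directly as a *[register] varnode and read or written without going through a SIMD_COPY-style pseudo-op. The b_1111/b_2121/b_2020 terms simply decode the lane index from the instruction's H:L:M bits before it is scaled. The Python sketch below is illustrative only; it assumes, based on the constants in those constructors, that 0x5000 is the register-space offset of the first 32-byte vector register and that the registers are laid out contiguously at a 32-byte stride.

    # Sketch of the lane-offset arithmetic behind the new .sel subconstructors.
    # Assumption: 0x5000 = register-space offset of v0/z0, 32 bytes per register.
    def lane_offset(reg_index, lane, lane_size, big_endian=False):
        base = 0x5000 + 32 * reg_index            # start of this vector register
        if not big_endian:
            return base + lane * lane_size        # LE: lane 0 at the low offset
        # BE: the low 128 bits of the 32-byte register live at the high offsets,
        # so lane 0 is the last lane-sized slot and lanes count downward from there.
        return base + 32 - lane_size - lane * lane_size

    # Cross-check against the per-size constants used in the patch, e.g. the
    # big-endian S-lane form 0x501c + 32*reg - 4*index:
    assert lane_offset(2, 3, 4, big_endian=True) == 0x501c + 32*2 - 4*3
    assert lane_offset(2, 3, 4)                  == 0x5000 + 32*2 + 4*3

The per-size bases in the big-endian branch (0x501f, 0x501e, 0x501c, 0x5018) are just this same formula pre-folded for lane sizes 1, 2, 4 and 8.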
-# uncomment only one of these: - -@define SEMANTIC_force "" # force-semantics -# @define SEMANTIC_primitive "" # adds 21 MB (LE) and 32 MB (BE) -@define SEMANTIC_pcode "" # adds 3 MB (LE) and 3 MB (BE) -# @define SEMANTIC_pseudo "" # adds 1 MB (LE) and 2 MB (BE) - # C7.2.1 ABS page C7-1399 line 77427 MATCH x5e20b800/mask=xff3ffc00 # CONSTRUCT x5ee0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =abs @@ -29,15 +22,8 @@ :abs Rd_FPR64, Rn_FPR64 is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000101110 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = MP_INT_ABS(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = MP_INT_ABS(Rn_FPR64); - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_abs(Rn_FPR64); -@endif } # C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 @@ -50,41 +36,16 @@ is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000101110 & Rd_FPR64 & Rn_FPR64 :abs Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000101110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.8B = MP_INT_ABS(Rn_VPR64.8B) on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); + Rd_VPR64.8B[0,8] = MP_INT_ABS(Rn_VPR64.8B[0,8]); + Rd_VPR64.8B[8,8] = MP_INT_ABS(Rn_VPR64.8B[8,8]); + Rd_VPR64.8B[16,8] = MP_INT_ABS(Rn_VPR64.8B[16,8]); + Rd_VPR64.8B[24,8] = MP_INT_ABS(Rn_VPR64.8B[24,8]); + Rd_VPR64.8B[32,8] = MP_INT_ABS(Rn_VPR64.8B[32,8]); + Rd_VPR64.8B[40,8] = MP_INT_ABS(Rn_VPR64.8B[40,8]); + Rd_VPR64.8B[48,8] = MP_INT_ABS(Rn_VPR64.8B[48,8]); + Rd_VPR64.8B[56,8] = MP_INT_ABS(Rn_VPR64.8B[56,8]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ABS(Rn_VPR64.8B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_abs(Rn_VPR64.8B, 1:1); -@endif } # C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 @@ -97,65 +58,24 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000101110 & Rd_ :abs Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000101110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.16B = 
MP_INT_ABS(Rn_VPR128.16B) on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); + Rd_VPR128.16B[0,8] = MP_INT_ABS(Rn_VPR128.16B[0,8]); + Rd_VPR128.16B[8,8] = MP_INT_ABS(Rn_VPR128.16B[8,8]); + Rd_VPR128.16B[16,8] = MP_INT_ABS(Rn_VPR128.16B[16,8]); + Rd_VPR128.16B[24,8] = MP_INT_ABS(Rn_VPR128.16B[24,8]); + Rd_VPR128.16B[32,8] = MP_INT_ABS(Rn_VPR128.16B[32,8]); + Rd_VPR128.16B[40,8] = MP_INT_ABS(Rn_VPR128.16B[40,8]); + Rd_VPR128.16B[48,8] = MP_INT_ABS(Rn_VPR128.16B[48,8]); + Rd_VPR128.16B[56,8] = MP_INT_ABS(Rn_VPR128.16B[56,8]); + Rd_VPR128.16B[64,8] = MP_INT_ABS(Rn_VPR128.16B[64,8]); + Rd_VPR128.16B[72,8] = MP_INT_ABS(Rn_VPR128.16B[72,8]); + Rd_VPR128.16B[80,8] = MP_INT_ABS(Rn_VPR128.16B[80,8]); + Rd_VPR128.16B[88,8] = MP_INT_ABS(Rn_VPR128.16B[88,8]); + Rd_VPR128.16B[96,8] = MP_INT_ABS(Rn_VPR128.16B[96,8]); + Rd_VPR128.16B[104,8] = MP_INT_ABS(Rn_VPR128.16B[104,8]); + Rd_VPR128.16B[112,8] = MP_INT_ABS(Rn_VPR128.16B[112,8]); + Rd_VPR128.16B[120,8] = MP_INT_ABS(Rn_VPR128.16B[120,8]); zext_zq(Zd); # zero upper 16 bytes 
of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ABS(Rn_VPR128.16B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_abs(Rn_VPR128.16B, 1:1); -@endif } # C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 @@ -168,29 +88,12 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000101110 & Rd_ :abs Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000101110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.4H = MP_INT_ABS(Rn_VPR64.4H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); + Rd_VPR64.4H[0,16] = MP_INT_ABS(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = MP_INT_ABS(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = MP_INT_ABS(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = MP_INT_ABS(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ABS(Rn_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_abs(Rn_VPR64.4H, 2:1); -@endif } # C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 @@ -203,41 +106,16 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000101110 & Rd_ :abs Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000101110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.8H = MP_INT_ABS(Rn_VPR128.8H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); + Rd_VPR128.8H[0,16] = MP_INT_ABS(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = 
MP_INT_ABS(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = MP_INT_ABS(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = MP_INT_ABS(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = MP_INT_ABS(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = MP_INT_ABS(Rn_VPR128.8H[80,16]); + Rd_VPR128.8H[96,16] = MP_INT_ABS(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = MP_INT_ABS(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ABS(Rn_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_abs(Rn_VPR128.8H, 2:1); -@endif } # C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 @@ -250,23 +128,10 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000101110 & Rd_ :abs Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000101110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.2S = MP_INT_ABS(Rn_VPR64.2S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp2 = MP_INT_ABS(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp2 = MP_INT_ABS(* [register]:4 tmp1); + Rd_VPR64.2S[0,32] = MP_INT_ABS(Rn_VPR64.2S[0,32]); + Rd_VPR64.2S[32,32] = MP_INT_ABS(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ABS(Rn_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_abs(Rn_VPR64.2S, 4:1); -@endif } # C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 @@ -279,29 +144,12 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000101110 & Rd_ :abs Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000101110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.4S = MP_INT_ABS(Rn_VPR128.4S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp2 = MP_INT_ABS(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp2 = MP_INT_ABS(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp2 = MP_INT_ABS(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp2 = MP_INT_ABS(* [register]:4 tmp1); + Rd_VPR128.4S[0,32] = MP_INT_ABS(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = MP_INT_ABS(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = MP_INT_ABS(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = MP_INT_ABS(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ABS(Rn_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_abs(Rn_VPR128.4S, 4:1); -@endif } # C7.2.1 ABS page C7-1399 line 77427 MATCH x0e20b800/mask=xbf3ffc00 @@ -314,23 +162,10 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000101110 & Rd_ :abs Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b001110 & 
b_2223=0b11 & b_1021=0b100000101110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.2D = MP_INT_ABS(Rn_VPR128.2D) on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp2 = MP_INT_ABS(* [register]:8 tmp1); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp2 = MP_INT_ABS(* [register]:8 tmp1); + Rd_VPR128.2D[0,64] = MP_INT_ABS(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = MP_INT_ABS(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ABS(Rn_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_abs(Rn_VPR128.2D, 8:1); -@endif } # C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x5e208400/mask=xff20fc00 @@ -342,15 +177,8 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b11 & b_1021=0b100000101110 & Rd_ :add Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x10 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = Rn_FPR64 + Rm_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = Rn_FPR64 + Rm_FPR64; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_add(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 @@ -362,82 +190,24 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :add Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x10 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) || defined(SEMANTIC_force) # simd infix Rd_VPR128.16B = Rn_VPR128.16B + Rm_VPR128.16B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, 
Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] + Rm_VPR128.16B[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] + Rm_VPR128.16B[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] + Rm_VPR128.16B[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] + Rm_VPR128.16B[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] + Rm_VPR128.16B[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] + Rm_VPR128.16B[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] + Rm_VPR128.16B[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] + Rm_VPR128.16B[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] + Rm_VPR128.16B[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] + Rm_VPR128.16B[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] + Rm_VPR128.16B[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] + Rm_VPR128.16B[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] + Rm_VPR128.16B[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] + Rm_VPR128.16B[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] + Rm_VPR128.16B[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] + Rm_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_add(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 @@ 
-449,50 +219,16 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :add Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.8H = Rn_VPR128.8H + Rm_VPR128.8H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_add(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 @@ -504,34 +240,12 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :add Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) || defined(SEMANTIC_force) # simd infix Rd_VPR128.4S = Rn_VPR128.4S + Rm_VPR128.4S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 
= 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) + (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) + (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) + (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) + (* [register]:4 tmp2); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_add(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 @@ -543,26 +257,10 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :add Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.2D = Rn_VPR128.2D + Rm_VPR128.2D on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) + (* [register]:8 tmp2); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) + (* [register]:8 tmp2); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + Rm_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + Rm_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_add(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 @@ -574,50 +272,16 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :add Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x10 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.8B = Rn_VPR64.8B + Rm_VPR64.8B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* 
[register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] + Rm_VPR64.8B[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] + Rm_VPR64.8B[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] + Rm_VPR64.8B[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] + Rm_VPR64.8B[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] + Rm_VPR64.8B[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] + Rm_VPR64.8B[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] + Rm_VPR64.8B[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] + Rm_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ADD(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_add(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 @@ -629,34 +293,12 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :add Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x10 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.4H = Rn_VPR64.4H + Rm_VPR64.4H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 
3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] + Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] + Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] + Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] + Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ADD(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_add(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.2 ADD (vector) page C7-1401 line 77555 MATCH x0e208400/mask=xbf20fc00 @@ -668,26 +310,10 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :add Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x10 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) || defined(SEMANTIC_force) # simd infix Rd_VPR64.2S = Rn_VPR64.2S + Rm_VPR64.2S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) + (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) + (* [register]:4 tmp2); + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] + Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] + Rm_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ADD(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_add(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.3 ADDHN, ADDHN2 page C7-1403 line 77689 MATCH x0e204000/mask=xbf20fc00 @@ -699,39 +325,13 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :addhn Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x4 & b_1011=0 & Rn_VPR128.2D & Rd_VPR64.2S & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.2D + Rm_VPR128.2D on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) + (* [register]:8 tmp3); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) + (* [register]:8 tmp3); + TMPQ1[0,64] = Rn_VPR128.2D[0,64] + Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] + Rm_VPR128.2D[64,64]; # simd shuffle Rd_VPR64.2S = TMPQ1 (@1-0@3-1) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp6 = * [register]:4 tmp5; - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR64.2S[0,32] = TMPQ1[32,32]; + Rd_VPR64.2S[32,32] = TMPQ1[96,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif 
defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ADD(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - local tmp2:4 = 0; - local tmpd:8 = Rd_VPR64.2S; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 0:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_addhn(Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.3 ADDHN, ADDHN2 page C7-1403 line 77689 MATCH x0e204000/mask=xbf20fc00 @@ -743,55 +343,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D :addhn Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x4 & b_1011=0 & Rn_VPR128.4S & Rd_VPR64.4H & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S + Rm_VPR128.4S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) + (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) + (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) + (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) + (* [register]:4 tmp3); + TMPQ1[0,32] = Rn_VPR128.4S[0,32] + Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] + Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] + Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] + Rm_VPR128.4S[96,32]; # simd shuffle Rd_VPR64.4H = TMPQ1 (@1-0@3-1@5-2@7-3) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR64.4H[0,16] = TMPQ1[16,16]; + Rd_VPR64.4H[16,16] = TMPQ1[48,16]; + Rd_VPR64.4H[32,16] = TMPQ1[80,16]; + Rd_VPR64.4H[48,16] = TMPQ1[112,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ADD(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - local tmp2:2 = 0; - local tmpd:8 = Rd_VPR64.4H; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 0:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 1:1); - tmp2 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp2, 2:1); - tmp2 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp2, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_addhn(Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.3 ADDHN, ADDHN2 page C7-1403 line 77689 MATCH x0e204000/mask=xbf20fc00 @@ -803,87 
+365,25 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S :addhn Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x4 & b_1011=0 & Rn_VPR128.8H & Rd_VPR64.8B & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H + Rm_VPR128.8H on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 7, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); + TMPQ1[0,16] = Rn_VPR128.8H[0,16] + Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] + Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] + Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] + Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] + Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] + Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] + Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] + Rm_VPR128.8H[112,16]; # simd shuffle Rd_VPR64.8B = TMPQ1 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 5, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - 
simd_address_at(tmp5, TMPQ1, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR64.8B[0,8] = TMPQ1[8,8]; + Rd_VPR64.8B[8,8] = TMPQ1[24,8]; + Rd_VPR64.8B[16,8] = TMPQ1[40,8]; + Rd_VPR64.8B[24,8] = TMPQ1[56,8]; + Rd_VPR64.8B[32,8] = TMPQ1[72,8]; + Rd_VPR64.8B[40,8] = TMPQ1[88,8]; + Rd_VPR64.8B[48,8] = TMPQ1[104,8]; + Rd_VPR64.8B[56,8] = TMPQ1[120,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ADD(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - local tmp2:1 = 0; - local tmpd:8 = Rd_VPR64.8B; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 0:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 1:1); - tmp2 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp2, 2:1); - tmp2 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp2, 3:1); - tmp2 = SIMD_PIECE(tmp1, 9:1); tmpd = SIMD_COPY(tmpd, tmp2, 4:1); - tmp2 = SIMD_PIECE(tmp1, 11:1); tmpd = SIMD_COPY(tmpd, tmp2, 5:1); - tmp2 = SIMD_PIECE(tmp1, 13:1); tmpd = SIMD_COPY(tmpd, tmp2, 6:1); - tmp2 = SIMD_PIECE(tmp1, 15:1); tmpd = SIMD_COPY(tmpd, tmp2, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_addhn(Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.3 ADDHN, ADDHN2 page C7-1403 line 77689 MATCH x0e204000/mask=xbf20fc00 @@ -895,87 +395,25 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H :addhn2 Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x4 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.16B & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H + Rm_VPR128.8H on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 7, 2, 16); - 
simd_address_at(tmp4, TMPQ1, 7, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); + TMPQ1[0,16] = Rn_VPR128.8H[0,16] + Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] + Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] + Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] + Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] + Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] + Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] + Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] + Rm_VPR128.8H[112,16]; # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 5, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR128.16B[64,8] = TMPQ1[8,8]; + Rd_VPR128.16B[72,8] = TMPQ1[24,8]; + Rd_VPR128.16B[80,8] = TMPQ1[40,8]; + Rd_VPR128.16B[88,8] = TMPQ1[56,8]; + Rd_VPR128.16B[96,8] = TMPQ1[72,8]; + Rd_VPR128.16B[104,8] = TMPQ1[88,8]; + Rd_VPR128.16B[112,8] = TMPQ1[104,8]; + Rd_VPR128.16B[120,8] = TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ADD(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - local tmp2:1 = 0; - local tmpd:16 = Rd_VPR128.16B; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 8:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 9:1); - tmp2 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp2, 10:1); - tmp2 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp2, 11:1); - tmp2 = SIMD_PIECE(tmp1, 9:1); tmpd = SIMD_COPY(tmpd, tmp2, 12:1); - tmp2 = SIMD_PIECE(tmp1, 11:1); tmpd = SIMD_COPY(tmpd, tmp2, 13:1); - tmp2 = SIMD_PIECE(tmp1, 13:1); tmpd = SIMD_COPY(tmpd, tmp2, 14:1); - tmp2 = SIMD_PIECE(tmp1, 15:1); tmpd = SIMD_COPY(tmpd, tmp2, 15:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_addhn2(Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.3 ADDHN, ADDHN2 page C7-1403 line 77689 MATCH x0e204000/mask=xbf20fc00 @@ -987,39 +425,13 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H :addhn2 Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x4 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.4S & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.2D + Rm_VPR128.2D on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; 
- local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) + (* [register]:8 tmp3); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) + (* [register]:8 tmp3); + TMPQ1[0,64] = Rn_VPR128.2D[0,64] + Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] + Rm_VPR128.2D[64,64]; # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-2@3-3) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR128.4S[64,32] = TMPQ1[32,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ADD(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - local tmp2:4 = 0; - local tmpd:16 = Rd_VPR128.4S; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 2:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_addhn2(Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.3 ADDHN, ADDHN2 page C7-1403 line 77689 MATCH x0e204000/mask=xbf20fc00 @@ -1031,55 +443,17 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D :addhn2 Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x4 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.8H & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S + Rm_VPR128.4S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) + (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) + (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) + (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) + (* [register]:4 tmp3); + TMPQ1[0,32] = Rn_VPR128.4S[0,32] + Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] + Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] + Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] + Rm_VPR128.4S[96,32]; # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-4@3-5@5-6@7-7) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - 
simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR128.8H[64,16] = TMPQ1[16,16]; + Rd_VPR128.8H[80,16] = TMPQ1[48,16]; + Rd_VPR128.8H[96,16] = TMPQ1[80,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ADD(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - local tmp2:2 = 0; - local tmpd:16 = Rd_VPR128.8H; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 4:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 5:1); - tmp2 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp2, 6:1); - tmp2 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp2, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_addhn2(Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.4 ADDP (scalar) page C7-1405 line 77812 MATCH x5e31b800/mask=xff3ffc00 @@ -1091,26 +465,11 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S :addp Rd_FPR64, Rn_VPR128.2D is b_3031=1 & u=0 & b_2428=0x1e & b_23=1 & b_1722=0x38 & b_1216=0x1b & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) # sipd infix Rd_FPR64 = +(Rn_VPR128.2D) on pairs lane size (8 to 8) - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:8 = 0; - local tmp5:8 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_FPR64, 0, 8, 8); - tmp4 = * [register]:8 tmp1; - tmp5 = * [register]:8 tmp2; - * [register]:8 tmp3 = tmp4 + tmp5; + tmp1 = Rn_VPR128.2D[0,64]; + tmp2 = Rn_VPR128.2D[64,64]; + Rd_FPR64[0,64] = tmp1 + tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ADD(Rn_VPR128.2D); - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_addp(Rn_VPR128.2D, 8:1); -@endif } # C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 @@ -1122,120 +481,58 @@ is b_3031=1 & u=0 & b_2428=0x1e & b_23=1 & b_1722=0x38 & b_1216=0x1b & b_1011=2 :addp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x17 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.16B,Rm_VPR128.16B) on pairs lane size (1 to 1) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:1 = 0; - local tmp6:1 = 0; - simd_address_at(tmp2, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp4, TMPQ1, 0, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp4, TMPQ1, 1, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp4, TMPQ1, 2, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp4, TMPQ1, 
3, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp4, TMPQ1, 4, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp4, TMPQ1, 5, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp4, TMPQ1, 6, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp4, TMPQ1, 7, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp4, TMPQ1, 8, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp4, TMPQ1, 9, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp4, TMPQ1, 10, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp4, TMPQ1, 11, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp4, TMPQ1, 12, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp4, TMPQ1, 13, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 13, 1, 16); - simd_address_at(tmp4, TMPQ1, 14, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp4, TMPQ1, 15, 1, 16); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; + tmp2 = Rn_VPR128.16B[0,8]; + tmp3 = Rn_VPR128.16B[8,8]; + TMPQ1[0,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[16,8]; + tmp3 = Rn_VPR128.16B[24,8]; + TMPQ1[8,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[32,8]; + tmp3 = Rn_VPR128.16B[40,8]; + TMPQ1[16,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[48,8]; + tmp3 = Rn_VPR128.16B[56,8]; + TMPQ1[24,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[64,8]; + tmp3 = Rn_VPR128.16B[72,8]; + TMPQ1[32,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[80,8]; + tmp3 = Rn_VPR128.16B[88,8]; + TMPQ1[40,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[96,8]; 
+ tmp3 = Rn_VPR128.16B[104,8]; + TMPQ1[48,8] = tmp2 + tmp3; + tmp2 = Rn_VPR128.16B[112,8]; + tmp3 = Rn_VPR128.16B[120,8]; + TMPQ1[56,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[0,8]; + tmp3 = Rm_VPR128.16B[8,8]; + TMPQ1[64,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[16,8]; + tmp3 = Rm_VPR128.16B[24,8]; + TMPQ1[72,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[32,8]; + tmp3 = Rm_VPR128.16B[40,8]; + TMPQ1[80,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[48,8]; + tmp3 = Rm_VPR128.16B[56,8]; + TMPQ1[88,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[64,8]; + tmp3 = Rm_VPR128.16B[72,8]; + TMPQ1[96,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[80,8]; + tmp3 = Rm_VPR128.16B[88,8]; + TMPQ1[104,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[96,8]; + tmp3 = Rm_VPR128.16B[104,8]; + TMPQ1[112,8] = tmp2 + tmp3; + tmp2 = Rm_VPR128.16B[112,8]; + tmp3 = Rm_VPR128.16B[120,8]; + TMPQ1[120,8] = tmp2 + tmp3; Rd_VPR128.16B = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR128.16B, Rm_VPR128.16B); - local tmpd:16 = tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_addp(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 @@ -1247,36 +544,16 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :addp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x17 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.2D,Rm_VPR128.2D) on pairs lane size (8 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:8 = 0; - local tmp6:8 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - tmp5 = * [register]:8 tmp2; - tmp6 = * [register]:8 tmp3; - * [register]:8 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - tmp5 = * [register]:8 tmp2; - tmp6 = * [register]:8 tmp3; - * [register]:8 tmp4 = tmp5 + tmp6; + tmp2 = Rn_VPR128.2D[0,64]; + tmp3 = Rn_VPR128.2D[64,64]; + TMPQ1[0,64] = tmp2 + tmp3; + tmp2 = Rm_VPR128.2D[0,64]; + tmp3 = Rm_VPR128.2D[64,64]; + TMPQ1[64,64] = tmp2 + tmp3; Rd_VPR128.2D = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR128.2D, Rm_VPR128.2D); - local tmpd:16 = tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_addp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 @@ -1288,36 +565,16 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :addp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x17 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.2S,Rm_VPR64.2S) on pairs lane size (4 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - tmp5 = * [register]:4 tmp2; - 
tmp6 = * [register]:4 tmp3; - * [register]:4 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - tmp5 = * [register]:4 tmp2; - tmp6 = * [register]:4 tmp3; - * [register]:4 tmp4 = tmp5 + tmp6; + tmp2 = Rn_VPR64.2S[0,32]; + tmp3 = Rn_VPR64.2S[32,32]; + TMPD1[0,32] = tmp2 + tmp3; + tmp2 = Rm_VPR64.2S[0,32]; + tmp3 = Rm_VPR64.2S[32,32]; + TMPD1[32,32] = tmp2 + tmp3; Rd_VPR64.2S = TMPD1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR64.2S, Rm_VPR64.2S); - local tmpd:8 = tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_addp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 @@ -1329,48 +586,22 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :addp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x17 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.4H,Rm_VPR64.4H) on pairs lane size (2 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:2 = 0; - local tmp6:2 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp4, TMPD1, 0, 2, 8); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 + tmp6; + tmp2 = Rn_VPR64.4H[0,16]; + tmp3 = Rn_VPR64.4H[16,16]; + TMPD1[0,16] = tmp2 + tmp3; + tmp2 = Rn_VPR64.4H[32,16]; + tmp3 = Rn_VPR64.4H[48,16]; + TMPD1[16,16] = tmp2 + tmp3; + tmp2 = Rm_VPR64.4H[0,16]; + tmp3 = Rm_VPR64.4H[16,16]; + TMPD1[32,16] = tmp2 + tmp3; + tmp2 = Rm_VPR64.4H[32,16]; + tmp3 = Rm_VPR64.4H[48,16]; + TMPD1[48,16] = tmp2 + tmp3; Rd_VPR64.4H = TMPD1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR64.4H, Rm_VPR64.4H); - local tmpd:8 = tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_addp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 @@ -1382,48 +613,22 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :addp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x17 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.4S,Rm_VPR128.4S) on pairs lane size (4 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local 
tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - tmp5 = * [register]:4 tmp2; - tmp6 = * [register]:4 tmp3; - * [register]:4 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - tmp5 = * [register]:4 tmp2; - tmp6 = * [register]:4 tmp3; - * [register]:4 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - tmp5 = * [register]:4 tmp2; - tmp6 = * [register]:4 tmp3; - * [register]:4 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - tmp5 = * [register]:4 tmp2; - tmp6 = * [register]:4 tmp3; - * [register]:4 tmp4 = tmp5 + tmp6; + tmp2 = Rn_VPR128.4S[0,32]; + tmp3 = Rn_VPR128.4S[32,32]; + TMPQ1[0,32] = tmp2 + tmp3; + tmp2 = Rn_VPR128.4S[64,32]; + tmp3 = Rn_VPR128.4S[96,32]; + TMPQ1[32,32] = tmp2 + tmp3; + tmp2 = Rm_VPR128.4S[0,32]; + tmp3 = Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = tmp2 + tmp3; + tmp2 = Rm_VPR128.4S[64,32]; + tmp3 = Rm_VPR128.4S[96,32]; + TMPQ1[96,32] = tmp2 + tmp3; Rd_VPR128.4S = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR128.4S, Rm_VPR128.4S); - local tmpd:16 = tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_addp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 @@ -1435,72 +640,34 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :addp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x17 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.8B,Rm_VPR64.8B) on pairs lane size (1 to 1) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:1 = 0; - local tmp6:1 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp4, TMPD1, 0, 1, 8); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp4, TMPD1, 5, 1, 
8); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - tmp5 = * [register]:1 tmp2; - tmp6 = * [register]:1 tmp3; - * [register]:1 tmp4 = tmp5 + tmp6; + tmp2 = Rn_VPR64.8B[0,8]; + tmp3 = Rn_VPR64.8B[8,8]; + TMPD1[0,8] = tmp2 + tmp3; + tmp2 = Rn_VPR64.8B[16,8]; + tmp3 = Rn_VPR64.8B[24,8]; + TMPD1[8,8] = tmp2 + tmp3; + tmp2 = Rn_VPR64.8B[32,8]; + tmp3 = Rn_VPR64.8B[40,8]; + TMPD1[16,8] = tmp2 + tmp3; + tmp2 = Rn_VPR64.8B[48,8]; + tmp3 = Rn_VPR64.8B[56,8]; + TMPD1[24,8] = tmp2 + tmp3; + tmp2 = Rm_VPR64.8B[0,8]; + tmp3 = Rm_VPR64.8B[8,8]; + TMPD1[32,8] = tmp2 + tmp3; + tmp2 = Rm_VPR64.8B[16,8]; + tmp3 = Rm_VPR64.8B[24,8]; + TMPD1[40,8] = tmp2 + tmp3; + tmp2 = Rm_VPR64.8B[32,8]; + tmp3 = Rm_VPR64.8B[40,8]; + TMPD1[48,8] = tmp2 + tmp3; + tmp2 = Rm_VPR64.8B[48,8]; + tmp3 = Rm_VPR64.8B[56,8]; + TMPD1[56,8] = tmp2 + tmp3; Rd_VPR64.8B = TMPD1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR64.8B, Rm_VPR64.8B); - local tmpd:8 = tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_addp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.5 ADDP (vector) page C7-1407 line 77897 MATCH x0e20bc00/mask=xbf20fc00 @@ -1512,72 +679,34 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :addp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x17 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.8H,Rm_VPR128.8H) on pairs lane size (2 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:2 = 0; - local tmp6:2 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * 
[register]:2 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 + tmp6; - simd_address_at(tmp2, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 7, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 + tmp6; + tmp2 = Rn_VPR128.8H[0,16]; + tmp3 = Rn_VPR128.8H[16,16]; + TMPQ1[0,16] = tmp2 + tmp3; + tmp2 = Rn_VPR128.8H[32,16]; + tmp3 = Rn_VPR128.8H[48,16]; + TMPQ1[16,16] = tmp2 + tmp3; + tmp2 = Rn_VPR128.8H[64,16]; + tmp3 = Rn_VPR128.8H[80,16]; + TMPQ1[32,16] = tmp2 + tmp3; + tmp2 = Rn_VPR128.8H[96,16]; + tmp3 = Rn_VPR128.8H[112,16]; + TMPQ1[48,16] = tmp2 + tmp3; + tmp2 = Rm_VPR128.8H[0,16]; + tmp3 = Rm_VPR128.8H[16,16]; + TMPQ1[64,16] = tmp2 + tmp3; + tmp2 = Rm_VPR128.8H[32,16]; + tmp3 = Rm_VPR128.8H[48,16]; + TMPQ1[80,16] = tmp2 + tmp3; + tmp2 = Rm_VPR128.8H[64,16]; + tmp3 = Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = tmp2 + tmp3; + tmp2 = Rm_VPR128.8H[96,16]; + tmp3 = Rm_VPR128.8H[112,16]; + TMPQ1[112,16] = tmp2 + tmp3; Rd_VPR128.8H = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR128.8H, Rm_VPR128.8H); - local tmpd:16 = tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_addp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.6 ADDV page C7-1409 line 77996 MATCH x0e31b800/mask=xbf3ffc00 @@ -1588,9 +717,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :addv Rd_FPR8, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_addv(Rn_VPR128.16B, 1:1); -@endif } # C7.2.6 ADDV page C7-1409 line 77996 MATCH x0e31b800/mask=xbf3ffc00 @@ -1601,9 +728,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :addv Rd_FPR8, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_addv(Rn_VPR64.8B, 1:1); -@endif } # C7.2.6 ADDV page C7-1409 line 77996 MATCH x0e31b800/mask=xbf3ffc00 @@ -1614,9 +739,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :addv Rd_FPR16, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_addv(Rn_VPR64.4H, 2:1); -@endif } # C7.2.6 ADDV page C7-1409 line 77996 MATCH x0e31b800/mask=xbf3ffc00 @@ -1627,9 +750,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :addv Rd_FPR16, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_addv(Rn_VPR128.8H, 2:1); -@endif } # C7.2.6 ADDV page C7-1409 line 77996 MATCH x0e31b800/mask=xbf3ffc00 @@ -1641,35 +762,14 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & 
advSIMD3.size=1 & b_1721=0x18 & b_1216=0x
 :addv Rd_FPR32, Rn_VPR128.4S
 is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd
 {
-@if defined(SEMANTIC_primitive) || defined(SEMANTIC_force)
- local tmp1:4 = 0;
- simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16);
- local tmp2:4 = * [register]:4 tmp1;
- local tmp3:4 = 0;
- simd_address_at(tmp3, Rn_VPR128.4S, 1, 4, 16);
- local tmp4:4 = * [register]:4 tmp3;
- local tmp5:4 = tmp2 + tmp4;
- local tmp6:4 = 0;
- simd_address_at(tmp6, Rn_VPR128.4S, 2, 4, 16);
- local tmp7:4 = * [register]:4 tmp6;
- local tmp8:4 = 0;
- simd_address_at(tmp8, Rn_VPR128.4S, 3, 4, 16);
- local tmp9:4 = * [register]:4 tmp8;
- local tmp10:4 = tmp7 + tmp9;
- Rd_FPR32 = tmp5 + tmp10;
- zext_zs(Zd); # zero upper 28 bytes of Zd
-@elif defined(SEMANTIC_pcode)
- local tmp1:4 = SIMD_PIECE(Rn_VPR128.4S, 0:1);
- local tmp2:4 = SIMD_PIECE(Rn_VPR128.4S, 1:1);
+ local tmp1:4 = Rn_VPR128.4S[0,32];
+ local tmp2:4 = Rn_VPR128.4S[32,32];
 local tmp3:4 = tmp1 + tmp2;
- local tmp4:4 = SIMD_PIECE(Rn_VPR128.4S, 2:1);
- local tmp5:4 = SIMD_PIECE(Rn_VPR128.4S, 3:1);
+ local tmp4:4 = Rn_VPR128.4S[64,32];
+ local tmp5:4 = Rn_VPR128.4S[96,32];
 local tmp6:4 = tmp4 + tmp5;
- local tmpd:4 = tmp3 + tmp6;
- Zd = zext(tmpd); # assigning to Rd_FPR32
-@elif defined(SEMANTIC_pseudo)
- Rd_FPR32 = NEON_addv(Rn_VPR128.4S, 4:1);
-@endif
+ Rd_FPR32 = tmp3 + tmp6;
+ zext_zs(Zd); # zero upper 28 bytes of Zd
 }
 
 # C7.2.7 AESD page C7-1411 line 78085 MATCH x4e285800/mask=xfffffc00
@@ -1680,9 +780,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x
 :aesd Rd_VPR128.16B, Rn_VPR128.16B
 is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=5 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR128.16B = NEON_aesd(Rd_VPR128.16B, Rn_VPR128.16B);
-@endif
 }
 
 # C7.2.8 AESE page C7-1412 line 78145 MATCH x4e284800/mask=xfffffc00
@@ -1693,9 +791,7 @@ is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=5 & b_1011=2 & Rn_V
 :aese Rd_VPR128.16B, Rn_VPR128.16B
 is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=4 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR128.16B = NEON_aese(Rd_VPR128.16B, Rn_VPR128.16B);
-@endif
 }
 
 # C7.2.9 AESIMC page C7-1413 line 78206 MATCH x4e287800/mask=xfffffc00
@@ -1706,9 +802,7 @@ is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=4 & b_1011=2 & Rn_V
 :aesimc Rd_VPR128.16B, Rn_VPR128.16B
 is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=7 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Rd_VPR128 & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR128.16B = NEON_aesimc(Rd_VPR128.16B, Rn_VPR128.16B);
-@endif
 }
 
 # C7.2.10 AESMC page C7-1414 line 78264 MATCH x4e286800/mask=xfffffc00
@@ -1719,9 +813,7 @@ is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=7 & b_1011=2 & Rn_V
 :aesmc Rd_VPR128.16B, Rn_VPR128.16B
 is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=6 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Rd_VPR128 & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR128.16B = NEON_aesmc(Rd_VPR128.16B, Rn_VPR128.16B);
-@endif
 }
 
 # C7.2.11 AND (vector) page C7-1415 line 78322 MATCH x0e201c00/mask=xbfe0fc00
@@ -1733,82 +825,24 @@ is b_2431=0b01001110 & b_2223=0b00 &
b_1721=0b10100 & b_1216=6 & b_1011=2 & Rn_V :and Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.16B = Rn_VPR128.16B & Rm_VPR128.16B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 14, 
1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) & (* [register]:1 tmp2); + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] & Rm_VPR128.16B[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] & Rm_VPR128.16B[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] & Rm_VPR128.16B[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] & Rm_VPR128.16B[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] & Rm_VPR128.16B[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] & Rm_VPR128.16B[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] & Rm_VPR128.16B[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] & Rm_VPR128.16B[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] & Rm_VPR128.16B[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] & Rm_VPR128.16B[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] & Rm_VPR128.16B[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] & Rm_VPR128.16B[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] & Rm_VPR128.16B[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] & Rm_VPR128.16B[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] & Rm_VPR128.16B[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] & Rm_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_AND(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_and(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.11 AND (vector) page C7-1415 line 78322 MATCH x0e201c00/mask=xbfe0fc00 @@ -1820,15 +854,8 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :and Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR64.8B = Rn_VPR64.8B & Rm_VPR64.8B; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = Rn_VPR64.8B & Rm_VPR64.8B; - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_and(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.12 BCAX page C7-1416 line 78391 MATCH xce200000/mask=xffe08000 @@ -1840,203 +867,58 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :bcax Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, Ra_VPR128.16B is b_2131=0b11001110001 & b_15=0 & Rd_VPR128.16B & Rn_VPR128.16B & Rm_VPR128.16B & Ra_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd unary TMPQ1 = ~(Ra_VPR128.16B) on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Ra_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, TMPQ1, 0, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, TMPQ1, 1, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, TMPQ1, 2, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, TMPQ1, 3, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 4, 1, 16); - 
simd_address_at(tmp3, TMPQ1, 4, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, TMPQ1, 5, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, TMPQ1, 6, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, TMPQ1, 7, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, TMPQ1, 8, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, TMPQ1, 9, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, TMPQ1, 10, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, TMPQ1, 11, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, TMPQ1, 12, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, TMPQ1, 13, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, TMPQ1, 14, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Ra_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, TMPQ1, 15, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); + TMPQ1[0,8] = ~(Ra_VPR128.16B[0,8]); + TMPQ1[8,8] = ~(Ra_VPR128.16B[8,8]); + TMPQ1[16,8] = ~(Ra_VPR128.16B[16,8]); + TMPQ1[24,8] = ~(Ra_VPR128.16B[24,8]); + TMPQ1[32,8] = ~(Ra_VPR128.16B[32,8]); + TMPQ1[40,8] = ~(Ra_VPR128.16B[40,8]); + TMPQ1[48,8] = ~(Ra_VPR128.16B[48,8]); + TMPQ1[56,8] = ~(Ra_VPR128.16B[56,8]); + TMPQ1[64,8] = ~(Ra_VPR128.16B[64,8]); + TMPQ1[72,8] = ~(Ra_VPR128.16B[72,8]); + TMPQ1[80,8] = ~(Ra_VPR128.16B[80,8]); + TMPQ1[88,8] = ~(Ra_VPR128.16B[88,8]); + TMPQ1[96,8] = ~(Ra_VPR128.16B[96,8]); + TMPQ1[104,8] = ~(Ra_VPR128.16B[104,8]); + TMPQ1[112,8] = ~(Ra_VPR128.16B[112,8]); + TMPQ1[120,8] = ~(Ra_VPR128.16B[120,8]); # simd infix TMPQ2 = Rm_VPR128.16B & TMPQ1 on lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp6, TMPQ1, 0, 1, 16); - simd_address_at(tmp7, TMPQ2, 0, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp6, TMPQ1, 1, 1, 16); - simd_address_at(tmp7, TMPQ2, 1, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp6, TMPQ1, 2, 1, 16); - simd_address_at(tmp7, TMPQ2, 2, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp6, TMPQ1, 3, 1, 16); - simd_address_at(tmp7, TMPQ2, 3, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp6, TMPQ1, 4, 1, 16); - simd_address_at(tmp7, TMPQ2, 4, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp6, TMPQ1, 5, 1, 16); - simd_address_at(tmp7, TMPQ2, 5, 1, 
16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp6, TMPQ1, 6, 1, 16); - simd_address_at(tmp7, TMPQ2, 6, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp6, TMPQ1, 7, 1, 16); - simd_address_at(tmp7, TMPQ2, 7, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp6, TMPQ1, 8, 1, 16); - simd_address_at(tmp7, TMPQ2, 8, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp6, TMPQ1, 9, 1, 16); - simd_address_at(tmp7, TMPQ2, 9, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp6, TMPQ1, 10, 1, 16); - simd_address_at(tmp7, TMPQ2, 10, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp6, TMPQ1, 11, 1, 16); - simd_address_at(tmp7, TMPQ2, 11, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp6, TMPQ1, 12, 1, 16); - simd_address_at(tmp7, TMPQ2, 12, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 13, 1, 16); - simd_address_at(tmp6, TMPQ1, 13, 1, 16); - simd_address_at(tmp7, TMPQ2, 13, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp6, TMPQ1, 14, 1, 16); - simd_address_at(tmp7, TMPQ2, 14, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); - simd_address_at(tmp5, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp6, TMPQ1, 15, 1, 16); - simd_address_at(tmp7, TMPQ2, 15, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) & (* [register]:1 tmp6); + TMPQ2[0,8] = Rm_VPR128.16B[0,8] & TMPQ1[0,8]; + TMPQ2[8,8] = Rm_VPR128.16B[8,8] & TMPQ1[8,8]; + TMPQ2[16,8] = Rm_VPR128.16B[16,8] & TMPQ1[16,8]; + TMPQ2[24,8] = Rm_VPR128.16B[24,8] & TMPQ1[24,8]; + TMPQ2[32,8] = Rm_VPR128.16B[32,8] & TMPQ1[32,8]; + TMPQ2[40,8] = Rm_VPR128.16B[40,8] & TMPQ1[40,8]; + TMPQ2[48,8] = Rm_VPR128.16B[48,8] & TMPQ1[48,8]; + TMPQ2[56,8] = Rm_VPR128.16B[56,8] & TMPQ1[56,8]; + TMPQ2[64,8] = Rm_VPR128.16B[64,8] & TMPQ1[64,8]; + TMPQ2[72,8] = Rm_VPR128.16B[72,8] & TMPQ1[72,8]; + TMPQ2[80,8] = Rm_VPR128.16B[80,8] & TMPQ1[80,8]; + TMPQ2[88,8] = Rm_VPR128.16B[88,8] & TMPQ1[88,8]; + TMPQ2[96,8] = Rm_VPR128.16B[96,8] & TMPQ1[96,8]; + TMPQ2[104,8] = Rm_VPR128.16B[104,8] & TMPQ1[104,8]; + TMPQ2[112,8] = Rm_VPR128.16B[112,8] & TMPQ1[112,8]; + TMPQ2[120,8] = Rm_VPR128.16B[120,8] & TMPQ1[120,8]; # simd infix Rd_VPR128.16B = Rn_VPR128.16B | TMPQ2 on lane size 1 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp9, TMPQ2, 0, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp9, TMPQ2, 1, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, 
Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp9, TMPQ2, 2, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp9, TMPQ2, 3, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp9, TMPQ2, 4, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp9, TMPQ2, 5, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp9, TMPQ2, 6, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp9, TMPQ2, 7, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp9, TMPQ2, 8, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp9, TMPQ2, 9, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp9, TMPQ2, 10, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp9, TMPQ2, 11, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp9, TMPQ2, 12, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp9, TMPQ2, 13, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp9, TMPQ2, 14, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); - simd_address_at(tmp8, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp9, TMPQ2, 15, 1, 16); - simd_address_at(tmp10, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp10 = (* [register]:1 tmp8) | (* [register]:1 tmp9); + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] | TMPQ2[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] | TMPQ2[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] | TMPQ2[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] | TMPQ2[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] | TMPQ2[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] | TMPQ2[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] | TMPQ2[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] | TMPQ2[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] | TMPQ2[64,8]; + Rd_VPR128.16B[72,8] = 
Rn_VPR128.16B[72,8] | TMPQ2[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] | TMPQ2[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] | TMPQ2[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] | TMPQ2[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] | TMPQ2[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] | TMPQ2[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] | TMPQ2[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_NEGATE(Ra_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_AND(Rm_VPR128.16B, tmp1, 1:1); - local tmpd:16 = SIMD_INT_OR(Rn_VPR128.16B, tmp2, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_bcax(Rn_VPR128.16B, Rm_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.20 BIC (vector, immediate) page C7-1428 line 79003 MATCH x2f001400/mask=xbff81c00 @@ -2054,25 +936,11 @@ is b_2131=0b11001110001 & b_15=0 & Rd_VPR128.16B & Rn_VPR128.16B & Rm_VPR128.16B :bic Rd_VPR64.2S, abcdefgh is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & b_1515=0 & abcdefgh & Imm_neon_uimm8Shift & b_1012=5 & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = ~ Imm_neon_uimm8Shift:4; # simd infix Rd_VPR64.2S = Rd_VPR64.2S & tmp1 on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp2) & tmp1; + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] & tmp1; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] & tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = ~ Imm_neon_uimm8Shift:4; - local tmpd:8 = SIMD_INT_AND(Rd_VPR64.2S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_bic(Rd_VPR64.2S, Imm_neon_uimm8Shift:4, 4:1); -@endif } # C7.2.20 BIC (vector, immediate) page C7-1428 line 79003 MATCH x2f001400/mask=xbff81c00 @@ -2087,31 +955,13 @@ is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & b_1515=0 & abcdefgh & Imm :bic Rd_VPR64.4H, abcdefgh is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & abcdefgh & b_1923=0x0 & b_1415=2 & Imm_neon_uimm8Shift & b_1012=5 & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = ~ Imm_neon_uimm8Shift:2; # simd infix Rd_VPR64.4H = Rd_VPR64.4H & tmp1 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) & tmp1; + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] & tmp1; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] & tmp1; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] & tmp1; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] & tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = ~ Imm_neon_uimm8Shift:2; - local tmpd:8 = SIMD_INT_AND(Rd_VPR64.4H, tmp1); - Zd = zext(tmpd); # assigning 
to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_bic(Rd_VPR64.4H, Imm_neon_uimm8Shift:2, 2:1); -@endif } # C7.2.20 BIC (vector, immediate) page C7-1428 line 79003 MATCH x2f001400/mask=xbff81c00 @@ -2129,31 +979,13 @@ is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & abcdefgh & b_1923=0x0 & b_1415=2 & Imm :bic Rd_VPR128.4S, abcdefgh is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & b_1515=0 & abcdefgh & Imm_neon_uimm8Shift & b_1012=5 & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = ~ Imm_neon_uimm8Shift:4; # simd infix Rd_VPR128.4S = Rd_VPR128.4S & tmp1 on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) & tmp1; + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] & tmp1; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] & tmp1; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] & tmp1; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] & tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = ~ Imm_neon_uimm8Shift:4; - local tmpd:16 = SIMD_INT_AND(Rd_VPR128.4S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_bic(Rd_VPR128.4S, Imm_neon_uimm8Shift:4, 4:1); -@endif } # C7.2.20 BIC (vector, immediate) page C7-1428 line 79003 MATCH x2f001400/mask=xbff81c00 @@ -2168,43 +1000,17 @@ is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & b_1515=0 & abcdefgh & Imm :bic Rd_VPR128.8H, abcdefgh is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & abcdefgh & b_1415=2 & Imm_neon_uimm8Shift & b_1012=5 & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = ~ Imm_neon_uimm8Shift:2; # simd infix Rd_VPR128.8H = Rd_VPR128.8H & tmp1 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) & tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) & tmp1; + Rd_VPR128.8H[0,16] 
= Rd_VPR128.8H[0,16] & tmp1; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] & tmp1; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] & tmp1; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] & tmp1; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] & tmp1; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] & tmp1; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] & tmp1; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] & tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = ~ Imm_neon_uimm8Shift:2; - local tmpd:16 = SIMD_INT_AND(Rd_VPR128.8H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_bic(Rd_VPR128.8H, Imm_neon_uimm8Shift:2, 2:1); -@endif } # C7.2.21 BIC (vector, register) page C7-1430 line 79136 MATCH x0e601c00/mask=xbfe0fc00 @@ -2216,134 +1022,41 @@ is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & abcdefgh & b_1415=2 & Imm :bic Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd unary TMPQ1 = ~(Rm_VPR128.16B) on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, TMPQ1, 0, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, TMPQ1, 1, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, TMPQ1, 2, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, TMPQ1, 3, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, TMPQ1, 4, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, TMPQ1, 5, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, TMPQ1, 6, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, TMPQ1, 7, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, TMPQ1, 8, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, TMPQ1, 9, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, TMPQ1, 10, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, TMPQ1, 11, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, TMPQ1, 12, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, TMPQ1, 13, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, TMPQ1, 14, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, TMPQ1, 15, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); + TMPQ1[0,8] = ~(Rm_VPR128.16B[0,8]); + TMPQ1[8,8] = 
~(Rm_VPR128.16B[8,8]); + TMPQ1[16,8] = ~(Rm_VPR128.16B[16,8]); + TMPQ1[24,8] = ~(Rm_VPR128.16B[24,8]); + TMPQ1[32,8] = ~(Rm_VPR128.16B[32,8]); + TMPQ1[40,8] = ~(Rm_VPR128.16B[40,8]); + TMPQ1[48,8] = ~(Rm_VPR128.16B[48,8]); + TMPQ1[56,8] = ~(Rm_VPR128.16B[56,8]); + TMPQ1[64,8] = ~(Rm_VPR128.16B[64,8]); + TMPQ1[72,8] = ~(Rm_VPR128.16B[72,8]); + TMPQ1[80,8] = ~(Rm_VPR128.16B[80,8]); + TMPQ1[88,8] = ~(Rm_VPR128.16B[88,8]); + TMPQ1[96,8] = ~(Rm_VPR128.16B[96,8]); + TMPQ1[104,8] = ~(Rm_VPR128.16B[104,8]); + TMPQ1[112,8] = ~(Rm_VPR128.16B[112,8]); + TMPQ1[120,8] = ~(Rm_VPR128.16B[120,8]); # simd infix Rd_VPR128.16B = Rn_VPR128.16B & TMPQ1 on lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp5, TMPQ1, 0, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp5, TMPQ1, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp5, TMPQ1, 2, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp5, TMPQ1, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp5, TMPQ1, 4, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp5, TMPQ1, 5, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp5, TMPQ1, 6, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp5, TMPQ1, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp5, TMPQ1, 8, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp5, TMPQ1, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp5, TMPQ1, 10, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp5, TMPQ1, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp5, TMPQ1, 12, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 13, 1, 16); - 
simd_address_at(tmp5, TMPQ1, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp5, TMPQ1, 14, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp5, TMPQ1, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] & TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] & TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] & TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] & TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] & TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] & TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] & TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] & TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] & TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] & TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] & TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] & TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] & TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] & TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] & TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] & TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_NEGATE(Rm_VPR128.16B, 1:1); - local tmpd:16 = SIMD_INT_AND(Rn_VPR128.16B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_bic(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.21 BIC (vector, register) page C7-1430 line 79136 MATCH x0e601c00/mask=xbfe0fc00 @@ -2355,78 +1068,25 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.16 :bic Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd unary TMPD1 = ~(Rm_VPR64.8B) on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPD1, 0, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPD1, 1, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPD1, 2, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPD1, 3, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPD1, 4, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPD1, 5, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPD1, 6, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPD1, 7, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); + TMPD1[0,8] = ~(Rm_VPR64.8B[0,8]); + TMPD1[8,8] = ~(Rm_VPR64.8B[8,8]); + TMPD1[16,8] = 
~(Rm_VPR64.8B[16,8]); + TMPD1[24,8] = ~(Rm_VPR64.8B[24,8]); + TMPD1[32,8] = ~(Rm_VPR64.8B[32,8]); + TMPD1[40,8] = ~(Rm_VPR64.8B[40,8]); + TMPD1[48,8] = ~(Rm_VPR64.8B[48,8]); + TMPD1[56,8] = ~(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR64.8B = Rn_VPR64.8B & TMPD1 on lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp5, TMPD1, 0, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp5, TMPD1, 1, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp5, TMPD1, 2, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp5, TMPD1, 3, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp5, TMPD1, 4, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp5, TMPD1, 5, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp5, TMPD1, 6, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp5, TMPD1, 7, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) & (* [register]:1 tmp5); + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] & TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] & TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] & TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] & TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] & TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] & TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] & TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] & TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_NEGATE(Rm_VPR64.8B, 1:1); - local tmpd:8 = SIMD_INT_AND(Rn_VPR64.8B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_bic(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.22 BIF page C7-1432 line 79219 MATCH x2ee01c00/mask=xbfe0fc00 @@ -2437,9 +1097,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.8B :bif Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_bif(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.22 BIF page C7-1432 line 79219 MATCH x2ee01c00/mask=xbfe0fc00 @@ -2450,9 +1108,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.16 :bif Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=3 & 
b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_bif(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.23 BIT page C7-1434 line 79302 MATCH x2ea01c00/mask=xbfe0fc00 @@ -2463,9 +1119,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR64.8B :bit Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_bit(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.23 BIT page C7-1434 line 79302 MATCH x2ea01c00/mask=xbfe0fc00 @@ -2476,9 +1130,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.16 :bit Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_bit(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.24 BSL page C7-1436 line 79384 MATCH x2e601c00/mask=xbfe0fc00 @@ -2489,9 +1141,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.8B :bsl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_bsl(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.24 BSL page C7-1436 line 79384 MATCH x2e601c00/mask=xbfe0fc00 @@ -2502,9 +1152,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.16 :bsl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_bsl(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.25 CLS (vector) page C7-1438 line 79466 MATCH x0e204800/mask=xbf3ffc00 @@ -2516,9 +1164,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.8B :cls Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_cls(Rn_VPR64.8B, 1:1); -@endif } # C7.2.25 CLS (vector) page C7-1438 line 79466 MATCH x0e204800/mask=xbf3ffc00 @@ -2530,9 +1176,7 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_ :cls Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cls(Rn_VPR128.16B, 1:1); -@endif } # C7.2.25 CLS (vector) page C7-1438 line 79466 MATCH x0e204800/mask=xbf3ffc00 @@ -2544,9 +1188,7 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_ :cls Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & 
b_2223=0b01 & b_1021=0b100000010010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_cls(Rn_VPR64.4H, 2:1); -@endif } # C7.2.25 CLS (vector) page C7-1438 line 79466 MATCH x0e204800/mask=xbf3ffc00 @@ -2558,9 +1200,7 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_ :cls Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_cls(Rn_VPR128.8H, 2:1); -@endif } # C7.2.25 CLS (vector) page C7-1438 line 79466 MATCH x0e204800/mask=xbf3ffc00 @@ -2572,9 +1212,7 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_ :cls Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_cls(Rn_VPR64.2S, 4:1); -@endif } # C7.2.25 CLS (vector) page C7-1438 line 79466 MATCH x0e204800/mask=xbf3ffc00 @@ -2586,9 +1224,7 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_ :cls Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_cls(Rn_VPR128.4S, 4:1); -@endif } # C7.2.26 CLZ (vector) page C7-1440 line 79562 MATCH x2e204800/mask=xbf3ffc00 @@ -2600,9 +1236,7 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_ :clz Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_clz(Rn_VPR64.8B, 1:1); -@endif } # C7.2.26 CLZ (vector) page C7-1440 line 79562 MATCH x2e204800/mask=xbf3ffc00 @@ -2614,9 +1248,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_ :clz Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_clz(Rn_VPR128.16B, 1:1); -@endif } # C7.2.26 CLZ (vector) page C7-1440 line 79562 MATCH x2e204800/mask=xbf3ffc00 @@ -2628,9 +1260,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_ :clz Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_clz(Rn_VPR64.4H, 2:1); -@endif } # C7.2.26 CLZ (vector) page C7-1440 line 79562 MATCH x2e204800/mask=xbf3ffc00 @@ -2642,9 +1272,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_ :clz Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_clz(Rn_VPR128.8H, 2:1); -@endif } # C7.2.26 CLZ (vector) page C7-1440 line 79562 MATCH x2e204800/mask=xbf3ffc00 @@ 
-2656,9 +1284,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_ :clz Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_clz(Rn_VPR64.2S, 4:1); -@endif } # C7.2.26 CLZ (vector) page C7-1440 line 79562 MATCH x2e204800/mask=xbf3ffc00 @@ -2670,9 +1296,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_ :clz Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_clz(Rn_VPR128.4S, 4:1); -@endif } # C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x7e208c00/mask=xff20fc00 @@ -2685,21 +1309,11 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_ :cmeq Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2431=0b01111110 & b_2223=0b11 & b_21=1 & b_1015=0b100011 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:1 = Rn_FPR64 == Rm_FPR64; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:1 = Rn_FPR64 == Rm_FPR64; - local tmp2:8 = zext(tmp1); - local tmp3:8 = ~ 0:8; - local tmpd:8 = tmp2 * tmp3; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_cmeq(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 @@ -2711,9 +1325,7 @@ is b_2431=0b01111110 & b_2223=0b11 & b_21=1 & b_1015=0b100011 & Rd_FPR64 & Rn_FP :cmeq Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_21=1 & b_1015=0b100011 & Rd_VPR64.8B & Rn_VPR64.8B & Rm_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_cmeq(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 @@ -2725,9 +1337,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_21=1 & b_1015=0b100011 & :cmeq Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_21=1 & b_1015=0b100011 & Rd_VPR128.16B & Rn_VPR128.16B & Rm_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cmeq(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 @@ -2739,9 +1349,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_21=1 & b_1015=0b100011 & :cmeq Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_21=1 & b_1015=0b100011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_cmeq(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 @@ -2753,9 +1361,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_21=1 & b_1015=0b100011 & :cmeq Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_21=1 & b_1015=0b100011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if 
defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_cmeq(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 @@ -2767,9 +1373,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_21=1 & b_1015=0b100011 & :cmeq Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=1 & b_1015=0b100011 & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_cmeq(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 @@ -2781,9 +1385,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=1 & b_1015=0b100011 & :cmeq Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=1 & b_1015=0b100011 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_cmeq(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.27 CMEQ (register) page C7-1442 line 79657 MATCH x2e208c00/mask=xbf20fc00 @@ -2795,9 +1397,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=1 & b_1015=0b100011 & :cmeq Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_21=1 & b_1015=0b100011 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_cmeq(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 @@ -2808,9 +1408,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_21=1 & b_1015=0b100011 & :cmeq Rd_VPR128.16B, Rn_VPR128.16B, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cmeq(Rn_VPR128.16B, 0:1, 1:1); -@endif } # C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 @@ -2821,9 +1419,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :cmeq Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_cmeq(Rn_VPR128.2D, 0:8, 8:1); -@endif } # C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 @@ -2834,9 +1430,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x :cmeq Rd_VPR64.2S, Rn_VPR64.2S, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_cmeq(Rn_VPR64.2S, 0:4, 4:1); -@endif } # C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 @@ -2847,9 +1441,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :cmeq Rd_VPR64.4H, Rn_VPR64.4H, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if 
defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_cmeq(Rn_VPR64.4H, 0:2, 2:1); -@endif } # C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 @@ -2860,9 +1452,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :cmeq Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_cmeq(Rn_VPR128.4S, 0:4, 2:1); -@endif } # C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 @@ -2873,9 +1463,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :cmeq Rd_VPR64.8B, Rn_VPR64.8B, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_cmeq(Rn_VPR64.8B, 0:1, 1:1); -@endif } # C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x0e209800/mask=xbf3ffc00 @@ -2886,9 +1474,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :cmeq Rd_VPR128.8H, Rn_VPR128.8H, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_cmeq(Rn_VPR128.8H, 0:2, 2:1); -@endif } # C7.2.28 CMEQ (zero) page C7-1444 line 79796 MATCH x5e209800/mask=xff3ffc00 @@ -2899,9 +1485,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :cmeq Rd_FPR64, Rn_FPR64, "#0" is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000100110 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_cmeq(Rn_FPR64, 0:4); -@endif } # C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 @@ -2912,9 +1496,7 @@ is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000100110 & Rd_FPR64 & Rn_FPR64 :cmge Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x7 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cmge(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 @@ -2925,9 +1507,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :cmge Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x7 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_cmge(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 @@ -2938,9 +1518,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :cmge Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x7 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || 
defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_cmge(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 @@ -2951,9 +1529,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :cmge Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x7 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_cmge(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 @@ -2964,9 +1540,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :cmge Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x7 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_cmge(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 @@ -2977,9 +1551,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :cmge Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x7 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_cmge(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x0e203c00/mask=xbf20fc00 @@ -2990,9 +1562,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :cmge Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x7 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_cmge(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.29 CMGE (register) page C7-1447 line 79951 MATCH x5e203c00/mask=xff20fc00 @@ -3003,9 +1573,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :cmge Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2431=0b01011110 & b_2223=0b11 & b_21=1 & b_1015=0b001111 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_cmge(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 @@ -3016,9 +1584,7 @@ is b_2431=0b01011110 & b_2223=0b11 & b_21=1 & b_1015=0b001111 & Rd_FPR64 & Rn_FP :cmge Rd_VPR128.16B, Rn_VPR128.16B, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cmge(Rn_VPR128.16B, 0:1, 1:1); -@endif } # C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 @@ -3029,9 +1595,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :cmge Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if 
defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_cmge(Rn_VPR128.2D, 0:8, 8:1); -@endif } # C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 @@ -3042,9 +1606,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x :cmge Rd_VPR64.2S, Rn_VPR64.2S, "#0" is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_cmge(Rn_VPR64.2S, 0:4, 4:1); -@endif } # C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 @@ -3055,9 +1617,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :cmge Rd_VPR64.4H, Rn_VPR64.4H, "#0" is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_cmge(Rn_VPR64.4H, 0:2, 2:1); -@endif } # C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 @@ -3068,9 +1628,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :cmge Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_cmge(Rn_VPR128.4S, 0:4, 4:1); -@endif } # C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 @@ -3081,9 +1639,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :cmge Rd_VPR64.8B, Rn_VPR64.8B, "#0" is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_cmge(Rn_VPR64.8B, 0:1, 1:1); -@endif } # C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x2e208800/mask=xbf3ffc00 @@ -3094,9 +1650,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :cmge Rd_VPR128.8H, Rn_VPR128.8H, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_cmge(Rn_VPR128.8H, 0:2, 2:1); -@endif } # C7.2.30 CMGE (zero) page C7-1449 line 80089 MATCH x7e208800/mask=xff3ffc00 @@ -3107,9 +1661,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :cmge Rd_FPR64, Rn_FPR64, "#0" is b_2431=0b01111110 & b_2223=0b11 & b_1021=0b100000100010 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_cmge(Rn_FPR64, 0:4); -@endif } # C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 @@ -3120,9 +1672,7 @@ is b_2431=0b01111110 & b_2223=0b11 & b_1021=0b100000100010 & Rd_FPR64 & Rn_FPR64 :cmgt Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x6 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cmgt(Rn_VPR128.16B, 
Rm_VPR128.16B, 1:1); -@endif } # C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 @@ -3133,9 +1683,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :cmgt Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x6 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_cmgt(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 @@ -3146,9 +1694,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :cmgt Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x6 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_cmgt(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 @@ -3159,9 +1705,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :cmgt Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x6 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_cmgt(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 @@ -3172,9 +1716,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :cmgt Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x6 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_cmgt(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 @@ -3185,9 +1727,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :cmgt Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x6 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_cmgt(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x0e203400/mask=xbf20fc00 @@ -3198,9 +1738,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :cmgt Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x6 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_cmgt(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.31 CMGT (register) page C7-1452 line 80244 MATCH x5e203400/mask=xff20fc00 @@ -3211,9 +1749,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :cmgt Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2431=0b01011110 & b_2223=0b11 & b_21=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || 
defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_cmgt(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 @@ -3224,9 +1760,7 @@ is b_2431=0b01011110 & b_2223=0b11 & b_21=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FP :cmgt Rd_VPR128.16B, Rn_VPR128.16B, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cmgt(Rn_VPR128.16B, 0:1, 1:1); -@endif } # C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 @@ -3237,9 +1771,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :cmgt Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_cmgt(Rn_VPR128.2D, 0:8, 8:1); -@endif } # C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 @@ -3250,9 +1782,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x :cmgt Rd_VPR64.2S, Rn_VPR64.2S, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_cmgt(Rn_VPR64.2S, 0:4, 4:1); -@endif } # C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 @@ -3263,9 +1793,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :cmgt Rd_VPR64.4H, Rn_VPR64.4H, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_cmgt(Rn_VPR64.4H, 0:2, 2:1); -@endif } # C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 @@ -3276,9 +1804,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :cmgt Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_cmgt(Rn_VPR128.4S, 0:4, 4:1); -@endif } # C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 @@ -3289,9 +1815,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :cmgt Rd_VPR64.8B, Rn_VPR64.8B, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_cmgt(Rn_VPR64.8B, 0:1, 1:1); -@endif } # C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x0e208800/mask=xbf3ffc00 @@ -3302,9 +1826,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :cmgt Rd_VPR128.8H, Rn_VPR128.8H, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_cmgt(Rn_VPR128.8H, 
0:2, 2:1); -@endif } # C7.2.32 CMGT (zero) page C7-1454 line 80382 MATCH x5e208800/mask=xff3ffc00 @@ -3315,9 +1837,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :cmgt Rd_FPR64, Rn_FPR64, "#0" is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000100010 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_cmgt(Rn_FPR64, 0:8); -@endif } # C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 @@ -3328,9 +1848,7 @@ is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000100010 & Rd_FPR64 & Rn_FPR64 :cmhi Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x6 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cmhi(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 @@ -3341,9 +1859,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :cmhi Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x6 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_cmhi(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 @@ -3354,9 +1870,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :cmhi Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x6 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_cmhi(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 @@ -3367,9 +1881,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :cmhi Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x6 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_cmhi(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 @@ -3380,9 +1892,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :cmhi Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x6 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_cmhi(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 @@ -3393,9 +1903,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :cmhi Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x6 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) 
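+ # Editor's note, illustrative sketch only (not part of this refactor): NEON_cmhi stays a
+ # pseudo-op in this form.  If the 8B variant were ever expanded in the bit-slice style
+ # used for the bic primitives above, each result lane would become all-ones when the Rn
+ # lane is unsigned-higher than the Rm lane and zero otherwise.  The sketch assumes the
+ # TMPD1 scratch register that the bic 8B expansion uses and hypothetical locals m0..m7;
+ # it is left commented out so the emitted p-code for cmhi is unchanged.
+ #   local m0:1 = Rn_VPR64.8B[0,8] > Rm_VPR64.8B[0,8];
+ #   TMPD1[0,8] = m0 * 0xff;
+ #   local m1:1 = Rn_VPR64.8B[8,8] > Rm_VPR64.8B[8,8];
+ #   TMPD1[8,8] = m1 * 0xff;
+ #   local m2:1 = Rn_VPR64.8B[16,8] > Rm_VPR64.8B[16,8];
+ #   TMPD1[16,8] = m2 * 0xff;
+ #   local m3:1 = Rn_VPR64.8B[24,8] > Rm_VPR64.8B[24,8];
+ #   TMPD1[24,8] = m3 * 0xff;
+ #   local m4:1 = Rn_VPR64.8B[32,8] > Rm_VPR64.8B[32,8];
+ #   TMPD1[32,8] = m4 * 0xff;
+ #   local m5:1 = Rn_VPR64.8B[40,8] > Rm_VPR64.8B[40,8];
+ #   TMPD1[40,8] = m5 * 0xff;
+ #   local m6:1 = Rn_VPR64.8B[48,8] > Rm_VPR64.8B[48,8];
+ #   TMPD1[48,8] = m6 * 0xff;
+ #   local m7:1 = Rn_VPR64.8B[56,8] > Rm_VPR64.8B[56,8];
+ #   TMPD1[56,8] = m7 * 0xff;
+ #   Rd_VPR64.8B = TMPD1;
+ #   zext_zd(Zd);  # zero upper 24 bytes of Zd, matching the surrounding 64-bit forms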
Rd_VPR64.8B = NEON_cmhi(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x2e203400/mask=xbf20fc00 @@ -3406,9 +1914,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :cmhi Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x6 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_cmhi(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.33 CMHI (register) page C7-1457 line 80537 MATCH x7e203400/mask=xff20fc00 @@ -3419,9 +1925,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :cmhi Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2431=0b01111110 & b_2223=0b11 & b_21=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_cmhi(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 @@ -3432,9 +1936,7 @@ is b_2431=0b01111110 & b_2223=0b11 & b_21=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FP :cmhs Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x7 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cmhs(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 @@ -3445,9 +1947,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :cmhs Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x7 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_cmhs(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 @@ -3458,9 +1958,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :cmhs Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x7 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_cmhs(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 @@ -3471,9 +1969,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :cmhs Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x7 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_cmhs(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 @@ -3484,9 +1980,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :cmhs Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x7 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if 
defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_cmhs(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 @@ -3497,9 +1991,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :cmhs Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x7 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_cmhs(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x2e203c00/mask=xbf20fc00 @@ -3510,9 +2002,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :cmhs Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x7 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_cmhs(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.34 CMHS (register) page C7-1459 line 80675 MATCH x7e203c00/mask=xff20fc00 @@ -3523,9 +2013,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :cmhs Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2431=0b01111110 & b_2223=0b11 & b_21=1 & b_1015=0b001111 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_cmhs(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 @@ -3536,9 +2024,7 @@ is b_2431=0b01111110 & b_2223=0b11 & b_21=1 & b_1015=0b001111 & Rd_FPR64 & Rn_FP :cmle Rd_VPR128.16B, Rn_VPR128.16B, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cmle(Rn_VPR128.16B, 0:1, 1:1); -@endif } # C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 @@ -3549,9 +2035,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :cmle Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_cmle(Rn_VPR128.2D, 0:8, 8:1); -@endif } # C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 @@ -3562,9 +2046,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x :cmle Rd_VPR64.2S, Rn_VPR64.2S, "#0" is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_cmle(Rn_VPR64.2S, 0:4, 4:1); -@endif } # C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 @@ -3575,9 +2057,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :cmle Rd_VPR64.4H, Rn_VPR64.4H, "#0" is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || 
defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_cmle(Rn_VPR64.4H, 0:2, 2:1); -@endif } # C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 @@ -3588,9 +2068,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :cmle Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_cmle(Rn_VPR128.4S, 0:4, 4:1); -@endif } # C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 @@ -3601,9 +2079,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :cmle Rd_VPR64.8B, Rn_VPR64.8B, "#0" is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_cmle(Rn_VPR64.8B, 0:1, 1:1); -@endif } # C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x2e209800/mask=xbf3ffc00 @@ -3614,9 +2090,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :cmle Rd_VPR128.8H, Rn_VPR128.8H, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_cmle(Rn_VPR128.8H, 0:2, 2:1); -@endif } # C7.2.35 CMLE (zero) page C7-1461 line 80813 MATCH x7e209800/mask=xff3ffc00 @@ -3627,9 +2101,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :cmle Rd_FPR64, Rn_FPR64, "#0" is b_2431=0b01111110 & b_2223=0b11 & b_1021=0b100000100110 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_cmle(Rn_FPR64, 0:8); -@endif } # C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 @@ -3640,9 +2112,7 @@ is b_2431=0b01111110 & b_2223=0b11 & b_1021=0b100000100110 & Rd_FPR64 & Rn_FPR64 :cmlt Rd_VPR128.16B, Rn_VPR128.16B, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cmlt(Rn_VPR128.16B, 0:1, 1:1); -@endif } # C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 @@ -3653,9 +2123,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :cmlt Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_cmlt(Rn_VPR128.2D, 0:8, 8:1); -@endif } # C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 @@ -3666,9 +2134,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x :cmlt Rd_VPR64.2S, Rn_VPR64.2S, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_cmlt(Rn_VPR64.2S, 0:4, 4:1); -@endif } # C7.2.36 CMLT (zero) page 
C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 @@ -3679,9 +2145,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :cmlt Rd_VPR64.4H, Rn_VPR64.4H, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_cmlt(Rn_VPR64.4H, 0:2, 2:1); -@endif } # C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 @@ -3692,9 +2156,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :cmlt Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_cmlt(Rn_VPR128.4S, 0:4, 4:1); -@endif } # C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 @@ -3705,9 +2167,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :cmlt Rd_VPR64.8B, Rn_VPR64.8B, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_cmlt(Rn_VPR64.8B, 0:1, 1:1); -@endif } # C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x0e20a800/mask=xbf3ffc00 @@ -3718,9 +2178,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :cmlt Rd_VPR128.8H, Rn_VPR128.8H, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_cmlt(Rn_VPR128.8H, 0:2, 2:1); -@endif } # C7.2.36 CMLT (zero) page C7-1464 line 80968 MATCH x5e20a800/mask=xff3ffc00 @@ -3731,9 +2189,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :cmlt Rd_FPR64, Rn_FPR64, "#0" is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000101010 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_cmlt(Rn_FPR64, 0:8); -@endif } # C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 @@ -3744,9 +2200,7 @@ is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000101010 & Rd_FPR64 & Rn_FPR64 :cmtst Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x11 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cmtst(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 @@ -3757,9 +2211,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :cmtst Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_cmtst(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 @@ -3770,9 +2222,7 @@ is b_3131=0 & q=1 & u=0 
& b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :cmtst Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x11 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_cmtst(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 @@ -3783,9 +2233,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :cmtst Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x11 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_cmtst(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 @@ -3796,9 +2244,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :cmtst Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_cmtst(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 @@ -3809,9 +2255,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :cmtst Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x11 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_cmtst(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.37 CMTST page C7-1466 line 81106 MATCH x0e208c00/mask=xbf20fc00 @@ -3822,9 +2266,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :cmtst Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_cmtst(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.37 CMTST page C7-1466 line 81106 MATCH x5e208c00/mask=xff20fc00 @@ -3835,9 +2277,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :cmtst Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2431=0b01011110 & b_2223=0b11 & b_21=1 & b_1015=0b100011 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_cmtst(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.38 CNT page C7-1468 line 81245 MATCH x0e205800/mask=xbf3ffc00 @@ -3848,9 +2288,7 @@ is b_2431=0b01011110 & b_2223=0b11 & b_21=1 & b_1015=0b100011 & Rd_FPR64 & Rn_FP :cnt Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x5 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_cnt(Rn_VPR128.16B, 1:1); -@endif } # C7.2.38 CNT page C7-1468 line 81245 MATCH x0e205800/mask=xbf3ffc00 @@ -3861,9 +2299,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & 
b_1721=0x10 & b_1216=0x :cnt Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x5 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_cnt(Rn_VPR64.8B, 1:1); -@endif } # C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 @@ -3873,56 +2309,28 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x # AUNIT --inst x4e010400/mask=xffe1fc00 --status pass :dup Rd_VPR128.16B, Rn_VPR128.B.imm_neon_uimm4 -is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.16B & Zd +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm4:4, 1, 16); - local tmp2:1 = * [register]:1 tmp1; - # simd duplicate Rd_VPR128.16B = all elements tmp2 (lane size 1) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp3 = tmp2; + local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; + # simd duplicate Rd_VPR128.16B = all elements tmp1 (lane size 1) + Rd_VPR128.16B[0,8] = tmp1; + Rd_VPR128.16B[8,8] = tmp1; + Rd_VPR128.16B[16,8] = tmp1; + Rd_VPR128.16B[24,8] = tmp1; + Rd_VPR128.16B[32,8] = tmp1; + Rd_VPR128.16B[40,8] = tmp1; + Rd_VPR128.16B[48,8] = tmp1; + Rd_VPR128.16B[56,8] = tmp1; + Rd_VPR128.16B[64,8] = tmp1; + Rd_VPR128.16B[72,8] = tmp1; + Rd_VPR128.16B[80,8] = tmp1; + Rd_VPR128.16B[88,8] = tmp1; + Rd_VPR128.16B[96,8] = tmp1; + Rd_VPR128.16B[104,8] = tmp1; + Rd_VPR128.16B[112,8] = tmp1; + Rd_VPR128.16B[120,8] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm4:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128.16B, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm4:1); - Rd_VPR128.16B = NEON_dup(Rd_VPR128.16B, tmp1, 1:1); -@endif } # C7.2.39 DUP (element) page C7-1470 line 81332 MATCH 
x0e000400/mask=xbfe0fc00 @@ -3932,28 +2340,14 @@ is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 # AUNIT --inst x4e080400/mask=xffeffc00 --status pass :dup Rd_VPR128.2D, Rn_VPR128.D.imm_neon_uimm1 -is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 & imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.2D & Zd +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm1] lane size 8 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm1:4, 8, 16); - local tmp2:8 = * [register]:8 tmp1; - # simd duplicate Rd_VPR128.2D = all elements tmp2 (lane size 8) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = tmp2; + local tmp1:8 = Rn_VPR128.D.imm_neon_uimm1; + # simd duplicate Rd_VPR128.2D = all elements tmp1 (lane size 8) + Rd_VPR128.2D[0,64] = tmp1; + Rd_VPR128.2D[64,64] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm1:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128.2D, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm1:1); - Rd_VPR128.2D = NEON_dup(Rd_VPR128.2D, tmp1, 8:1); -@endif } # C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 @@ -3963,28 +2357,14 @@ is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 # AUNIT --inst x0e040400/mask=xffe7fc00 --status pass :dup Rd_VPR64.2S, Rn_VPR128.S.imm_neon_uimm2 -is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR64.2S & Zd +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm2:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd duplicate Rd_VPR64.2S = all elements tmp2 (lane size 4) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = tmp2; + local tmp1:4 = Rn_VPR128.S.imm_neon_uimm2; + # simd duplicate Rd_VPR64.2S = all elements tmp1 (lane size 4) + Rd_VPR64.2S[0,32] = tmp1; + Rd_VPR64.2S[32,32] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm2:1); - local tmpd:8 = SIMD_COPY(Rd_VPR64.2S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm2:1); - Rd_VPR64.2S = NEON_dup(Rd_VPR64.2S, tmp1, 4:1); -@endif } # C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 @@ -3994,32 +2374,16 @@ is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 # AUNIT --inst x0e020400/mask=xffe3fc00 --status pass :dup Rd_VPR64.4H, Rn_VPR128.H.imm_neon_uimm3 -is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & imm_neon_uimm3 & b_1617=2 & 
b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR64.4H & Zd +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm3:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd duplicate Rd_VPR64.4H = all elements tmp2 (lane size 2) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = tmp2; + local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; + # simd duplicate Rd_VPR64.4H = all elements tmp1 (lane size 2) + Rd_VPR64.4H[0,16] = tmp1; + Rd_VPR64.4H[16,16] = tmp1; + Rd_VPR64.4H[32,16] = tmp1; + Rd_VPR64.4H[48,16] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm3:1); - local tmpd:8 = SIMD_COPY(Rd_VPR64.4H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm3:1); - Rd_VPR64.4H = NEON_dup(Rd_VPR64.4H, tmp1, 2:1); -@endif } # C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 @@ -4029,32 +2393,16 @@ is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 # AUNIT --inst x4e040400/mask=xffe7fc00 --status pass :dup Rd_VPR128.4S, Rn_VPR128.S.imm_neon_uimm2 -is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.4S & Zd +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm2:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd duplicate Rd_VPR128.4S = all elements tmp2 (lane size 4) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = tmp2; + local tmp1:4 = Rn_VPR128.S.imm_neon_uimm2; + # simd duplicate Rd_VPR128.4S = all elements tmp1 (lane size 4) + Rd_VPR128.4S[0,32] = tmp1; + Rd_VPR128.4S[32,32] = tmp1; + Rd_VPR128.4S[64,32] = tmp1; + Rd_VPR128.4S[96,32] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm2:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128.4S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm2:1); - Rd_VPR128.4S = NEON_dup(Rd_VPR128.4S, tmp1, 4:1); -@endif } # C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 @@ -4064,40 +2412,20 @@ is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 # AUNIT --inst x0e010400/mask=xffe1fc00 --status pass :dup Rd_VPR64.8B, Rn_VPR128.B.imm_neon_uimm4 -is b_3131=0 & Q=0 & 
b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR64.8B & Zd +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm4:4, 1, 16); - local tmp2:1 = * [register]:1 tmp1; - # simd duplicate Rd_VPR64.8B = all elements tmp2 (lane size 1) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp3 = tmp2; + local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; + # simd duplicate Rd_VPR64.8B = all elements tmp1 (lane size 1) + Rd_VPR64.8B[0,8] = tmp1; + Rd_VPR64.8B[8,8] = tmp1; + Rd_VPR64.8B[16,8] = tmp1; + Rd_VPR64.8B[24,8] = tmp1; + Rd_VPR64.8B[32,8] = tmp1; + Rd_VPR64.8B[40,8] = tmp1; + Rd_VPR64.8B[48,8] = tmp1; + Rd_VPR64.8B[56,8] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm4:1); - local tmpd:8 = SIMD_COPY(Rd_VPR64.8B, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm4:1); - Rd_VPR64.8B = NEON_dup(Rd_VPR64.8B, tmp1, 1:1); -@endif } # C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x0e000400/mask=xbfe0fc00 @@ -4107,40 +2435,20 @@ is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 # AUNIT --inst x4e020400/mask=xffe3fc00 --status pass :dup Rd_VPR128.8H, Rn_VPR128.H.imm_neon_uimm3 -is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.8H & Zd +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm3:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd duplicate Rd_VPR128.8H = all elements tmp2 (lane size 2) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = tmp2; + local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; + # simd duplicate Rd_VPR128.8H = all elements 
tmp1 (lane size 2) + Rd_VPR128.8H[0,16] = tmp1; + Rd_VPR128.8H[16,16] = tmp1; + Rd_VPR128.8H[32,16] = tmp1; + Rd_VPR128.8H[48,16] = tmp1; + Rd_VPR128.8H[64,16] = tmp1; + Rd_VPR128.8H[80,16] = tmp1; + Rd_VPR128.8H[96,16] = tmp1; + Rd_VPR128.8H[112,16] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm3:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128.8H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm3:1); - Rd_VPR128.8H = NEON_dup(Rd_VPR128.8H, tmp1, 2:1); -@endif } # C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x5e000400/mask=xffe0fc00 @@ -4151,21 +2459,11 @@ is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 # AUNIT --inst x5e010400/mask=xffe1fc00 --status pass :dup Rd_FPR8, Rn_VPR128.B.imm_neon_uimm4 -is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR8 & Zd +is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR8 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm4:4, 1, 16); - Rd_FPR8 = * [register]:1 tmp1; + Rd_FPR8 = Rn_VPR128.B.imm_neon_uimm4; zext_zb(Zd); # zero upper 31 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:1 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm4:1); - Zd = zext(tmpd); # assigning to Rd_FPR8 -@elif defined(SEMANTIC_pseudo) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm4:1); - Rd_FPR8 = NEON_dup(Rd_FPR8, tmp1, 1:1); -@endif } # C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x5e000400/mask=xffe0fc00 @@ -4176,21 +2474,11 @@ is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 # AUNIT --inst x5e080400/mask=xffeffc00 --status pass :dup Rd_FPR64, Rn_VPR128.D.imm_neon_uimm1 -is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 & imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR64 & Zd +is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm1] lane size 8 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm1:4, 8, 16); - Rd_FPR64 = * [register]:8 tmp1; + Rd_FPR64 = Rn_VPR128.D.imm_neon_uimm1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm1:1); - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm1:1); - Rd_FPR64 = NEON_dup(Rd_FPR64, tmp1, 8:1); -@endif } # C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x5e000400/mask=xffe0fc00 @@ -4201,21 +2489,11 @@ is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 # AUNIT --inst x5e020400/mask=xffe3fc00 --status pass :dup Rd_FPR16, Rn_VPR128.H.imm_neon_uimm3 -is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR16 & Zd +is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & 
Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm3:4, 2, 16); - Rd_FPR16 = * [register]:2 tmp1; + Rd_FPR16 = Rn_VPR128.H.imm_neon_uimm3; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:2 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm3:1); - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm3:1); - Rd_FPR16 = NEON_dup(Rd_FPR16, tmp1, 2:1); -@endif } # C7.2.39 DUP (element) page C7-1470 line 81332 MATCH x5e000400/mask=xffe0fc00 @@ -4226,21 +2504,11 @@ is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 # AUNIT --inst x5e040400/mask=xffe7fc00 --status pass :dup Rd_FPR32, Rn_VPR128.S.imm_neon_uimm2 -is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR32 & Zd +is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm2:4, 4, 16); - Rd_FPR32 = * [register]:4 tmp1; + Rd_FPR32 = Rn_VPR128.S.imm_neon_uimm2; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:4 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm2:1); - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm2:1); - Rd_FPR32 = NEON_dup(Rd_FPR32, tmp1, 4:1); -@endif } # C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 @@ -4252,52 +2520,25 @@ is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 :dup Rd_VPR128.16B, Rn_GPR32 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_16=1 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) || defined(SEMANTIC_force) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_GPR32, 0, 1, 4); - local tmp2:1 = * [register]:1 tmp1; - # simd duplicate Rd_VPR128.16B = all elements tmp2 (lane size 1) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp3 = tmp2; - 
simd_address_at(tmp3, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp3 = tmp2; + local tmp1:1 = Rn_GPR32[0,8]; + # simd duplicate Rd_VPR128.16B = all elements tmp1 (lane size 1) + Rd_VPR128.16B[0,8] = tmp1; + Rd_VPR128.16B[8,8] = tmp1; + Rd_VPR128.16B[16,8] = tmp1; + Rd_VPR128.16B[24,8] = tmp1; + Rd_VPR128.16B[32,8] = tmp1; + Rd_VPR128.16B[40,8] = tmp1; + Rd_VPR128.16B[48,8] = tmp1; + Rd_VPR128.16B[56,8] = tmp1; + Rd_VPR128.16B[64,8] = tmp1; + Rd_VPR128.16B[72,8] = tmp1; + Rd_VPR128.16B[80,8] = tmp1; + Rd_VPR128.16B[88,8] = tmp1; + Rd_VPR128.16B[96,8] = tmp1; + Rd_VPR128.16B[104,8] = tmp1; + Rd_VPR128.16B[112,8] = tmp1; + Rd_VPR128.16B[120,8] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:1 = SIMD_PIECE(Rn_GPR32, 0:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128.16B, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_dup(Rd_VPR128.16B, Rn_GPR32, 1:1); -@endif } # C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 @@ -4309,20 +2550,10 @@ is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_16=1 & b_1515=0 & imm4=0x :dup Rd_VPR128.2D, Rn_GPR64 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_1619=0b1000 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR64 & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd duplicate Rd_VPR128.2D = all elements Rn_GPR64 (lane size 8) - local tmp1:4 = 0; - simd_address_at(tmp1, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp1 = Rn_GPR64; - simd_address_at(tmp1, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp1 = Rn_GPR64; + Rd_VPR128.2D[0,64] = Rn_GPR64; + Rd_VPR128.2D[64,64] = Rn_GPR64; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_COPY(Rd_VPR128.2D, Rn_GPR64); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_dup(Rd_VPR128.2D, Rn_GPR64, 8:1); -@endif } # C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 @@ -4334,20 +2565,10 @@ is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_1619=0b1000 & b_1515=0 & :dup Rd_VPR64.2S, Rn_GPR32 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & b_1618=0b100 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd duplicate Rd_VPR64.2S = all elements Rn_GPR32 (lane size 4) - local tmp1:4 = 0; - simd_address_at(tmp1, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp1 = Rn_GPR32; - simd_address_at(tmp1, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp1 = Rn_GPR32; + Rd_VPR64.2S[0,32] = Rn_GPR32; + Rd_VPR64.2S[32,32] = Rn_GPR32; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_COPY(Rd_VPR64.2S, Rn_GPR32); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_dup(Rd_VPR64.2S, Rn_GPR32, 4:1); -@endif } # C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 @@ -4359,28 +2580,13 @@ is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & b_1618=0b100 & b_1515=0 & i :dup Rd_VPR64.4H, Rn_GPR32 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & b_1617=0b10 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_GPR32, 0, 2, 4); - local tmp2:2 = * [register]:2 tmp1; - # simd duplicate Rd_VPR64.4H = all elements tmp2 (lane size 2) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = tmp2; - 
simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = tmp2; + local tmp1:2 = Rn_GPR32[0,16]; + # simd duplicate Rd_VPR64.4H = all elements tmp1 (lane size 2) + Rd_VPR64.4H[0,16] = tmp1; + Rd_VPR64.4H[16,16] = tmp1; + Rd_VPR64.4H[32,16] = tmp1; + Rd_VPR64.4H[48,16] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Rn_GPR32, 0:1); - local tmpd:8 = SIMD_COPY(Rd_VPR64.4H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_dup(Rd_VPR64.4H, Rn_GPR32, 2:1); -@endif } # C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 @@ -4392,24 +2598,12 @@ is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & b_1617=0b10 & b_1515=0 & im :dup Rd_VPR128.4S, Rn_GPR32 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_1618=0b100 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd duplicate Rd_VPR128.4S = all elements Rn_GPR32 (lane size 4) - local tmp1:4 = 0; - simd_address_at(tmp1, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp1 = Rn_GPR32; - simd_address_at(tmp1, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp1 = Rn_GPR32; - simd_address_at(tmp1, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp1 = Rn_GPR32; - simd_address_at(tmp1, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp1 = Rn_GPR32; + Rd_VPR128.4S[0,32] = Rn_GPR32; + Rd_VPR128.4S[32,32] = Rn_GPR32; + Rd_VPR128.4S[64,32] = Rn_GPR32; + Rd_VPR128.4S[96,32] = Rn_GPR32; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_COPY(Rd_VPR128.4S, Rn_GPR32); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_dup(Rd_VPR128.4S, Rn_GPR32, 4:1); -@endif } # C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 @@ -4421,36 +2615,17 @@ is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_1618=0b100 & b_1515=0 & i :dup Rd_VPR64.8B, Rn_GPR32 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & b_16=1 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_GPR32, 0, 1, 4); - local tmp2:1 = * [register]:1 tmp1; - # simd duplicate Rd_VPR64.8B = all elements tmp2 (lane size 1) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp3 = tmp2; + local tmp1:1 = Rn_GPR32[0,8]; + # simd duplicate Rd_VPR64.8B = all elements tmp1 (lane size 1) + Rd_VPR64.8B[0,8] = tmp1; + Rd_VPR64.8B[8,8] = tmp1; + Rd_VPR64.8B[16,8] = tmp1; + Rd_VPR64.8B[24,8] = tmp1; + Rd_VPR64.8B[32,8] = tmp1; + Rd_VPR64.8B[40,8] = tmp1; + Rd_VPR64.8B[48,8] = tmp1; + Rd_VPR64.8B[56,8] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:1 = SIMD_PIECE(Rn_GPR32, 
0:1); - local tmpd:8 = SIMD_COPY(Rd_VPR64.8B, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_dup(Rd_VPR64.8B, Rn_GPR32, 1:1); -@endif } # C7.2.40 DUP (general) page C7-1473 line 81499 MATCH x0e000c00/mask=xbfe0fc00 @@ -4462,36 +2637,17 @@ is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & b_16=1 & b_1515=0 & imm4=0x :dup Rd_VPR128.8H, Rn_GPR32 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_1617=0b10 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_GPR32, 0, 2, 4); - local tmp2:2 = * [register]:2 tmp1; - # simd duplicate Rd_VPR128.8H = all elements tmp2 (lane size 2) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = tmp2; - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = tmp2; + local tmp1:2 = Rn_GPR32[0,16]; + # simd duplicate Rd_VPR128.8H = all elements tmp1 (lane size 2) + Rd_VPR128.8H[0,16] = tmp1; + Rd_VPR128.8H[16,16] = tmp1; + Rd_VPR128.8H[32,16] = tmp1; + Rd_VPR128.8H[48,16] = tmp1; + Rd_VPR128.8H[64,16] = tmp1; + Rd_VPR128.8H[80,16] = tmp1; + Rd_VPR128.8H[96,16] = tmp1; + Rd_VPR128.8H[112,16] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Rn_GPR32, 0:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128.8H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_dup(Rd_VPR128.8H, Rn_GPR32, 2:1); -@endif } # C7.2.41 EOR (vector) page C7-1475 line 81603 MATCH x2e201c00/mask=xbfe0fc00 @@ -4503,82 +2659,24 @@ is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_1617=0b10 & b_1515=0 & im :eor Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.16B = Rn_VPR128.16B ^ Rm_VPR128.16B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, 
Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] ^ Rm_VPR128.16B[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] ^ Rm_VPR128.16B[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] ^ Rm_VPR128.16B[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] ^ Rm_VPR128.16B[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] ^ Rm_VPR128.16B[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] ^ Rm_VPR128.16B[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] ^ Rm_VPR128.16B[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] ^ Rm_VPR128.16B[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] ^ Rm_VPR128.16B[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] ^ Rm_VPR128.16B[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] ^ Rm_VPR128.16B[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] ^ Rm_VPR128.16B[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] ^ Rm_VPR128.16B[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] ^ Rm_VPR128.16B[104,8]; + Rd_VPR128.16B[112,8] 
= Rn_VPR128.16B[112,8] ^ Rm_VPR128.16B[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] ^ Rm_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_XOR(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_eor(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.41 EOR (vector) page C7-1475 line 81603 MATCH x2e201c00/mask=xbfe0fc00 @@ -4590,50 +2688,16 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :eor Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.8B = Rn_VPR64.8B ^ Rm_VPR64.8B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) ^ (* [register]:1 tmp2); + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] ^ Rm_VPR64.8B[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] ^ Rm_VPR64.8B[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] ^ Rm_VPR64.8B[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] ^ Rm_VPR64.8B[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] ^ Rm_VPR64.8B[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] ^ Rm_VPR64.8B[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] ^ Rm_VPR64.8B[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] ^ Rm_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_XOR(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_eor(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.42 EOR3 page C7-1477 line 81685 MATCH xce000000/mask=xffe08000 @@ -4645,151 +2709,41 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & 
Rm_VPR64.8B :eor3 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, Ra_VPR128.16B is b_2131=0b11001110000 & b_15=0 & Rd_VPR128.16B & Rn_VPR128.16B & Rm_VPR128.16B & Ra_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rm_VPR128.16B | Ra_VPR128.16B on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 0, 1, 16); - simd_address_at(tmp4, TMPQ1, 0, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 1, 1, 16); - simd_address_at(tmp4, TMPQ1, 1, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 2, 1, 16); - simd_address_at(tmp4, TMPQ1, 2, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 3, 1, 16); - simd_address_at(tmp4, TMPQ1, 3, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 4, 1, 16); - simd_address_at(tmp4, TMPQ1, 4, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 5, 1, 16); - simd_address_at(tmp4, TMPQ1, 5, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 6, 1, 16); - simd_address_at(tmp4, TMPQ1, 6, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 7, 1, 16); - simd_address_at(tmp4, TMPQ1, 7, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 8, 1, 16); - simd_address_at(tmp4, TMPQ1, 8, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 9, 1, 16); - simd_address_at(tmp4, TMPQ1, 9, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 10, 1, 16); - simd_address_at(tmp4, TMPQ1, 10, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 11, 1, 16); - simd_address_at(tmp4, TMPQ1, 11, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 12, 1, 16); - simd_address_at(tmp4, TMPQ1, 12, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 13, 1, 16); - simd_address_at(tmp4, TMPQ1, 13, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - simd_address_at(tmp2, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 14, 1, 16); - simd_address_at(tmp4, TMPQ1, 14, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); - 
simd_address_at(tmp2, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, Ra_VPR128.16B, 15, 1, 16); - simd_address_at(tmp4, TMPQ1, 15, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) | (* [register]:1 tmp3); + TMPQ1[0,8] = Rm_VPR128.16B[0,8] | Ra_VPR128.16B[0,8]; + TMPQ1[8,8] = Rm_VPR128.16B[8,8] | Ra_VPR128.16B[8,8]; + TMPQ1[16,8] = Rm_VPR128.16B[16,8] | Ra_VPR128.16B[16,8]; + TMPQ1[24,8] = Rm_VPR128.16B[24,8] | Ra_VPR128.16B[24,8]; + TMPQ1[32,8] = Rm_VPR128.16B[32,8] | Ra_VPR128.16B[32,8]; + TMPQ1[40,8] = Rm_VPR128.16B[40,8] | Ra_VPR128.16B[40,8]; + TMPQ1[48,8] = Rm_VPR128.16B[48,8] | Ra_VPR128.16B[48,8]; + TMPQ1[56,8] = Rm_VPR128.16B[56,8] | Ra_VPR128.16B[56,8]; + TMPQ1[64,8] = Rm_VPR128.16B[64,8] | Ra_VPR128.16B[64,8]; + TMPQ1[72,8] = Rm_VPR128.16B[72,8] | Ra_VPR128.16B[72,8]; + TMPQ1[80,8] = Rm_VPR128.16B[80,8] | Ra_VPR128.16B[80,8]; + TMPQ1[88,8] = Rm_VPR128.16B[88,8] | Ra_VPR128.16B[88,8]; + TMPQ1[96,8] = Rm_VPR128.16B[96,8] | Ra_VPR128.16B[96,8]; + TMPQ1[104,8] = Rm_VPR128.16B[104,8] | Ra_VPR128.16B[104,8]; + TMPQ1[112,8] = Rm_VPR128.16B[112,8] | Ra_VPR128.16B[112,8]; + TMPQ1[120,8] = Rm_VPR128.16B[120,8] | Ra_VPR128.16B[120,8]; # simd infix Rd_VPR128.16B = Rn_VPR128.16B | TMPQ1 on lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp6, TMPQ1, 0, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp6, TMPQ1, 1, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp6, TMPQ1, 2, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp6, TMPQ1, 3, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp6, TMPQ1, 4, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp6, TMPQ1, 5, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp6, TMPQ1, 6, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp6, TMPQ1, 7, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp6, TMPQ1, 8, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp6, TMPQ1, 9, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp6, TMPQ1, 10, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 10, 
1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp6, TMPQ1, 11, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp6, TMPQ1, 12, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp6, TMPQ1, 13, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp6, TMPQ1, 14, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); - simd_address_at(tmp5, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp6, TMPQ1, 15, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) | (* [register]:1 tmp6); + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] | TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] | TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] | TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] | TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] | TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] | TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] | TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] | TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] | TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] | TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] | TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] | TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] | TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] | TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] | TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] | TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_OR(Rm_VPR128.16B, Ra_VPR128.16B, 1:1); - local tmpd:16 = SIMD_INT_OR(Rn_VPR128.16B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_eor3(Rn_VPR128.16B, Rm_VPR128.16B, Ra_VPR128.16B, 1:1); -@endif } # C7.2.43 EXT page C7-1478 line 81756 MATCH x2e000000/mask=xbfe08400 @@ -4800,9 +2754,7 @@ is b_2131=0b11001110000 & b_15=0 & Rd_VPR128.16B & Rn_VPR128.16B & Rm_VPR128.16B :ext Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, imm4 is b_3131=0 & q=1 & b_2429=0x2e & b_2223=0b00 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & imm4 & b_1010=0 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_ext(Rn_VPR128.16B, Rm_VPR128.16B, imm4:1, 1:1); -@endif } # C7.2.43 EXT page C7-1478 line 81756 MATCH x2e000000/mask=xbfe08400 @@ -4813,9 +2765,7 @@ is b_3131=0 & q=1 & b_2429=0x2e & b_2223=0b00 & b_2121=0 & Rm_VPR128.16B & b_151 :ext Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, imm4 is b_3131=0 & q=0 & b_2429=0x2e & b_2223=0b00 & b_2121=0 & Rm_VPR64.8B & b_1415=0 & imm4 & b_1010=0 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_ext(Rn_VPR64.8B, Rm_VPR64.8B, imm4:1, 1:1); -@endif } # C7.2.44 FABD page C7-1480 line 
81859 MATCH x2ec01400/mask=xbfe0fc00 @@ -4828,50 +2778,17 @@ is b_3131=0 & q=0 & b_2429=0x2e & b_2223=0b00 & b_2121=0 & Rm_VPR64.8B & b_1415= :fabd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=1 & b_21=0 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.4H f- Rm_VPR64.4H on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp4, TMPD1, 0, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp2) f- (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp2) f- (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp2) f- (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp2) f- (* [register]:2 tmp3); + TMPD1[0,16] = Rn_VPR64.4H[0,16] f- Rm_VPR64.4H[0,16]; + TMPD1[16,16] = Rn_VPR64.4H[16,16] f- Rm_VPR64.4H[16,16]; + TMPD1[32,16] = Rn_VPR64.4H[32,16] f- Rm_VPR64.4H[32,16]; + TMPD1[48,16] = Rn_VPR64.4H[48,16] f- Rm_VPR64.4H[48,16]; # simd unary Rd_VPR64.4H = abs(TMPD1) on lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD1, 0, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp6 = abs(* [register]:2 tmp5); - simd_address_at(tmp5, TMPD1, 1, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp6 = abs(* [register]:2 tmp5); - simd_address_at(tmp5, TMPD1, 2, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp6 = abs(* [register]:2 tmp5); - simd_address_at(tmp5, TMPD1, 3, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp6 = abs(* [register]:2 tmp5); + Rd_VPR64.4H[0,16] = abs(TMPD1[0,16]); + Rd_VPR64.4H[16,16] = abs(TMPD1[16,16]); + Rd_VPR64.4H[32,16] = abs(TMPD1[32,16]); + Rd_VPR64.4H[48,16] = abs(TMPD1[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_FLOAT_SUB(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); - local tmpd:8 = SIMD_FLOAT_ABS(tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fabd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.44 FABD page C7-1480 line 81859 MATCH x2ec01400/mask=xbfe0fc00 @@ -4884,78 +2801,25 @@ is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=1 & b_21=0 & b_1015=0b000101 & Rd_V :fabd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_21=0 & b_1015=0b000101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H f- Rm_VPR128.8H on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) f- (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - * [register]:2 tmp4 = (* 
[register]:2 tmp2) f- (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) f- (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) f- (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) f- (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) f- (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) f- (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 7, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) f- (* [register]:2 tmp3); + TMPQ1[0,16] = Rn_VPR128.8H[0,16] f- Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] f- Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] f- Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] f- Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] f- Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] f- Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] f- Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] f- Rm_VPR128.8H[112,16]; # simd unary Rd_VPR128.8H = abs(TMPQ1) on lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp6 = abs(* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = abs(* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp6 = abs(* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = abs(* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = abs(* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = abs(* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = abs(* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = abs(* [register]:2 tmp5); + Rd_VPR128.8H[0,16] = abs(TMPQ1[0,16]); + Rd_VPR128.8H[16,16] = abs(TMPQ1[16,16]); + Rd_VPR128.8H[32,16] = abs(TMPQ1[32,16]); + Rd_VPR128.8H[48,16] = abs(TMPQ1[48,16]); + Rd_VPR128.8H[64,16] = abs(TMPQ1[64,16]); + Rd_VPR128.8H[80,16] = abs(TMPQ1[80,16]); + Rd_VPR128.8H[96,16] = abs(TMPQ1[96,16]); + Rd_VPR128.8H[112,16] = abs(TMPQ1[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_FLOAT_SUB(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - local tmpd:16 = SIMD_FLOAT_ABS(tmp1, 2:1); 
- Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fabd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.44 FABD page C7-1480 line 81859 MATCH x2ea0d400/mask=xbfa0fc00 @@ -4968,36 +2832,13 @@ is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_21=0 & b_1015=0b000101 & Rd_V :fabd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110101 & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.2S f- Rm_VPR64.2S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) f- (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) f- (* [register]:4 tmp3); + TMPD1[0,32] = Rn_VPR64.2S[0,32] f- Rm_VPR64.2S[0,32]; + TMPD1[32,32] = Rn_VPR64.2S[32,32] f- Rm_VPR64.2S[32,32]; # simd unary Rd_VPR64.2S = abs(TMPD1) on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD1, 0, 4, 8); - simd_address_at(tmp6, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp6 = abs(* [register]:4 tmp5); - simd_address_at(tmp5, TMPD1, 1, 4, 8); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp6 = abs(* [register]:4 tmp5); + Rd_VPR64.2S[0,32] = abs(TMPD1[0,32]); + Rd_VPR64.2S[32,32] = abs(TMPD1[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_FLOAT_SUB(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - local tmpd:8 = SIMD_FLOAT_ABS(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fabd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.44 FABD page C7-1480 line 81859 MATCH x2ea0d400/mask=xbfa0fc00 @@ -5010,50 +2851,17 @@ is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110101 & Rd_V :fabd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110101 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S f- Rm_VPR128.4S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f- (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f- (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f- (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f- (* [register]:4 tmp3); + TMPQ1[0,32] = Rn_VPR128.4S[0,32] f- Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] f- Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] f- Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] f- Rm_VPR128.4S[96,32]; # 
simd unary Rd_VPR128.4S = abs(TMPQ1) on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp6 = abs(* [register]:4 tmp5); - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp6 = abs(* [register]:4 tmp5); - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp6 = abs(* [register]:4 tmp5); - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = abs(* [register]:4 tmp5); + Rd_VPR128.4S[0,32] = abs(TMPQ1[0,32]); + Rd_VPR128.4S[32,32] = abs(TMPQ1[32,32]); + Rd_VPR128.4S[64,32] = abs(TMPQ1[64,32]); + Rd_VPR128.4S[96,32] = abs(TMPQ1[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_FLOAT_SUB(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - local tmpd:16 = SIMD_FLOAT_ABS(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fabd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.44 FABD page C7-1480 line 81859 MATCH x2ea0d400/mask=xbfa0fc00 @@ -5066,36 +2874,13 @@ is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110101 & Rd_V :fabd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_21=1 & b_1015=0b110101 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.2D f- Rm_VPR128.2D on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) f- (* [register]:8 tmp3); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) f- (* [register]:8 tmp3); + TMPQ1[0,64] = Rn_VPR128.2D[0,64] f- Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] f- Rm_VPR128.2D[64,64]; # simd unary Rd_VPR128.2D = abs(TMPQ1) on lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp6 = abs(* [register]:8 tmp5); - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = abs(* [register]:8 tmp5); + Rd_VPR128.2D[0,64] = abs(TMPQ1[0,64]); + Rd_VPR128.2D[64,64] = abs(TMPQ1[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_FLOAT_SUB(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - local tmpd:16 = SIMD_FLOAT_ABS(tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fabd(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.44 FABD page C7-1480 line 81859 MATCH x7ec01400/mask=xffe0fc00 @@ -5108,17 +2893,9 @@ is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_21=1 & b_1015=0b110101 & Rd_V :fabd Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01111110110 & b_1015=0b000101 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = Rn_FPR16 f- Rm_FPR16; Rd_FPR16 = abs(tmp1); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = Rn_FPR16 f- Rm_FPR16; - local tmpd:2 = abs(tmp1); - Zd = zext(tmpd); # 
assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fabd(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.44 FABD page C7-1480 line 81859 MATCH x7ea0d400/mask=xffa0fc00 @@ -5131,17 +2908,9 @@ is b_2131=0b01111110110 & b_1015=0b000101 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd :fabd Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_2131=0b01111110101 & b_1015=0b110101 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Rn_FPR32 f- Rm_FPR32; Rd_FPR32 = abs(tmp1); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Rn_FPR32 f- Rm_FPR32; - local tmpd:4 = abs(tmp1); - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fabd(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.44 FABD page C7-1480 line 81859 MATCH x7ea0d400/mask=xffa0fc00 @@ -5154,17 +2923,9 @@ is b_2131=0b01111110101 & b_1015=0b110101 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd :fabd Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2131=0b01111110111 & b_1015=0b110101 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = Rn_FPR64 f- Rm_FPR64; Rd_FPR64 = abs(tmp1); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_FPR64 f- Rm_FPR64; - local tmpd:8 = abs(tmp1); - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fabd(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.45 FABS (vector) page C7-1483 line 82050 MATCH x0ea0f800/mask=xbfbffc00 @@ -5176,23 +2937,10 @@ is b_2131=0b01111110111 & b_1015=0b110101 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd :fabs Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.2D = abs(Rn_VPR128.2D) on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp2 = abs(* [register]:8 tmp1); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp2 = abs(* [register]:8 tmp1); + Rd_VPR128.2D[0,64] = abs(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = abs(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_ABS(Rn_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fabs(Rn_VPR128.2D, 8:1); -@endif } # C7.2.45 FABS (vector) page C7-1483 line 82050 MATCH x0ea0f800/mask=xbfbffc00 @@ -5204,23 +2952,10 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x :fabs Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.2S = abs(Rn_VPR64.2S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp2 = abs(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp2 = abs(* [register]:4 tmp1); + Rd_VPR64.2S[0,32] = abs(Rn_VPR64.2S[0,32]); + Rd_VPR64.2S[32,32] = abs(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_ABS(Rn_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning 
to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fabs(Rn_VPR64.2S, 4:1); -@endif } # C7.2.45 FABS (vector) page C7-1483 line 82050 MATCH x0ea0f800/mask=xbfbffc00 @@ -5232,29 +2967,12 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :fabs Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.4S = abs(Rn_VPR128.4S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp2 = abs(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp2 = abs(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp2 = abs(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp2 = abs(* [register]:4 tmp1); + Rd_VPR128.4S[0,32] = abs(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = abs(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = abs(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = abs(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_ABS(Rn_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fabs(Rn_VPR128.4S, 4:1); -@endif } # C7.2.45 FABS (vector) page C7-1483 line 82050 MATCH x0ef8f800/mask=xbffffc00 @@ -5267,29 +2985,12 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :fabs Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111011111000111110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.4H = abs(Rn_VPR64.4H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = abs(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = abs(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp2 = abs(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = abs(* [register]:2 tmp1); + Rd_VPR64.4H[0,16] = abs(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = abs(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = abs(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = abs(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_ABS(Rn_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fabs(Rn_VPR64.4H, 2:1); -@endif } # C7.2.45 FABS (vector) page C7-1483 line 82050 MATCH x0ef8f800/mask=xbffffc00 @@ -5302,41 +3003,16 @@ is b_31=0 & b_30=0 & b_1029=0b00111011111000111110 & Rd_VPR64.4H & Rn_VPR64.4H & :fabs Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111011111000111110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.8H = abs(Rn_VPR128.8H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, 
Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp2 = abs(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = abs(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = abs(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = abs(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = abs(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = abs(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = abs(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = abs(* [register]:2 tmp1); + Rd_VPR128.8H[0,16] = abs(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = abs(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = abs(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = abs(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = abs(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = abs(Rn_VPR128.8H[80,16]); + Rd_VPR128.8H[96,16] = abs(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = abs(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_ABS(Rn_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fabs(Rn_VPR128.8H, 2:1); -@endif } # C7.2.46 FABS (scalar) page C7-1485 line 82158 MATCH x1e20c000/mask=xff3ffc00 @@ -5348,15 +3024,8 @@ is b_31=0 & b_30=1 & b_1029=0b00111011111000111110 & Rd_VPR128.8H & Rn_VPR128.8H :fabs Rd_FPR16, Rn_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x1 & b_1014=0x10 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = abs(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = abs(Rn_FPR16); - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fabs(Rn_FPR16); -@endif } # C7.2.46 FABS (scalar) page C7-1485 line 82158 MATCH x1e20c000/mask=xff3ffc00 @@ -5368,15 +3037,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x1 & b_ :fabs Rd_FPR64, Rn_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x1 & b_1014=0x10 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = abs(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = abs(Rn_FPR64); - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fabs(Rn_FPR64); -@endif } # C7.2.46 FABS (scalar) page C7-1485 line 82158 MATCH x1e20c000/mask=xff3ffc00 @@ -5388,15 +3050,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x1 & b_ :fabs Rd_FPR32, Rn_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x1 & b_1014=0x10 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = abs(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = abs(Rn_FPR32); - zext_zs(Zd); # zero upper 28 bytes 
of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fabs(Rn_FPR32); -@endif } # C7.2.47 FACGE page C7-1487 line 82250 MATCH x2e20ec00/mask=xbfa0fc00 @@ -5407,9 +3062,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x1 & b_ :facge Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1d & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_facge(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.47 FACGE page C7-1487 line 82250 MATCH x2e20ec00/mask=xbfa0fc00 @@ -5420,9 +3073,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & :facge Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1d & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_facge(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.47 FACGE page C7-1487 line 82250 MATCH x2e20ec00/mask=xbfa0fc00 @@ -5433,9 +3084,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :facge Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1d & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_facge(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.47 FACGE page C7-1487 line 82250 MATCH x7e402c00/mask=xffe0fc00 @@ -5447,9 +3096,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :facge Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01111110010 & b_1015=0b001011 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_facge(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.47 FACGE page C7-1487 line 82250 MATCH x7e20ec00/mask=xffa0fc00 @@ -5461,9 +3108,7 @@ is b_2131=0b01111110010 & b_1015=0b001011 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd :facge Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_2331=0b011111100 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_facge(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.47 FACGE page C7-1487 line 82250 MATCH x7e20ec00/mask=xffa0fc00 @@ -5475,9 +3120,7 @@ is b_2331=0b011111100 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_FPR32 & Rn_FPR32 :facge Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2331=0b011111100 & b_22=1 & b_21=1 & b_1015=0b111011 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_facge(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.47 FACGE page C7-1487 line 82250 MATCH x2e402c00/mask=xbfe0fc00 @@ -5489,9 +3132,7 @@ is b_2331=0b011111100 & b_22=1 & b_21=1 & b_1015=0b111011 & Rd_FPR64 & Rn_FPR64 :facge Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_facge(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.47 FACGE page C7-1487 line 82250 MATCH x2e402c00/mask=xbfe0fc00 @@ -5503,9 +3144,7 @@ is b_31=0 & 
b_30=0 & b_2129=0b101110010 & b_1015=0b001011 & Rd_VPR64.4H & Rn_VPR :facge Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_facge(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.48 FACGT page C7-1491 line 82494 MATCH x2ea0ec00/mask=xbfa0fc00 @@ -5516,9 +3155,7 @@ is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001011 & Rd_VPR128.8H & Rn_VP :facgt Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1d & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_facgt(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.48 FACGT page C7-1491 line 82494 MATCH x2ea0ec00/mask=xbfa0fc00 @@ -5529,9 +3166,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & :facgt Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1d & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_facgt(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.48 FACGT page C7-1491 line 82494 MATCH x2ea0ec00/mask=xbfa0fc00 @@ -5542,9 +3177,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & :facgt Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1d & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_facgt(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.48 FACGT page C7-1491 line 82494 MATCH x7ec02c00/mask=xffe0fc00 @@ -5556,9 +3189,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & :facgt Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01111110110 & b_1015=0b001011 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_facgt(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.48 FACGT page C7-1491 line 82494 MATCH x7ea0ec00/mask=xffa0fc00 @@ -5570,9 +3201,7 @@ is b_2131=0b01111110110 & b_1015=0b001011 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd :facgt Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_2331=0b011111101 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_facgt(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.48 FACGT page C7-1491 line 82494 MATCH x7ea0ec00/mask=xffa0fc00 @@ -5584,9 +3213,7 @@ is b_2331=0b011111101 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_FPR32 & Rn_FPR32 :facgt Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2331=0b011111101 & b_22=1 & b_21=1 & b_1015=0b111011 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_facgt(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.48 FACGT page C7-1491 line 82494 MATCH x2ec02c00/mask=xbfe0fc00 @@ -5598,9 +3225,7 @@ is b_2331=0b011111101 & b_22=1 & b_21=1 & b_1015=0b111011 & Rd_FPR64 & Rn_FPR64 :facgt Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b001011 & 
Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_facgt(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.48 FACGT page C7-1491 line 82494 MATCH x2ec02c00/mask=xbfe0fc00 @@ -5612,9 +3237,7 @@ is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b001011 & Rd_VPR64.4H & Rn_VPR :facgt Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b001011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_facgt(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.49 FADD (vector) page C7-1495 line 82738 MATCH x0e20d400/mask=xbfa0fc00 @@ -5626,26 +3249,10 @@ is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b001011 & Rd_VPR128.8H & Rn_VP :fadd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1a & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.2D = Rn_VPR128.2D f+ Rm_VPR128.2D on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) f+ (* [register]:8 tmp2); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) f+ (* [register]:8 tmp2); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f+ Rm_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f+ Rm_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_ADD(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fadd(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.49 FADD (vector) page C7-1495 line 82738 MATCH x0e20d400/mask=xbfa0fc00 @@ -5657,26 +3264,10 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & :fadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1a & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.2S = Rn_VPR64.2S f+ Rm_VPR64.2S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) f+ (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) f+ (* [register]:4 tmp2); + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f+ Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f+ Rm_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_ADD(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.49 FADD (vector) page C7-1495 line 82738 MATCH x0e20d400/mask=xbfa0fc00 @@ -5688,34 +3279,12 @@ 
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :fadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1a & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.4S = Rn_VPR128.4S f+ Rm_VPR128.4S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f+ (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f+ (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f+ (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f+ (* [register]:4 tmp2); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f+ Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f+ Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f+ Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f+ Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_ADD(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.49 FADD (vector) page C7-1495 line 82738 MATCH x0e401400/mask=xbfe0fc00 @@ -5728,34 +3297,12 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :fadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.4H = Rn_VPR64.4H f+ Rm_VPR64.4H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f+ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f+ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f+ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f+ (* [register]:2 tmp2); + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f+ Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f+ Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f+ Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f+ Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local 
tmpd:8 = SIMD_FLOAT_ADD(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.49 FADD (vector) page C7-1495 line 82738 MATCH x0e401400/mask=xbfe0fc00 @@ -5768,50 +3315,16 @@ is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR :fadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.8H = Rn_VPR128.8H f+ Rm_VPR128.8H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f+ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f+ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f+ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f+ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f+ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f+ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f+ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f+ (* [register]:2 tmp2); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f+ Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f+ Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f+ Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f+ Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f+ Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f+ Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f+ Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f+ Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_ADD(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.50 FADD (scalar) page C7-1497 line 82859 MATCH x1e202800/mask=xff20fc00 @@ -5823,15 +3336,8 @@ is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000101 & Rd_VPR128.8H & Rn_VP :fadd Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & 
ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x2 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = Rn_FPR64 f+ Rm_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = Rn_FPR64 f+ Rm_FPR64; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fadd(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.50 FADD (scalar) page C7-1497 line 82859 MATCH x1e202800/mask=xff20fc00 @@ -5843,15 +3349,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0 :fadd Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x2 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = Rn_FPR16 f+ Rm_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = Rn_FPR16 f+ Rm_FPR16; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fadd(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.50 FADD (scalar) page C7-1497 line 82859 MATCH x1e202800/mask=xff20fc00 @@ -5863,15 +3362,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0 :fadd Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x2 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = Rn_FPR32 f+ Rm_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = Rn_FPR32 f+ Rm_FPR32; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fadd(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.51 FADDP (scalar) page C7-1499 line 82962 MATCH x7e30d800/mask=xffbffc00 @@ -5883,26 +3375,11 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0 :faddp Rd_FPR64, Rn_VPR128.2D is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x38 & b_1216=0xd & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) # sipd infix Rd_FPR64 = f+(Rn_VPR128.2D) on pairs lane size (8 to 8) - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:8 = 0; - local tmp5:8 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_FPR64, 0, 8, 8); - tmp4 = * [register]:8 tmp1; - tmp5 = * [register]:8 tmp2; - * [register]:8 tmp3 = tmp4 f+ tmp5; + tmp1 = Rn_VPR128.2D[0,64]; + tmp2 = Rn_VPR128.2D[64,64]; + Rd_FPR64[0,64] = tmp1 f+ tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_ADD(Rn_VPR128.2D); - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_faddp(Rn_VPR128.2D, 8:1); -@endif } # C7.2.51 FADDP (scalar) page C7-1499 line 82962 MATCH x7e30d800/mask=xffbffc00 @@ -5914,26 +3391,11 @@ is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x38 & b_1216=0xd & b_1011=2 & :faddp Rd_FPR32, Rn_VPR64.2S is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x18 & b_1216=0xd & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) # sipd infix Rd_FPR32 = f+(Rn_VPR64.2S) on pairs lane size (4 to 4) - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rd_FPR32, 0, 4, 4); - tmp4 = * [register]:4 tmp1; - tmp5 = * [register]:4 tmp2; - * [register]:4 tmp3 = tmp4 f+ tmp5; + tmp1 = 
Rn_VPR64.2S[0,32]; + tmp2 = Rn_VPR64.2S[32,32]; + Rd_FPR32[0,32] = tmp1 f+ tmp2; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:4 = SIMD_FLOAT_ADD(Rn_VPR64.2S); - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_faddp(Rn_VPR64.2S, 4:1); -@endif } # C7.2.51 FADDP (scalar) page C7-1499 line 82962 MATCH x5e30d800/mask=xfffffc00 @@ -5946,26 +3408,11 @@ is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x18 & b_1216=0xd & b_1011=2 & :faddp Rd_FPR16, vRn_VPR128^".2H" is b_1031=0b0101111000110000110110 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_primitive) # sipd infix Rd_FPR16 = f+(Rn_FPR32) on pairs lane size (2 to 2) - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:2 = 0; - local tmp5:2 = 0; - simd_address_at(tmp1, Rn_FPR32, 0, 2, 4); - simd_address_at(tmp2, Rn_FPR32, 1, 2, 4); - simd_address_at(tmp3, Rd_FPR16, 0, 2, 2); - tmp4 = * [register]:2 tmp1; - tmp5 = * [register]:2 tmp2; - * [register]:2 tmp3 = tmp4 f+ tmp5; + tmp1 = Rn_FPR32[0,16]; + tmp2 = Rn_FPR32[16,16]; + Rd_FPR16[0,16] = tmp1 f+ tmp2; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:2 = SIMD_FLOAT_ADD(Rn_FPR32, 2:1); - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_faddp(Rn_FPR32, 2:1); -@endif } # C7.2.52 FADDP (vector) page C7-1501 line 83067 MATCH x2e20d400/mask=xbfa0fc00 @@ -5977,36 +3424,16 @@ is b_1031=0b0101111000110000110110 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd :faddp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1a & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = f+(Rn_VPR128.2D,Rm_VPR128.2D) on pairs lane size (8 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:8 = 0; - local tmp6:8 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - tmp5 = * [register]:8 tmp2; - tmp6 = * [register]:8 tmp3; - * [register]:8 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - tmp5 = * [register]:8 tmp2; - tmp6 = * [register]:8 tmp3; - * [register]:8 tmp4 = tmp5 f+ tmp6; + tmp2 = Rn_VPR128.2D[0,64]; + tmp3 = Rn_VPR128.2D[64,64]; + TMPQ1[0,64] = tmp2 f+ tmp3; + tmp2 = Rm_VPR128.2D[0,64]; + tmp3 = Rm_VPR128.2D[64,64]; + TMPQ1[64,64] = tmp2 f+ tmp3; Rd_VPR128.2D = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_FLOAT_ADD(Rn_VPR128.2D, Rm_VPR128.2D); - local tmpd:16 = tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_faddp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.52 FADDP (vector) page C7-1501 line 83067 MATCH x2e20d400/mask=xbfa0fc00 @@ -6018,36 +3445,16 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & :faddp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1a & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = 0; # sipd infix TMPD1 = f+(Rn_VPR64.2S,Rm_VPR64.2S) on pairs lane size (4 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 
0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - tmp5 = * [register]:4 tmp2; - tmp6 = * [register]:4 tmp3; - * [register]:4 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - tmp5 = * [register]:4 tmp2; - tmp6 = * [register]:4 tmp3; - * [register]:4 tmp4 = tmp5 f+ tmp6; + tmp2 = Rn_VPR64.2S[0,32]; + tmp3 = Rn_VPR64.2S[32,32]; + TMPD1[0,32] = tmp2 f+ tmp3; + tmp2 = Rm_VPR64.2S[0,32]; + tmp3 = Rm_VPR64.2S[32,32]; + TMPD1[32,32] = tmp2 f+ tmp3; Rd_VPR64.2S = TMPD1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 0; - tmp1 = SIMD_FLOAT_ADD(Rn_VPR64.2S, Rm_VPR64.2S); - local tmpd:8 = tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_faddp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.52 FADDP (vector) page C7-1501 line 83067 MATCH x2e20d400/mask=xbfa0fc00 @@ -6059,48 +3466,22 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :faddp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1a & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = f+(Rn_VPR128.4S,Rm_VPR128.4S) on pairs lane size (4 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - tmp5 = * [register]:4 tmp2; - tmp6 = * [register]:4 tmp3; - * [register]:4 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - tmp5 = * [register]:4 tmp2; - tmp6 = * [register]:4 tmp3; - * [register]:4 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - tmp5 = * [register]:4 tmp2; - tmp6 = * [register]:4 tmp3; - * [register]:4 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - tmp5 = * [register]:4 tmp2; - tmp6 = * [register]:4 tmp3; - * [register]:4 tmp4 = tmp5 f+ tmp6; + tmp2 = Rn_VPR128.4S[0,32]; + tmp3 = Rn_VPR128.4S[32,32]; + TMPQ1[0,32] = tmp2 f+ tmp3; + tmp2 = Rn_VPR128.4S[64,32]; + tmp3 = Rn_VPR128.4S[96,32]; + TMPQ1[32,32] = tmp2 f+ tmp3; + tmp2 = Rm_VPR128.4S[0,32]; + tmp3 = Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = tmp2 f+ tmp3; + tmp2 = Rm_VPR128.4S[64,32]; + tmp3 = Rm_VPR128.4S[96,32]; + TMPQ1[96,32] = tmp2 f+ tmp3; Rd_VPR128.4S = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_FLOAT_ADD(Rn_VPR128.4S, Rm_VPR128.4S); - local tmpd:16 = tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_faddp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.52 FADDP (vector) page C7-1501 line 83067 MATCH x2e401400/mask=xbfe0fc00 @@ -6113,48 +3494,22 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :faddp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & 
Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = 0; # sipd infix TMPD1 = f+(Rm_VPR64.4H,Rn_VPR64.4H) on pairs lane size (2 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:2 = 0; - local tmp6:2 = 0; - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp4, TMPD1, 0, 2, 8); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 f+ tmp6; + tmp2 = Rm_VPR64.4H[0,16]; + tmp3 = Rm_VPR64.4H[16,16]; + TMPD1[0,16] = tmp2 f+ tmp3; + tmp2 = Rm_VPR64.4H[32,16]; + tmp3 = Rm_VPR64.4H[48,16]; + TMPD1[16,16] = tmp2 f+ tmp3; + tmp2 = Rn_VPR64.4H[0,16]; + tmp3 = Rn_VPR64.4H[16,16]; + TMPD1[32,16] = tmp2 f+ tmp3; + tmp2 = Rn_VPR64.4H[32,16]; + tmp3 = Rn_VPR64.4H[48,16]; + TMPD1[48,16] = tmp2 f+ tmp3; Rd_VPR64.4H = TMPD1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 0; - tmp1 = SIMD_FLOAT_ADD(Rm_VPR64.4H, Rn_VPR64.4H); - local tmpd:8 = tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_faddp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.52 FADDP (vector) page C7-1501 line 83067 MATCH x2e401400/mask=xbfe0fc00 @@ -6167,72 +3522,34 @@ is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR :faddp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b000101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = f+(Rn_VPR128.8H,Rm_VPR128.8H) on pairs lane size (2 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:2 = 0; - local tmp6:2 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 
= * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 f+ tmp6; - simd_address_at(tmp2, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 7, 2, 16); - tmp5 = * [register]:2 tmp2; - tmp6 = * [register]:2 tmp3; - * [register]:2 tmp4 = tmp5 f+ tmp6; + tmp2 = Rn_VPR128.8H[0,16]; + tmp3 = Rn_VPR128.8H[16,16]; + TMPQ1[0,16] = tmp2 f+ tmp3; + tmp2 = Rn_VPR128.8H[32,16]; + tmp3 = Rn_VPR128.8H[48,16]; + TMPQ1[16,16] = tmp2 f+ tmp3; + tmp2 = Rn_VPR128.8H[64,16]; + tmp3 = Rn_VPR128.8H[80,16]; + TMPQ1[32,16] = tmp2 f+ tmp3; + tmp2 = Rn_VPR128.8H[96,16]; + tmp3 = Rn_VPR128.8H[112,16]; + TMPQ1[48,16] = tmp2 f+ tmp3; + tmp2 = Rm_VPR128.8H[0,16]; + tmp3 = Rm_VPR128.8H[16,16]; + TMPQ1[64,16] = tmp2 f+ tmp3; + tmp2 = Rm_VPR128.8H[32,16]; + tmp3 = Rm_VPR128.8H[48,16]; + TMPQ1[80,16] = tmp2 f+ tmp3; + tmp2 = Rm_VPR128.8H[64,16]; + tmp3 = Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = tmp2 f+ tmp3; + tmp2 = Rm_VPR128.8H[96,16]; + tmp3 = Rm_VPR128.8H[112,16]; + TMPQ1[112,16] = tmp2 f+ tmp3; Rd_VPR128.8H = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_FLOAT_ADD(Rn_VPR128.8H, Rm_VPR128.8H); - local tmpd:16 = tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_faddp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.46 FCADD page C7-1090 line 63037 KEEPWITH @@ -6249,9 +3566,7 @@ fcadd_rotate: #270 is b_12=1 { export 270:1; } :fcadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, fcadd_rotate is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fcadd(Rn_VPR64.4H, Rm_VPR64.4H, fcadd_rotate, 2:1); -@endif } # C7.2.53 FCADD page C7-1503 line 83189 MATCH x2e00e400/mask=xbf20ec00 @@ -6263,9 +3578,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b111 & b_1 :fcadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, fcadd_rotate is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fcadd(Rn_VPR128.8H, Rm_VPR128.8H, fcadd_rotate, 2:1); -@endif } # C7.2.53 FCADD page C7-1503 line 83189 MATCH x2e00e400/mask=xbf20ec00 @@ -6277,9 +3590,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b111 & b_1 :fcadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, fcadd_rotate is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fcadd(Rn_VPR64.2S, Rm_VPR64.2S, fcadd_rotate, 4:1); -@endif } # C7.2.53 FCADD page C7-1503 line 83189 MATCH x2e00e400/mask=xbf20ec00 @@ -6291,9 +3602,7 
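# The rewritten FADDP semantics above follow this per-lane pattern (64-bit form
# shown; [offset,width] selects a bit range of the vector register, so each slice
# replaces a simd_address_at() load/store pair from the old SEMANTIC_primitive
# branch and a single unconditional p-code body remains):
#
#   TMPD1 = 0;
#   tmp2 = Rm_VPR64.4H[0,16];
#   tmp3 = Rm_VPR64.4H[16,16];
#   TMPD1[0,16] = tmp2 f+ tmp3;
#   ...                              # likewise for the remaining three result lanes
#   Rd_VPR64.4H = TMPD1;
#   zext_zd(Zd);                     # zero upper 24 bytes of Zd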
@@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b111 & b_1 :fcadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, fcadd_rotate is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fcadd(Rn_VPR128.4S, Rm_VPR128.4S, fcadd_rotate, 4:1); -@endif } # C7.2.53 FCADD page C7-1503 line 83189 MATCH x2e00e400/mask=xbf20ec00 @@ -6305,9 +3614,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b111 & b_1 :fcadd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, fcadd_rotate is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fcadd(Rn_VPR128.2D, Rm_VPR128.2D, fcadd_rotate, 8:1); -@endif } # C7.2.54 FCCMP page C7-1505 line 83301 MATCH x1e200400/mask=xff200c10 @@ -6319,19 +3626,10 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_21=0 & b_1315=0b111 & b_1 :fccmp Rn_FPR64, Rm_FPR64, NZCVImm_uimm4, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & CondOp & b_1011=1 & Rn_FPR64 & fpccmp.op=0 & NZCVImm_uimm4 { -@if defined(SEMANTIC_primitive) setCC_NZCV(NZCVImm_uimm4:1); local tmp1:1 = ! CondOp:1; if (tmp1) goto inst_next; fcomp(Rn_FPR64, Rm_FPR64); -@elif defined(SEMANTIC_pcode) - setCC_NZCV(NZCVImm_uimm4:1); - local tmp1:1 = ! CondOp:1; - if (tmp1) goto inst_next; - fcomp(Rn_FPR64, Rm_FPR64); -@elif defined(SEMANTIC_pseudo) - NEON_fccmp(Rn_FPR64, Rm_FPR64, NZCVImm_uimm4:1, CondOp:1); -@endif } # C7.2.54 FCCMP page C7-1505 line 83301 MATCH x1e200400/mask=xff200c10 @@ -6343,19 +3641,10 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & CondOp & :fccmp Rn_FPR32, Rm_FPR32, NZCVImm_uimm4, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & CondOp & b_1011=1 & Rn_FPR32 & fpccmp.op=0 & NZCVImm_uimm4 { -@if defined(SEMANTIC_primitive) setCC_NZCV(NZCVImm_uimm4:1); local tmp1:1 = ! CondOp:1; if (tmp1) goto inst_next; fcomp(Rn_FPR32, Rm_FPR32); -@elif defined(SEMANTIC_pcode) - setCC_NZCV(NZCVImm_uimm4:1); - local tmp1:1 = ! CondOp:1; - if (tmp1) goto inst_next; - fcomp(Rn_FPR32, Rm_FPR32); -@elif defined(SEMANTIC_pseudo) - NEON_fccmp(Rn_FPR32, Rm_FPR32, NZCVImm_uimm4:1, CondOp:1); -@endif } # C7.2.54 FCCMP page C7-1505 line 83301 MATCH x1e200400/mask=xff200c10 @@ -6367,19 +3656,10 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & CondOp & :fccmp Rn_FPR16, Rm_FPR16, NZCVImm_uimm4, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & CondOp & b_1011=1 & Rn_FPR16 & fpccmp.op=0 & NZCVImm_uimm4 { -@if defined(SEMANTIC_primitive) setCC_NZCV(NZCVImm_uimm4:1); local tmp1:1 = ! CondOp:1; if (tmp1) goto inst_next; fcomp(Rn_FPR16, Rm_FPR16); -@elif defined(SEMANTIC_pcode) - setCC_NZCV(NZCVImm_uimm4:1); - local tmp1:1 = ! 
CondOp:1; - if (tmp1) goto inst_next; - fcomp(Rn_FPR16, Rm_FPR16); -@elif defined(SEMANTIC_pseudo) - NEON_fccmp(Rn_FPR16, Rm_FPR16, NZCVImm_uimm4:1, CondOp:1); -@endif } # C7.2.55 FCCMPE page C7-1507 line 83416 MATCH x1e200410/mask=xff200c10 @@ -6391,21 +3671,11 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & CondOp & :fccmpe Rn_FPR64, Rm_FPR64, NZCVImm_uimm4, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & CondOp & b_1011=1 & Rn_FPR64 & fpccmp.op=1 & NZCVImm_uimm4 { -@if defined(SEMANTIC_primitive) setCC_NZCV(NZCVImm_uimm4:1); local tmp1:1 = ! CondOp:1; if (tmp1) goto inst_next; ftestNAN(Rn_FPR64, Rm_FPR64); fcomp(Rn_FPR64, Rm_FPR64); -@elif defined(SEMANTIC_pcode) - setCC_NZCV(NZCVImm_uimm4:1); - local tmp1:1 = ! CondOp:1; - if (tmp1) goto inst_next; - ftestNAN(Rn_FPR64, Rm_FPR64); - fcomp(Rn_FPR64, Rm_FPR64); -@elif defined(SEMANTIC_pseudo) - NEON_fccmpe(Rn_FPR64, Rm_FPR64, NZCVImm_uimm4:1, CondOp:1); -@endif } # C7.2.55 FCCMPE page C7-1507 line 83416 MATCH x1e200410/mask=xff200c10 @@ -6417,21 +3687,11 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & CondOp & :fccmpe Rn_FPR32, Rm_FPR32, NZCVImm_uimm4, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & CondOp & b_1011=1 & Rn_FPR32 & fpccmp.op=1 & NZCVImm_uimm4 { -@if defined(SEMANTIC_primitive) setCC_NZCV(NZCVImm_uimm4:1); local tmp1:1 = ! CondOp:1; if (tmp1) goto inst_next; ftestNAN(Rn_FPR32, Rm_FPR32); fcomp(Rn_FPR32, Rm_FPR32); -@elif defined(SEMANTIC_pcode) - setCC_NZCV(NZCVImm_uimm4:1); - local tmp1:1 = ! CondOp:1; - if (tmp1) goto inst_next; - ftestNAN(Rn_FPR32, Rm_FPR32); - fcomp(Rn_FPR32, Rm_FPR32); -@elif defined(SEMANTIC_pseudo) - NEON_fccmpe(Rn_FPR32, Rm_FPR32, NZCVImm_uimm4:1, CondOp:1); -@endif } # C7.2.55 FCCMPE page C7-1507 line 83416 MATCH x1e200410/mask=xff200c10 @@ -6443,21 +3703,11 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & CondOp & :fccmpe Rn_FPR16, Rm_FPR16, NZCVImm_uimm4, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & CondOp & b_1011=1 & Rn_FPR16 & fpccmp.op=1 & NZCVImm_uimm4 { -@if defined(SEMANTIC_primitive) setCC_NZCV(NZCVImm_uimm4:1); local tmp1:1 = ! CondOp:1; if (tmp1) goto inst_next; ftestNAN(Rn_FPR16, Rm_FPR16); fcomp(Rn_FPR16, Rm_FPR16); -@elif defined(SEMANTIC_pcode) - setCC_NZCV(NZCVImm_uimm4:1); - local tmp1:1 = ! 
CondOp:1; - if (tmp1) goto inst_next; - ftestNAN(Rn_FPR16, Rm_FPR16); - fcomp(Rn_FPR16, Rm_FPR16); -@elif defined(SEMANTIC_pseudo) - NEON_fccmpe(Rn_FPR16, Rm_FPR16, NZCVImm_uimm4:1, CondOp:1); -@endif } # C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x0e20e400/mask=xbfa0fc00 @@ -6468,9 +3718,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & CondOp & :fcmeq Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fcmeq(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x0e20e400/mask=xbfa0fc00 @@ -6481,9 +3729,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & :fcmeq Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fcmeq(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x0e20e400/mask=xbfa0fc00 @@ -6494,9 +3740,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :fcmeq Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fcmeq(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x5e402400/mask=xffe0fc00 @@ -6508,9 +3752,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :fcmeq Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01011110010 & b_1015=0b001001 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fcmeq(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x5e20e400/mask=xffa0fc00 @@ -6522,9 +3764,7 @@ is b_2131=0b01011110010 & b_1015=0b001001 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd :fcmeq Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_2331=0b010111100 & b_22=0 & b_21=1 & b_1015=0b111001 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fcmeq(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x5e20e400/mask=xffa0fc00 @@ -6536,9 +3776,7 @@ is b_2331=0b010111100 & b_22=0 & b_21=1 & b_1015=0b111001 & Rd_FPR32 & Rn_FPR32 :fcmeq Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2331=0b010111100 & b_22=1 & b_21=1 & b_1015=0b111001 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fcmeq(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x0e402400/mask=xbfe0fc00 @@ -6550,9 +3788,7 @@ is b_2331=0b010111100 & b_22=1 & b_21=1 & b_1015=0b111001 & Rd_FPR64 & Rn_FPR64 :fcmeq Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b001001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || 
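# The FCCMP and FCCMPE constructors above all keep one p-code body of this shape
# (Rn/Rm stand for the operand registers of the matching form):
#
#   setCC_NZCV(NZCVImm_uimm4:1);     # flags come from the immediate if the condition fails
#   local tmp1:1 = ! CondOp:1;
#   if (tmp1) goto inst_next;
#   ftestNAN(Rn, Rm);                # FCCMPE only: extra NaN check before the compare
#   fcomp(Rn, Rm);                   # condition holds: flags come from the comparison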
defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fcmeq(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.56 FCMEQ (register) page C7-1509 line 83535 MATCH x0e402400/mask=xbfe0fc00 @@ -6564,9 +3800,7 @@ is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b001001 & Rd_VPR64.4H & Rn_VPR :fcmeq Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b001001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fcmeq(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x0ea0d800/mask=xbfbffc00 @@ -6577,9 +3811,7 @@ is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b001001 & Rd_VPR128.8H & Rn_VP :fcmeq Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xd & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fcmeq(Rn_VPR128.2D, 0:8, 8:1); -@endif } # C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x0ea0d800/mask=xbfbffc00 @@ -6590,9 +3822,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xd & b_101 :fcmeq Rd_VPR64.2S, Rn_VPR64.2S, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fcmeq(Rn_VPR64.2S, 0:4, 4:1); -@endif } # C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x0ea0d800/mask=xbfbffc00 @@ -6603,9 +3833,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_101 :fcmeq Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fcmeq(Rn_VPR128.4S, 0:4, 4:1); -@endif } # C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x5ef8d800/mask=xfffffc00 @@ -6617,9 +3845,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_101 :fcmeq Rd_FPR16, Rn_FPR16, "#0.0" is b_1031=0b0101111011111000110110 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fcmeq(Rn_FPR16, 0:2); -@endif } # C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x5ea0d800/mask=xffbffc00 @@ -6631,9 +3857,7 @@ is b_1031=0b0101111011111000110110 & Rd_FPR16 & Rn_FPR16 & Zd :fcmeq Rd_FPR32, Rn_FPR32, "#0.0" is b_2331=0b010111101 & b_22=0 & b_1021=0b100000110110 & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fcmeq(Rn_FPR32, 0:4); -@endif } # C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x5ea0d800/mask=xffbffc00 @@ -6645,9 +3869,7 @@ is b_2331=0b010111101 & b_22=0 & b_1021=0b100000110110 & Rd_FPR32 & Rn_FPR32 & Z :fcmeq Rd_FPR64, Rn_FPR64, "#0.0" is b_2331=0b010111101 & b_22=1 & b_1021=0b100000110110 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fcmeq(Rn_FPR64, 0:8); -@endif } # C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x0ef8d800/mask=xbffffc00 @@ -6659,9 +3881,7 @@ is b_2331=0b010111101 & b_22=1 & b_1021=0b100000110110 & Rd_FPR64 & Rn_FPR64 & Z :fcmeq Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" is 
b_31=0 & b_30=0 & b_1029=0b00111011111000110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fcmeq(Rn_VPR64.4H, 0:2, 2:1); -@endif } # C7.2.57 FCMEQ (zero) page C7-1513 line 83778 MATCH x0ef8d800/mask=xbffffc00 @@ -6673,9 +3893,7 @@ is b_31=0 & b_30=0 & b_1029=0b00111011111000110110 & Rd_VPR64.4H & Rn_VPR64.4H & :fcmeq Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" is b_31=0 & b_30=1 & b_1029=0b00111011111000110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fcmeq(Rn_VPR128.8H, 0:2, 2:1); -@endif } # C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x2e20e400/mask=xbfa0fc00 @@ -6686,9 +3904,7 @@ is b_31=0 & b_30=1 & b_1029=0b00111011111000110110 & Rd_VPR128.8H & Rn_VPR128.8H :fcmge Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fcmge(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x2e20e400/mask=xbfa0fc00 @@ -6699,9 +3915,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & :fcmge Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fcmge(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x2e20e400/mask=xbfa0fc00 @@ -6712,9 +3926,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :fcmge Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fcmge(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x7e402400/mask=xffe0fc00 @@ -6726,9 +3938,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :fcmge Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01111110010 & b_1015=0b001001 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fcmge(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x7e20e400/mask=xffa0fc00 @@ -6740,9 +3950,7 @@ is b_2131=0b01111110010 & b_1015=0b001001 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd :fcmge Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_2331=0b011111100 & b_22=0 & b_21=1 & b_1015=0b111001 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fcmge(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x7e20e400/mask=xffa0fc00 @@ -6754,9 +3962,7 @@ is b_2331=0b011111100 & b_22=0 & b_21=1 & b_1015=0b111001 & Rd_FPR32 & Rn_FPR32 :fcmge Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2331=0b011111100 & b_22=1 & b_21=1 & b_1015=0b111001 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || 
defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fcmge(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x2e402400/mask=xbfe0fc00 @@ -6768,9 +3974,7 @@ is b_2331=0b011111100 & b_22=1 & b_21=1 & b_1015=0b111001 & Rd_FPR64 & Rn_FPR64 :fcmge Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fcmge(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.58 FCMGE (register) page C7-1516 line 83990 MATCH x2e402400/mask=xbfe0fc00 @@ -6782,9 +3986,7 @@ is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001001 & Rd_VPR64.4H & Rn_VPR :fcmge Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fcmge(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x2ea0c800/mask=xbfbffc00 @@ -6795,9 +3997,7 @@ is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001001 & Rd_VPR128.8H & Rn_VP :fcmge Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xc & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fcmge(Rn_VPR128.2D, 0:8, 8:1); -@endif } # C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x2ea0c800/mask=xbfbffc00 @@ -6808,9 +4008,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xc & b_101 :fcmge Rd_VPR64.2S, Rn_VPR64.2S, "#0" is b_3131=0 & q=0 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fcmge(Rn_VPR64.2S, 0:4, 4:1); -@endif } # C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x2ea0c800/mask=xbfbffc00 @@ -6821,9 +4019,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_101 :fcmge Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fcmge(Rn_VPR128.4S, 0:4, 4:1); -@endif } # C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x7ef8c800/mask=xfffffc00 @@ -6835,9 +4031,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_101 :fcmge Rd_FPR16, Rn_FPR16, "#0.0" is b_1031=0b0111111011111000110010 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fcmge(Rn_FPR16, 0:2); -@endif } # C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x7ea0c800/mask=xffbffc00 @@ -6849,9 +4043,7 @@ is b_1031=0b0111111011111000110010 & Rd_FPR16 & Rn_FPR16 & Zd :fcmge Rd_FPR32, Rn_FPR32, "#0.0" is b_2331=0b011111101 & b_22=0 & b_1021=0b100000110010 & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fcmge(Rn_FPR32, 0:4); -@endif } # C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x7ea0c800/mask=xffbffc00 @@ -6863,9 +4055,7 @@ is b_2331=0b011111101 & b_22=0 & b_1021=0b100000110010 & 
Rd_FPR32 & Rn_FPR32 & Z :fcmge Rd_FPR64, Rn_FPR64, "#0.0" is b_2331=0b011111101 & b_22=1 & b_1021=0b100000110010 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fcmge(Rn_FPR64, 0:8); -@endif } # C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x2ef8c800/mask=xbffffc00 @@ -6877,9 +4067,7 @@ is b_2331=0b011111101 & b_22=1 & b_1021=0b100000110010 & Rd_FPR64 & Rn_FPR64 & Z :fcmge Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" is b_31=0 & b_30=0 & b_1029=0b10111011111000110010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fcmge(Rn_VPR64.4H, 0:2, 2:1); -@endif } # C7.2.59 FCMGE (zero) page C7-1520 line 84234 MATCH x2ef8c800/mask=xbffffc00 @@ -6891,9 +4079,7 @@ is b_31=0 & b_30=0 & b_1029=0b10111011111000110010 & Rd_VPR64.4H & Rn_VPR64.4H & :fcmge Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" is b_31=0 & b_30=1 & b_1029=0b10111011111000110010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fcmge(Rn_VPR128.8H, 0:2, 2:1); -@endif } # C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x2ea0e400/mask=xbfa0fc00 @@ -6904,9 +4090,7 @@ is b_31=0 & b_30=1 & b_1029=0b10111011111000110010 & Rd_VPR128.8H & Rn_VPR128.8H :fcmgt Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fcmgt(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x2ea0e400/mask=xbfa0fc00 @@ -6917,9 +4101,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & :fcmgt Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fcmgt(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x2ea0e400/mask=xbfa0fc00 @@ -6930,9 +4112,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & :fcmgt Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fcmgt(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x7ec02400/mask=xffe0fc00 @@ -6944,9 +4124,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & :fcmgt Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01111110110 & b_1015=0b001001 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fcmgt(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x7ea0e400/mask=xffa0fc00 @@ -6958,9 +4136,7 @@ is b_2131=0b01111110110 & b_1015=0b001001 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd :fcmgt Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_2331=0b011111101 & b_22=0 & b_21=1 & b_1015=0b111001 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { -@if 
defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fcmgt(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x7ea0e400/mask=xffa0fc00 @@ -6972,9 +4148,7 @@ is b_2331=0b011111101 & b_22=0 & b_21=1 & b_1015=0b111001 & Rd_FPR32 & Rn_FPR32 :fcmgt Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2331=0b011111101 & b_22=1 & b_21=1 & b_1015=0b111001 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fcmgt(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x2ec02400/mask=xbfe0fc00 @@ -6986,9 +4160,7 @@ is b_2331=0b011111101 & b_22=1 & b_21=1 & b_1015=0b111001 & Rd_FPR64 & Rn_FPR64 :fcmgt Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b001001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fcmgt(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.60 FCMGT (register) page C7-1523 line 84446 MATCH x2ec02400/mask=xbfe0fc00 @@ -7000,9 +4172,7 @@ is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b001001 & Rd_VPR64.4H & Rn_VPR :fcmgt Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b001001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fcmgt(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x0ea0c800/mask=xbfbffc00 @@ -7013,9 +4183,7 @@ is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b001001 & Rd_VPR128.8H & Rn_VP :fcmgt Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xc & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fcmgt(Rn_VPR128.2D, 0:8, 8:1); -@endif } # C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x0ea0c800/mask=xbfbffc00 @@ -7026,9 +4194,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xc & b_101 :fcmgt Rd_VPR64.2S, Rn_VPR64.2S, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fcmgt(Rn_VPR64.2S, 0:4, 4:1); -@endif } # C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x0ea0c800/mask=xbfbffc00 @@ -7039,9 +4205,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_101 :fcmgt Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fcmgt(Rn_VPR128.4S, 0:4, 4:1); -@endif } # C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x5ef8c800/mask=xfffffc00 @@ -7053,9 +4217,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_101 :fcmgt Rd_FPR16, Rn_FPR16, "#0.0" is b_1031=0b0101111011111000110010 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fcmgt(Rn_FPR16, 0:2); -@endif } # C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x5ea0c800/mask=xffbffc00 @@ 
-7067,9 +4229,7 @@ is b_1031=0b0101111011111000110010 & Rd_FPR16 & Rn_FPR16 & Zd :fcmgt Rd_FPR32, Rn_FPR32, "#0.0" is b_2331=0b010111101 & b_22=0 & b_1021=0b100000110010 & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fcmgt(Rn_FPR32, 0:4); -@endif } # C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x5ea0c800/mask=xffbffc00 @@ -7081,9 +4241,7 @@ is b_2331=0b010111101 & b_22=0 & b_1021=0b100000110010 & Rd_FPR32 & Rn_FPR32 & Z :fcmgt Rd_FPR64, Rn_FPR64, "#0.0" is b_2331=0b010111101 & b_22=1 & b_1021=0b100000110010 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fcmgt(Rn_FPR64, 0:8); -@endif } # C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x0ef8c800/mask=xbffffc00 @@ -7095,9 +4253,7 @@ is b_2331=0b010111101 & b_22=1 & b_1021=0b100000110010 & Rd_FPR64 & Rn_FPR64 & Z :fcmgt Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" is b_31=0 & b_30=0 & b_1029=0b00111011111000110010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fcmgt(Rn_VPR64.4H, 0:2, 2:1); -@endif } # C7.2.61 FCMGT (zero) page C7-1527 line 84689 MATCH x0ef8c800/mask=xbffffc00 @@ -7109,9 +4265,7 @@ is b_31=0 & b_30=0 & b_1029=0b00111011111000110010 & Rd_VPR64.4H & Rn_VPR64.4H & :fcmgt Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" is b_31=0 & b_30=1 & b_1029=0b00111011111000110010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fcmgt(Rn_VPR128.8H, 0:2, 2:1); -@endif } # C7.2.55 FCMLA (by element) page C7-1117 line 64749 KEEPWITH @@ -7137,10 +4291,8 @@ fcmla_rotate: #270 is b_15=1 & b_1112=0b11 { export 270:1; } :fcmla Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128.H.vIndexHL, fcmla_rotate is b_31=0 & b_30=0 & b_2429=0b101111 & b_2223=0b01 & b_15=0 & b_12=1 & b_11=0 & b_10=0 & fcmla_rotate & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128.H.vIndexHL & Re_VPR128.H & vIndexHL & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:2 = SIMD_PIECE(Re_VPR128.H, vIndexHL:1); Rd_VPR64.4H = NEON_fcmla(Rn_VPR64.4H, tmp1, fcmla_rotate, 2:1); -@endif } # C7.2.62 FCMLA (by element) page C7-1530 line 84901 MATCH x2f001000/mask=xbf009400 @@ -7152,10 +4304,8 @@ is b_31=0 & b_30=0 & b_2429=0b101111 & b_2223=0b01 & b_15=0 & b_12=1 & b_11=0 & :fcmla Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128.H.vIndexHL, fcmla_rotate is b_31=0 & b_30=1 & b_2429=0b101111 & b_2223=0b01 & b_15=0 & b_12=1 & b_10=0 & fcmla_rotate & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128.H.vIndexHL & Re_VPR128.H & vIndexHL & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:2 = SIMD_PIECE(Re_VPR128.H, vIndexHL:1); Rd_VPR128.8H = NEON_fcmla(Rn_VPR128.8H, tmp1, fcmla_rotate, 2:1); -@endif } # C7.2.62 FCMLA (by element) page C7-1530 line 84901 MATCH x2f001000/mask=xbf009400 @@ -7168,10 +4318,8 @@ is b_31=0 & b_30=1 & b_2429=0b101111 & b_2223=0b01 & b_15=0 & b_12=1 & b_10=0 & :fcmla Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex, fcmla_rotate is b_31=0 & b_30=1 & b_2429=0b101111 & b_2223=0b10 & b_21=0 & b_15=0 & b_12=1 & b_10=0 & fcmla_rotate & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); Rd_VPR128.4S = 
NEON_fcmla(Rn_VPR128.4S, tmp1, fcmla_rotate, 4:1); -@endif } # C7.2.63 FCMLA page C7-1533 line 85073 MATCH x2e00c400/mask=xbf20e400 @@ -7183,9 +4331,7 @@ is b_31=0 & b_30=1 & b_2429=0b101111 & b_2223=0b10 & b_21=0 & b_15=0 & b_12=1 & :fcmla Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, fcmla_rotate is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fcmla(Rn_VPR64.4H, Rm_VPR64.4H, fcmla_rotate, 4:1); -@endif } # C7.2.63 FCMLA page C7-1533 line 85073 MATCH x2e00c400/mask=xbf20e400 @@ -7197,9 +4343,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b110 & b_1 :fcmla Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, fcmla_rotate is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fcmla(Rn_VPR128.8H, Rm_VPR128.8H, fcmla_rotate, 4:1); -@endif } # C7.2.63 FCMLA page C7-1533 line 85073 MATCH x2e00c400/mask=xbf20e400 @@ -7211,9 +4355,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b110 & b_1 :fcmla Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, fcmla_rotate is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fcmla(Rn_VPR64.2S, Rm_VPR64.2S, fcmla_rotate, 4:1); -@endif } # C7.2.63 FCMLA page C7-1533 line 85073 MATCH x2e00c400/mask=xbf20e400 @@ -7225,9 +4367,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b110 & b_1 :fcmla Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, fcmla_rotate is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fcmla(Rn_VPR128.4S, Rm_VPR128.4S, fcmla_rotate, 4:1); -@endif } # C7.2.63 FCMLA page C7-1533 line 85073 MATCH x2e00c400/mask=xbf20e400 @@ -7239,9 +4379,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b110 & b_1 :fcmla Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, fcmla_rotate is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fcmla(Rn_VPR128.2D, Rm_VPR128.2D, fcmla_rotate, 4:1); -@endif } # C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x2ea0d800/mask=xbfbffc00 @@ -7252,9 +4390,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_21=0 & b_1315=0b110 & b_1 :fcmle Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xd & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fcmle(Rn_VPR128.2D, 0:8, 8:1); -@endif } # C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x2ea0d800/mask=xbfbffc00 @@ -7265,9 +4401,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xd & b_101 :fcmle Rd_VPR64.2S, Rn_VPR64.2S, "#0" is 
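# In the by-element FCMLA forms above, the indexed lane is extracted with
# SIMD_PIECE and passed to the pseudo-op as a scalar, e.g. for the 4S form:
#
#   local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1);
#   Rd_VPR128.4S = NEON_fcmla(Rn_VPR128.4S, tmp1, fcmla_rotate, 4:1);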
b_3131=0 & q=0 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fcmle(Rn_VPR64.2S, 0:2, 2:1); -@endif } # C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x2ea0d800/mask=xbfbffc00 @@ -7278,9 +4412,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_101 :fcmle Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fcmle(Rn_VPR128.4S, 0:4, 4:1); -@endif } # C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x7ef8d800/mask=xfffffc00 @@ -7292,9 +4424,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_101 :fcmle Rd_FPR16, Rn_FPR16, "#0.0" is b_1031=0b0111111011111000110110 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fcmle(Rn_FPR16, 0:2); -@endif } # C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x7ea0d800/mask=xffbffc00 @@ -7306,9 +4436,7 @@ is b_1031=0b0111111011111000110110 & Rd_FPR16 & Rn_FPR16 & Zd :fcmle Rd_FPR32, Rn_FPR32, "#0.0" is b_2331=0b011111101 & b_22=0 & b_1021=0b100000110110 & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fcmle(Rn_FPR32, 0:4); -@endif } # C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x7ea0d800/mask=xffbffc00 @@ -7320,9 +4448,7 @@ is b_2331=0b011111101 & b_22=0 & b_1021=0b100000110110 & Rd_FPR32 & Rn_FPR32 & Z :fcmle Rd_FPR64, Rn_FPR64, "#0.0" is b_2331=0b011111101 & b_22=1 & b_1021=0b100000110110 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fcmle(Rn_FPR64, 0:8); -@endif } # C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x2ef8d800/mask=xbffffc00 @@ -7334,9 +4460,7 @@ is b_2331=0b011111101 & b_22=1 & b_1021=0b100000110110 & Rd_FPR64 & Rn_FPR64 & Z :fcmle Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" is b_31=0 & b_30=0 & b_1029=0b10111011111000110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fcmle(Rn_VPR64.4H, 0:2, 2:1); -@endif } # C7.2.64 FCMLE (zero) page C7-1535 line 85215 MATCH x2ef8d800/mask=xbffffc00 @@ -7348,9 +4472,7 @@ is b_31=0 & b_30=0 & b_1029=0b10111011111000110110 & Rd_VPR64.4H & Rn_VPR64.4H & :fcmle Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" is b_31=0 & b_30=1 & b_1029=0b10111011111000110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fcmle(Rn_VPR128.8H, 0:2, 2:1); -@endif } # C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x0ea0e800/mask=xbfbffc00 @@ -7361,9 +4483,7 @@ is b_31=0 & b_30=1 & b_1029=0b10111011111000110110 & Rd_VPR128.8H & Rn_VPR128.8H :fcmlt Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xe & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fcmlt(Rn_VPR128.2D, 0:8, 8:1); -@endif } # C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x0ea0e800/mask=xbfbffc00 @@ -7374,9 +4494,7 @@ is b_3131=0 & q=1 & u=0 & 
b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xe & b_101 :fcmlt Rd_VPR64.2S, Rn_VPR64.2S, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xe & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fcmlt(Rn_VPR64.2S, 0:4, 4:1); -@endif } # C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x0ea0e800/mask=xbfbffc00 @@ -7387,9 +4505,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xe & b_101 :fcmlt Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xe & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fcmlt(Rn_VPR128.4S, 0:4, 4:1); -@endif } # C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x5ef8e800/mask=xfffffc00 @@ -7401,9 +4517,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xe & b_101 :fcmlt Rd_FPR16, Rn_FPR16, "#0.0" is b_1031=0b0101111011111000111010 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fcmlt(Rn_FPR16, 0:2); -@endif } # C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x5ea0e800/mask=xffbffc00 @@ -7415,9 +4529,7 @@ is b_1031=0b0101111011111000111010 & Rd_FPR16 & Rn_FPR16 & Zd :fcmlt Rd_FPR32, Rn_FPR32, "#0.0" is b_2331=0b010111101 & b_22=0 & b_1021=0b100000111010 & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fcmlt(Rn_FPR32, 0:4); -@endif } # C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x5ea0e800/mask=xffbffc00 @@ -7429,9 +4541,7 @@ is b_2331=0b010111101 & b_22=0 & b_1021=0b100000111010 & Rd_FPR32 & Rn_FPR32 & Z :fcmlt Rd_FPR64, Rn_FPR64, "#0.0" is b_2331=0b010111101 & b_22=1 & b_1021=0b100000111010 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fcmlt(Rn_FPR64, 0:8); -@endif } # C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x0ef8e800/mask=xbffffc00 @@ -7443,9 +4553,7 @@ is b_2331=0b010111101 & b_22=1 & b_1021=0b100000111010 & Rd_FPR64 & Rn_FPR64 & Z :fcmlt Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" is b_31=0 & b_30=0 & b_1029=0b00111011111000111010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fcmlt(Rn_VPR64.4H, 0:2, 2:1); -@endif } # C7.2.65 FCMLT (zero) page C7-1538 line 85427 MATCH x0ef8e800/mask=xbffffc00 @@ -7457,9 +4565,7 @@ is b_31=0 & b_30=0 & b_1029=0b00111011111000111010 & Rd_VPR64.4H & Rn_VPR64.4H & :fcmlt Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" is b_31=0 & b_30=1 & b_1029=0b00111011111000111010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fcmlt(Rn_VPR128.8H, 0:2, 2:1); -@endif } # C7.2.66 FCMP page C7-1541 line 85621 MATCH x1e202000/mask=xff20fc17 @@ -7471,13 +4577,7 @@ is b_31=0 & b_30=1 & b_1029=0b00111011111000111010 & Rd_VPR128.8H & Rn_VPR128.8H :fcmp Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR64 & fpcmp.opcode2=0x0 { -@if defined(SEMANTIC_primitive) fcomp(Rn_FPR64, Rm_FPR64); -@elif defined(SEMANTIC_pcode) - fcomp(Rn_FPR64, Rm_FPR64); -@elif defined(SEMANTIC_pseudo) - NEON_fcmp(Rn_FPR64, Rm_FPR64); -@endif } # 
C7.2.66 FCMP page C7-1541 line 85621 MATCH x1e202000/mask=xff20fc17 @@ -7489,13 +4589,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & fpcmp.op :fcmp Rn_FPR64, Rm_fpz64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_fpz64 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR64 & fpcmp.opcode2=0x8 { -@if defined(SEMANTIC_primitive) fcomp(Rn_FPR64, Rm_fpz64); -@elif defined(SEMANTIC_pcode) - fcomp(Rn_FPR64, Rm_fpz64); -@elif defined(SEMANTIC_pseudo) - NEON_fcmp(Rn_FPR64, Rm_fpz64); -@endif } # C7.2.66 FCMP page C7-1541 line 85621 MATCH x1e202000/mask=xff20fc17 @@ -7507,13 +4601,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_fpz64 & fpcmp.op :fcmp Rn_FPR32, Rm_fpz32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_fpz32 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR32 & fpcmp.opcode2=0x8 { -@if defined(SEMANTIC_primitive) fcomp(Rn_FPR32, Rm_fpz32); -@elif defined(SEMANTIC_pcode) - fcomp(Rn_FPR32, Rm_fpz32); -@elif defined(SEMANTIC_pseudo) - NEON_fcmp(Rn_FPR32, Rm_fpz32); -@endif } # C7.2.66 FCMP page C7-1541 line 85621 MATCH x1e202000/mask=xff20fc17 @@ -7525,13 +4613,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_fpz32 & fpcmp.op :fcmp Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR32 & fpcmp.opcode2=0x0 { -@if defined(SEMANTIC_primitive) fcomp(Rn_FPR32, Rm_FPR32); -@elif defined(SEMANTIC_pcode) - fcomp(Rn_FPR32, Rm_FPR32); -@elif defined(SEMANTIC_pseudo) - NEON_fcmp(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.66 FCMP page C7-1541 line 85621 MATCH x1e202000/mask=xff20fc17 @@ -7543,13 +4625,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & fpcmp.op :fcmp Rn_FPR16, Rm_fpz16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_fpz16 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR16 & fpcmp.opcode2=0x8 { -@if defined(SEMANTIC_primitive) fcomp(Rn_FPR16, Rm_fpz16); -@elif defined(SEMANTIC_pcode) - fcomp(Rn_FPR16, Rm_fpz16); -@elif defined(SEMANTIC_pseudo) - NEON_fcmp(Rn_FPR16, Rm_fpz16); -@endif } # C7.2.66 FCMP page C7-1541 line 85621 MATCH x1e202000/mask=xff20fc17 @@ -7561,13 +4637,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_fpz16 & fpcmp.op :fcmp Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR16 & fpcmp.opcode2=0x0 { -@if defined(SEMANTIC_primitive) fcomp(Rn_FPR16, Rm_FPR16); -@elif defined(SEMANTIC_pcode) - fcomp(Rn_FPR16, Rm_FPR16); -@elif defined(SEMANTIC_pseudo) - NEON_fcmp(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.67 FCMPE page C7-1543 line 85756 MATCH x1e202010/mask=xff20fc17 @@ -7579,15 +4649,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & fpcmp.op :fcmpe Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR64 & fpcmp.opcode2=0x10 { -@if defined(SEMANTIC_primitive) ftestNAN(Rn_FPR64, Rm_FPR64); fcomp(Rn_FPR64, Rm_FPR64); -@elif defined(SEMANTIC_pcode) - ftestNAN(Rn_FPR64, Rm_FPR64); - fcomp(Rn_FPR64, Rm_FPR64); -@elif defined(SEMANTIC_pseudo) - NEON_fcmpe(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.67 FCMPE page C7-1543 line 85756 MATCH x1e202010/mask=xff20fc17 @@ -7599,15 +4662,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & fpcmp.op :fcmpe Rn_FPR64, Rm_fpz64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_fpz64 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR64 & fpcmp.opcode2=0x18 { -@if 
defined(SEMANTIC_primitive) ftestNAN(Rn_FPR64, Rm_fpz64); fcomp(Rn_FPR64, Rm_fpz64); -@elif defined(SEMANTIC_pcode) - ftestNAN(Rn_FPR64, Rm_fpz64); - fcomp(Rn_FPR64, Rm_fpz64); -@elif defined(SEMANTIC_pseudo) - NEON_fcmpe(Rn_FPR64, Rm_fpz64); -@endif } # C7.2.67 FCMPE page C7-1543 line 85756 MATCH x1e202010/mask=xff20fc17 @@ -7619,15 +4675,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_fpz64 & fpcmp.op :fcmpe Rn_FPR32, Rm_fpz32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_fpz32 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR32 & fpcmp.opcode2=0x18 { -@if defined(SEMANTIC_primitive) ftestNAN(Rn_FPR32, Rm_fpz32); fcomp(Rn_FPR32, Rm_fpz32); -@elif defined(SEMANTIC_pcode) - ftestNAN(Rn_FPR32, Rm_fpz32); - fcomp(Rn_FPR32, Rm_fpz32); -@elif defined(SEMANTIC_pseudo) - NEON_fcmpe(Rn_FPR32, Rm_fpz32); -@endif } # C7.2.67 FCMPE page C7-1543 line 85756 MATCH x1e202010/mask=xff20fc17 @@ -7639,15 +4688,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_fpz32 & fpcmp.op :fcmpe Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR32 & fpcmp.opcode2=0x10 { -@if defined(SEMANTIC_primitive) ftestNAN(Rn_FPR32, Rm_FPR32); fcomp(Rn_FPR32, Rm_FPR32); -@elif defined(SEMANTIC_pcode) - ftestNAN(Rn_FPR32, Rm_FPR32); - fcomp(Rn_FPR32, Rm_FPR32); -@elif defined(SEMANTIC_pseudo) - NEON_fcmpe(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.67 FCMPE page C7-1543 line 85756 MATCH x1e202010/mask=xff20fc17 @@ -7659,15 +4701,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & fpcmp.op :fcmpe Rn_FPR16, Rm_fpz16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_fpz16 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR16 & fpcmp.opcode2=0x18 { -@if defined(SEMANTIC_primitive) ftestNAN(Rn_FPR16, Rm_fpz16); fcomp(Rn_FPR16, Rm_fpz16); -@elif defined(SEMANTIC_pcode) - ftestNAN(Rn_FPR16, Rm_fpz16); - fcomp(Rn_FPR16, Rm_fpz16); -@elif defined(SEMANTIC_pseudo) - NEON_fcmpe(Rn_FPR16, Rm_fpz16); -@endif } # C7.2.67 FCMPE page C7-1543 line 85756 MATCH x1e202010/mask=xff20fc17 @@ -7679,15 +4714,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_fpz16 & fpcmp.op :fcmpe Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR16 & fpcmp.opcode2=0x10 { -@if defined(SEMANTIC_primitive) ftestNAN(Rn_FPR16, Rm_FPR16); fcomp(Rn_FPR16, Rm_FPR16); -@elif defined(SEMANTIC_pcode) - ftestNAN(Rn_FPR16, Rm_FPR16); - fcomp(Rn_FPR16, Rm_FPR16); -@elif defined(SEMANTIC_pseudo) - NEON_fcmpe(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.68 FCSEL page C7-1545 line 85895 MATCH x1e200c00/mask=xff200c00 @@ -7700,23 +4728,12 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & fpcmp.op :fcsel Rd_FPR64, Rn_FPR64, Rm_FPR64, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & CondOp & b_1011=3 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = Rm_FPR64; Rd_FPR64 = Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd if (CondOp:1) goto inst_next; Rd_FPR64 = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_FPR64; - Rd_FPR64 = Rn_FPR64; - zext_zd(Zd); # zero upper 24 bytes of Zd - if (CondOp:1) goto inst_next; - Rd_FPR64 = tmp1; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fcsel(Rn_FPR64, Rm_FPR64, CondOp:1); -@endif } # C7.2.68 FCSEL page C7-1545 line 85895 MATCH x1e200c00/mask=xff200c00 @@ -7728,23 
+4745,12 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & CondOp & :fcsel Rd_FPR32, Rn_FPR32, Rm_FPR32, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & CondOp & b_1011=3 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Rm_FPR32; Rd_FPR32 = Rn_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd if (CondOp:1) goto inst_next; Rd_FPR32 = tmp1; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Rm_FPR32; - Rd_FPR32 = Rn_FPR32; - zext_zs(Zd); # zero upper 28 bytes of Zd - if (CondOp:1) goto inst_next; - Rd_FPR32 = tmp1; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fcsel(Rn_FPR32, Rm_FPR32, CondOp:1); -@endif } # C7.2.68 FCSEL page C7-1545 line 85895 MATCH x1e200c00/mask=xff200c00 @@ -7756,23 +4762,12 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & CondOp & :fcsel Rd_FPR16, Rn_FPR16, Rm_FPR16, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & CondOp & b_1011=3 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = Rm_FPR16; Rd_FPR16 = Rn_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd if (CondOp:1) goto inst_next; Rd_FPR16 = tmp1; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = Rm_FPR16; - Rd_FPR16 = Rn_FPR16; - zext_zh(Zd); # zero upper 30 bytes of Zd - if (CondOp:1) goto inst_next; - Rd_FPR16 = tmp1; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fcsel(Rn_FPR16, Rm_FPR16, CondOp:1); -@endif } # C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00 @@ -7784,15 +4779,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & CondOp & :fcvt Rd_FPR64, Rn_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x5 & b_1014=0x10 & Rn_FPR16 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = float2float(Rn_FPR16); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = float2float(Rn_FPR16); - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fcvt(Rn_FPR16); -@endif } # C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00 @@ -7804,15 +4792,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x5 & b_ :fcvt Rd_FPR64, Rn_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x5 & b_1014=0x10 & Rn_FPR32 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = float2float(Rn_FPR32); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = float2float(Rn_FPR32); - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fcvt(Rn_FPR32); -@endif } # C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00 @@ -7824,15 +4805,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x5 & b_ :fcvt Rd_FPR16, Rn_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x7 & b_1014=0x10 & Rn_FPR64 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = float2float(Rn_FPR64); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = float2float(Rn_FPR64); - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fcvt(Rn_FPR64); -@endif } # C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00 @@ 
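# The FCSEL semantics above use a copy-then-conditionally-overwrite pattern: Rn is
# written first, the saved Rm overwrites it only when the condition fails, and the
# upper part of the Z register is re-zeroed after each write (64-bit form shown):
#
#   local tmp1:8 = Rm_FPR64;
#   Rd_FPR64 = Rn_FPR64;
#   zext_zd(Zd);
#   if (CondOp:1) goto inst_next;
#   Rd_FPR64 = tmp1;
#   zext_zd(Zd);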
-7844,15 +4818,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x7 & b_ :fcvt Rd_FPR16, Rn_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x7 & b_1014=0x10 & Rn_FPR32 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = float2float(Rn_FPR32); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = float2float(Rn_FPR32); - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fcvt(Rn_FPR32); -@endif } # C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00 @@ -7864,15 +4831,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x7 & b_ :fcvt Rd_FPR32, Rn_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x4 & b_1014=0x10 & Rn_FPR64 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = float2float(Rn_FPR64); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = float2float(Rn_FPR64); - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fcvt(Rn_FPR64); -@endif } # C7.2.69 FCVT page C7-1547 line 86009 MATCH x1e224000/mask=xff3e7c00 @@ -7884,15 +4844,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x4 & b_ :fcvt Rd_FPR32, Rn_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x4 & b_1014=0x10 & Rn_FPR16 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = float2float(Rn_FPR16); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = float2float(Rn_FPR16); - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fcvt(Rn_FPR16); -@endif } # C7.2.63 FCVTAS (vector) page C7-1136 line 65961 KEEPWITH @@ -7930,15 +4883,8 @@ fcvt_smnemonic: "fcvtzu" is b_1920=0b11 & b_1618=0b001 { } :^fcvt_vmnemonic Rd_FPR16, Rn_FPR16 is b_3031=0b01 & b_1028=0b1111001111001110010 & fcvt_vmnemonic & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = trunc(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:2 = trunc(Rn_FPR16); - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fcvt_amnpz_su(Rn_FPR16); -@endif } # C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x5e21c800/mask=xffbffc00 @@ -7952,15 +4898,8 @@ is b_3031=0b01 & b_1028=0b1111001111001110010 & fcvt_vmnemonic & Rd_FPR16 & Rn_F :^fcvt_vmnemonic Rd_FPR32, Rn_FPR32 is b_3031=0b01 & b_2328=0b111100 & b_22=0 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = trunc(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:4 = trunc(Rn_FPR32); - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fcvt_amnpz_su(Rn_FPR32); -@endif } # C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x5e21c800/mask=xffbffc00 @@ -7974,15 +4913,8 @@ is b_3031=0b01 & b_2328=0b111100 & b_22=0 & b_1021=0b100001110010 & fcvt_vmnemon :^fcvt_vmnemonic Rd_FPR64, Rn_FPR64 is b_3031=0b01 & b_2328=0b111100 & b_22=1 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = trunc(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = trunc(Rn_FPR64); - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = 
NEON_fcvt_amnpz_su(Rn_FPR64); -@endif } # C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x0e79c800/mask=xbffffc00 @@ -7996,29 +4928,12 @@ is b_3031=0b01 & b_2328=0b111100 & b_22=1 & b_1021=0b100001110010 & fcvt_vmnemon :^fcvt_vmnemonic Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2328=0b011100 & b_1022=0b1111001110010 & fcvt_vmnemonic & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.4H = trunc(Rn_VPR64.4H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); + Rd_VPR64.4H[0,16] = trunc(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = trunc(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = trunc(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = trunc(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_TRUNC(Rn_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fcvt_amnpz_su(Rn_VPR64.4H, 2:1); -@endif } # C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x0e79c800/mask=xbffffc00 @@ -8032,41 +4947,16 @@ is b_31=0 & b_30=0 & b_2328=0b011100 & b_1022=0b1111001110010 & fcvt_vmnemonic & :^fcvt_vmnemonic Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2328=0b011100 & b_1022=0b1111001110010 & fcvt_vmnemonic & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.8H = trunc(Rn_VPR128.8H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); + Rd_VPR128.8H[0,16] = trunc(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = trunc(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = trunc(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = trunc(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = trunc(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = trunc(Rn_VPR128.8H[80,16]); + 
Rd_VPR128.8H[96,16] = trunc(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = trunc(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_TRUNC(Rn_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fcvt_amnpz_su(Rn_VPR128.8H, 2:1); -@endif } # C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x0e21c800/mask=xbfbffc00 @@ -8080,23 +4970,10 @@ is b_31=0 & b_30=1 & b_2328=0b011100 & b_1022=0b1111001110010 & fcvt_vmnemonic & :^fcvt_vmnemonic Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2328=0b011100 & b_22=0 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.2S = trunc(Rn_VPR64.2S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); + Rd_VPR64.2S[0,32] = trunc(Rn_VPR64.2S[0,32]); + Rd_VPR64.2S[32,32] = trunc(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_TRUNC(Rn_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fcvt_amnpz_su(Rn_VPR64.2S, 4:1); -@endif } # C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x0e21c800/mask=xbfbffc00 @@ -8110,29 +4987,12 @@ is b_31=0 & b_30=0 & b_2328=0b011100 & b_22=0 & b_1021=0b100001110010 & fcvt_vmn :^fcvt_vmnemonic Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2328=0b011100 & b_22=0 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.4S = trunc(Rn_VPR128.4S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); + Rd_VPR128.4S[0,32] = trunc(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = trunc(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = trunc(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = trunc(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_TRUNC(Rn_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fcvt_amnpz_su(Rn_VPR128.4S, 4:1); -@endif } # C7.2.70 FCVTAS (vector) page C7-1549 line 86125 MATCH x0e21c800/mask=xbfbffc00 @@ -8146,23 +5006,10 @@ is b_31=0 & b_30=1 & b_2328=0b011100 & b_22=0 & b_1021=0b100001110010 & fcvt_vmn :^fcvt_vmnemonic Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2328=0b011100 & b_22=1 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_VPR128.2D & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.2D = trunc(Rn_VPR128.2D) on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 
0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp2 = trunc(* [register]:8 tmp1); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp2 = trunc(* [register]:8 tmp1); + Rd_VPR128.2D[0,64] = trunc(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = trunc(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_TRUNC(Rn_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fcvt_amnpz_su(Rn_VPR128.2D, 8:1); -@endif } # C7.2.71 FCVTAS (scalar) page C7-1552 line 86310 MATCH x1e240000/mask=x7f3ffc00 @@ -8176,15 +5023,8 @@ is b_31=0 & b_30=1 & b_2328=0b011100 & b_22=1 & b_1021=0b100001110010 & fcvt_vmn :^fcvt_smnemonic Rd_GPR32, Rn_FPR16 is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR16 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) Rd_GPR32 = trunc(Rn_FPR16); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmpd:4 = trunc(Rn_FPR16); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fcvt_amnpz_su(Rn_FPR16); -@endif } # C7.2.71 FCVTAS (scalar) page C7-1552 line 86310 MATCH x1e240000/mask=x7f3ffc00 @@ -8198,13 +5038,7 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1720=0b0010 & b_1015=0b0 :^fcvt_smnemonic Rd_GPR64, Rn_FPR16 is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR16 { -@if defined(SEMANTIC_primitive) Rd_GPR64 = trunc(Rn_FPR16); -@elif defined(SEMANTIC_pcode) - Rd_GPR64 = trunc(Rn_FPR16); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fcvt_amnpz_su(Rn_FPR16); -@endif } # C7.2.71 FCVTAS (scalar) page C7-1552 line 86310 MATCH x1e240000/mask=x7f3ffc00 @@ -8218,15 +5052,8 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1720=0b0010 & b_1015=0b0 :^fcvt_smnemonic Rd_GPR32, Rn_FPR32 is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR32 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) Rd_GPR32 = trunc(Rn_FPR32); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmpd:4 = trunc(Rn_FPR32); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fcvt_amnpz_su(Rn_FPR32); -@endif } # C7.2.71 FCVTAS (scalar) page C7-1552 line 86310 MATCH x1e240000/mask=x7f3ffc00 @@ -8240,13 +5067,7 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1720=0b0010 & b_1015=0b0 :^fcvt_smnemonic Rd_GPR64, Rn_FPR32 is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR32 { -@if defined(SEMANTIC_primitive) Rd_GPR64 = trunc(Rn_FPR32); -@elif defined(SEMANTIC_pcode) - Rd_GPR64 = trunc(Rn_FPR32); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fcvt_amnpz_su(Rn_FPR32); -@endif } # C7.2.71 FCVTAS (scalar) page C7-1552 line 86310 MATCH x1e240000/mask=x7f3ffc00 @@ -8260,15 +5081,8 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1720=0b0010 & b_1015=0b0 :^fcvt_smnemonic Rd_GPR32, Rn_FPR64 is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR64 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) Rd_GPR32 = trunc(Rn_FPR64); 
zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmpd:4 = trunc(Rn_FPR64); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fcvt_amnpz_su(Rn_FPR64); -@endif } # C7.2.71 FCVTAS (scalar) page C7-1552 line 86310 MATCH x1e240000/mask=x7f3ffc00 @@ -8282,13 +5096,7 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1720=0b0010 & b_1015=0b0 :^fcvt_smnemonic Rd_GPR64, Rn_FPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR64 { -@if defined(SEMANTIC_primitive) Rd_GPR64 = trunc(Rn_FPR64); -@elif defined(SEMANTIC_pcode) - Rd_GPR64 = trunc(Rn_FPR64); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fcvt_amnpz_su(Rn_FPR64); -@endif } # C7.2.74 FCVTL, FCVTL2 page C7-1559 line 86735 MATCH x0e217800/mask=xbfbffc00 @@ -8300,25 +5108,11 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1720=0b0010 & b_1015=0b0 :fcvtl Rd_VPR128.2D, Rn_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x17 & b_1011=2 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.2S; # simd resize Rd_VPR128.2D = float2float(TMPD1) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, TMPD1, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = float2float(* [register]:4 tmp2); - simd_address_at(tmp2, TMPD1, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = float2float(* [register]:4 tmp2); + Rd_VPR128.2D[0,64] = float2float(TMPD1[0,32]); + Rd_VPR128.2D[64,64] = float2float(TMPD1[32,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.2S; - local tmpd:16 = SIMD_FLOAT2FLOAT(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fcvtl(Rn_VPR64.2S, 4:1); -@endif } # C7.2.74 FCVTL, FCVTL2 page C7-1559 line 86735 MATCH x0e217800/mask=xbfbffc00 @@ -8330,27 +5124,11 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :fcvtl2 Rd_VPR128.2D, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x17 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize Rd_VPR128.2D = float2float(TMPD1) (lane size 4 to 8) - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 4, 8); - simd_address_at(tmp4, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp4 = float2float(* [register]:4 tmp3); - simd_address_at(tmp3, TMPD1, 1, 4, 8); - simd_address_at(tmp4, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp4 = float2float(* [register]:4 tmp3); + Rd_VPR128.2D[0,64] = float2float(TMPD1[0,32]); + Rd_VPR128.2D[64,64] = float2float(TMPD1[32,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmpd:16 = SIMD_FLOAT2FLOAT(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fcvtl2(Rn_VPR128.4S, 8:1); -@endif } # C7.2.74 FCVTL, FCVTL2 page C7-1559 line 86735 MATCH x0e217800/mask=xbfbffc00 @@ -8362,31 +5140,13 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :fcvtl Rd_VPR128.4S, Rn_VPR64.4H is b_3131=0 & q=0 & 
u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x17 & b_1011=2 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.4H; # simd resize Rd_VPR128.4S = float2float(TMPD1) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, TMPD1, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = float2float(* [register]:2 tmp2); - simd_address_at(tmp2, TMPD1, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = float2float(* [register]:2 tmp2); - simd_address_at(tmp2, TMPD1, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = float2float(* [register]:2 tmp2); - simd_address_at(tmp2, TMPD1, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = float2float(* [register]:2 tmp2); + Rd_VPR128.4S[0,32] = float2float(TMPD1[0,16]); + Rd_VPR128.4S[32,32] = float2float(TMPD1[16,16]); + Rd_VPR128.4S[64,32] = float2float(TMPD1[32,16]); + Rd_VPR128.4S[96,32] = float2float(TMPD1[48,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.4H; - local tmpd:16 = SIMD_FLOAT2FLOAT(tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fcvtl(Rn_VPR64.4H, 4:1); -@endif } # C7.2.74 FCVTL, FCVTL2 page C7-1559 line 86735 MATCH x0e217800/mask=xbfbffc00 @@ -8398,33 +5158,13 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :fcvtl2 Rd_VPR128.4S, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x17 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize Rd_VPR128.4S = float2float(TMPD1) (lane size 2 to 4) - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 2, 8); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp4 = float2float(* [register]:2 tmp3); - simd_address_at(tmp3, TMPD1, 1, 2, 8); - simd_address_at(tmp4, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp4 = float2float(* [register]:2 tmp3); - simd_address_at(tmp3, TMPD1, 2, 2, 8); - simd_address_at(tmp4, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp4 = float2float(* [register]:2 tmp3); - simd_address_at(tmp3, TMPD1, 3, 2, 8); - simd_address_at(tmp4, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp4 = float2float(* [register]:2 tmp3); + Rd_VPR128.4S[0,32] = float2float(TMPD1[0,16]); + Rd_VPR128.4S[32,32] = float2float(TMPD1[16,16]); + Rd_VPR128.4S[64,32] = float2float(TMPD1[32,16]); + Rd_VPR128.4S[96,32] = float2float(TMPD1[48,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmpd:16 = SIMD_FLOAT2FLOAT(tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fcvtl2(Rn_VPR128.8H, 2:1); -@endif } # C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x5e79b800/mask=xfffffc00 @@ -8444,15 +5184,8 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :^fcvt_vmnemonic Rd_FPR16, Rn_FPR16 is b_3031=0b01 & b_2428=0b11110 & b_1322=0b1111001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = trunc(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:2 = 
trunc(Rn_FPR16); - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fcvt_amnpz_su(Rn_FPR16); -@endif } # C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x5e21b800/mask=xffbffc00 @@ -8472,15 +5205,8 @@ is b_3031=0b01 & b_2428=0b11110 & b_1322=0b1111001101 & b_1011=0b10 & fcvt_vmnem :^fcvt_vmnemonic Rd_FPR32, Rn_FPR32 is b_3031=0b01 & b_2428=0b11110 & b_22=0 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = trunc(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:4 = trunc(Rn_FPR32); - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fcvt_amnpz_su(Rn_FPR32); -@endif } # C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x5e21b800/mask=xffbffc00 @@ -8500,15 +5226,8 @@ is b_3031=0b01 & b_2428=0b11110 & b_22=0 & b_1321=0b100001101 & b_1011=0b10 & fc :^fcvt_vmnemonic Rd_FPR64, Rn_FPR64 is b_3031=0b01 & b_2428=0b11110 & b_22=1 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = trunc(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = trunc(Rn_FPR64); - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fcvt_amnpz_su(Rn_FPR64); -@endif } # C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x0e79b800/mask=xbffffc00 @@ -8528,29 +5247,12 @@ is b_3031=0b01 & b_2428=0b11110 & b_22=1 & b_1321=0b100001101 & b_1011=0b10 & fc :^fcvt_vmnemonic Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2428=0b01110 & b_1322=0b1111001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.4H = trunc(Rn_VPR64.4H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); + Rd_VPR64.4H[0,16] = trunc(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = trunc(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = trunc(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = trunc(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_TRUNC(Rn_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fcvt_amnpz_su(Rn_VPR64.4H, 2:1); -@endif } # C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x0e79b800/mask=xbffffc00 @@ -8570,41 +5272,16 @@ is b_31=0 & b_30=0 & b_2428=0b01110 & b_1322=0b1111001101 & b_1011=0b10 & fcvt_v :^fcvt_vmnemonic Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2428=0b01110 & b_1322=0b1111001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.8H = trunc(Rn_VPR128.8H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * 
[register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); + Rd_VPR128.8H[0,16] = trunc(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = trunc(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = trunc(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = trunc(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = trunc(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = trunc(Rn_VPR128.8H[80,16]); + Rd_VPR128.8H[96,16] = trunc(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = trunc(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_TRUNC(Rn_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fcvt_amnpz_su(Rn_VPR128.8H, 2:1); -@endif } # C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x0e21b800/mask=xbfbffc00 @@ -8624,23 +5301,10 @@ is b_31=0 & b_30=1 & b_2428=0b01110 & b_1322=0b1111001101 & b_1011=0b10 & fcvt_v :^fcvt_vmnemonic Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2428=0b01110 & b_22=0 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.2S = trunc(Rn_VPR64.2S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); + Rd_VPR64.2S[0,32] = trunc(Rn_VPR64.2S[0,32]); + Rd_VPR64.2S[32,32] = trunc(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_TRUNC(Rn_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fcvt_amnpz_su(Rn_VPR64.2S, 4:1); -@endif } # C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x0e21b800/mask=xbfbffc00 @@ -8660,29 +5324,12 @@ is b_31=0 & b_30=0 & b_2428=0b01110 & b_22=0 & b_1321=0b100001101 & b_1011=0b10 :^fcvt_vmnemonic Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2428=0b01110 & b_22=0 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.4S = trunc(Rn_VPR128.4S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp2 = trunc(* 
[register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); + Rd_VPR128.4S[0,32] = trunc(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = trunc(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = trunc(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = trunc(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_TRUNC(Rn_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fcvt_amnpz_su(Rn_VPR128.4S, 4:1); -@endif } # C7.2.75 FCVTMS (vector) page C7-1561 line 86825 MATCH x0e21b800/mask=xbfbffc00 @@ -8702,23 +5349,10 @@ is b_31=0 & b_30=1 & b_2428=0b01110 & b_22=0 & b_1321=0b100001101 & b_1011=0b10 :^fcvt_vmnemonic Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2428=0b01110 & b_22=1 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR128.2D & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.2D = trunc(Rn_VPR128.2D) on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp2 = trunc(* [register]:8 tmp1); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp2 = trunc(* [register]:8 tmp1); + Rd_VPR128.2D[0,64] = trunc(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = trunc(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_TRUNC(Rn_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fcvt_amnpz_su(Rn_VPR128.2D, 8:1); -@endif } # C7.2.76 FCVTMS (scalar) page C7-1564 line 87010 MATCH x1e300000/mask=x7f3ffc00 @@ -8738,15 +5372,8 @@ is b_31=0 & b_30=1 & b_2428=0b01110 & b_22=1 & b_1321=0b100001101 & b_1011=0b10 :^fcvt_smnemonic Rd_GPR32, Rn_FPR16 is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR16 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) Rd_GPR32 = trunc(Rn_FPR16); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmpd:4 = trunc(Rn_FPR16); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fcvt_amnpz_su(Rn_FPR16); -@endif } # C7.2.76 FCVTMS (scalar) page C7-1564 line 87010 MATCH x1e300000/mask=x7f3ffc00 @@ -8766,13 +5393,7 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1718=0b00 & b_1015=0b000 :^fcvt_smnemonic Rd_GPR64, Rn_FPR16 is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR16 { -@if defined(SEMANTIC_primitive) Rd_GPR64 = trunc(Rn_FPR16); -@elif defined(SEMANTIC_pcode) - Rd_GPR64 = trunc(Rn_FPR16); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fcvt_amnpz_su(Rn_FPR16); -@endif } # C7.2.76 FCVTMS (scalar) page C7-1564 line 87010 MATCH x1e300000/mask=x7f3ffc00 @@ -8792,15 +5413,8 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1718=0b00 & b_1015=0b000 :^fcvt_smnemonic Rd_GPR32, Rn_FPR32 is 
b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR32 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) Rd_GPR32 = trunc(Rn_FPR32); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmpd:4 = trunc(Rn_FPR32); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fcvt_amnpz_su(Rn_FPR32); -@endif } # C7.2.76 FCVTMS (scalar) page C7-1564 line 87010 MATCH x1e300000/mask=x7f3ffc00 @@ -8820,13 +5434,7 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1718=0b00 & b_1015=0b000 :^fcvt_smnemonic Rd_GPR64, Rn_FPR32 is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR32 { -@if defined(SEMANTIC_primitive) Rd_GPR64 = trunc(Rn_FPR32); -@elif defined(SEMANTIC_pcode) - Rd_GPR64 = trunc(Rn_FPR32); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fcvt_amnpz_su(Rn_FPR32); -@endif } # C7.2.76 FCVTMS (scalar) page C7-1564 line 87010 MATCH x1e300000/mask=x7f3ffc00 @@ -8846,15 +5454,8 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1718=0b00 & b_1015=0b000 :^fcvt_smnemonic Rd_GPR32, Rn_FPR64 is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR64 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) Rd_GPR32 = trunc(Rn_FPR64); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmpd:4 = trunc(Rn_FPR64); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fcvt_amnpz_su(Rn_FPR64); -@endif } # C7.2.76 FCVTMS (scalar) page C7-1564 line 87010 MATCH x1e300000/mask=x7f3ffc00 @@ -8874,13 +5475,7 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1718=0b00 & b_1015=0b000 :^fcvt_smnemonic Rd_GPR64, Rn_FPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR64 { -@if defined(SEMANTIC_primitive) Rd_GPR64 = trunc(Rn_FPR64); -@elif defined(SEMANTIC_pcode) - Rd_GPR64 = trunc(Rn_FPR64); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fcvt_amnpz_su(Rn_FPR64); -@endif } # C7.2.79 FCVTN, FCVTN2 page C7-1571 line 87441 MATCH x0e216800/mask=xbfbffc00 @@ -8892,25 +5487,11 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1718=0b00 & b_1015=0b000 :fcvtn Rd_VPR64.2S, Rn_VPR128.2D is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x16 & b_1011=2 & Rn_VPR128.2D & Rd_VPR64.2S & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = Rn_VPR128.2D; # simd resize Rd_VPR64.2S = float2float(TMPQ1) (lane size 8 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, TMPQ1, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = float2float(* [register]:8 tmp2); - simd_address_at(tmp2, TMPQ1, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = float2float(* [register]:8 tmp2); + Rd_VPR64.2S[0,32] = float2float(TMPQ1[0,64]); + Rd_VPR64.2S[32,32] = float2float(TMPQ1[64,64]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rn_VPR128.2D; - local tmpd:8 = SIMD_FLOAT2FLOAT(tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fcvtn(Rd_VPR64.2S, Rn_VPR128.2D, 8:1); -@endif } # C7.2.79 FCVTN, FCVTN2 page C7-1571 line 87441 MATCH x0e216800/mask=xbfbffc00 @@ -8922,28 +5503,12 @@ is b_3131=0 & q=0 & 
u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :fcvtn2 Rd_VPR128.4S, Rn_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x16 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPD1 = float2float(Rn_VPR128.2D) (lane size 8 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, TMPD1, 0, 4, 8); - * [register]:4 tmp3 = float2float(* [register]:8 tmp2); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, TMPD1, 1, 4, 8); - * [register]:4 tmp3 = float2float(* [register]:8 tmp2); + TMPD1[0,32] = float2float(Rn_VPR128.2D[0,64]); + TMPD1[32,32] = float2float(Rn_VPR128.2D[64,64]); # simd copy Rd_VPR128.4S element 1:1 = TMPD1 (lane size 8) - local tmp4:4 = 0; - simd_address_at(tmp4, Rd_VPR128.4S, 1, 8, 16); - * [register]:8 tmp4 = TMPD1; + Rd_VPR128.4S[64,64] = TMPD1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_FLOAT2FLOAT(Rn_VPR128.2D, 8:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128.4S, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fcvtn2(Rd_VPR128.4S, Rn_VPR128.2D, 8:1); -@endif } # C7.2.79 FCVTN, FCVTN2 page C7-1571 line 87441 MATCH x0e216800/mask=xbfbffc00 @@ -8955,31 +5520,13 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :fcvtn Rd_VPR64.4H, Rn_VPR128.4S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x16 & b_1011=2 & Rn_VPR128.4S & Rd_VPR64.4H & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = Rn_VPR128.4S; # simd resize Rd_VPR64.4H = float2float(TMPQ1) (lane size 4 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, TMPQ1, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = float2float(* [register]:4 tmp2); - simd_address_at(tmp2, TMPQ1, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = float2float(* [register]:4 tmp2); - simd_address_at(tmp2, TMPQ1, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = float2float(* [register]:4 tmp2); - simd_address_at(tmp2, TMPQ1, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = float2float(* [register]:4 tmp2); + Rd_VPR64.4H[0,16] = float2float(TMPQ1[0,32]); + Rd_VPR64.4H[16,16] = float2float(TMPQ1[32,32]); + Rd_VPR64.4H[32,16] = float2float(TMPQ1[64,32]); + Rd_VPR64.4H[48,16] = float2float(TMPQ1[96,32]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rn_VPR128.4S; - local tmpd:8 = SIMD_FLOAT2FLOAT(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fcvtn(Rd_VPR64.4H, Rn_VPR128.4S, 4:1); -@endif } # C7.2.79 FCVTN, FCVTN2 page C7-1571 line 87441 MATCH x0e216800/mask=xbfbffc00 @@ -8991,34 +5538,14 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :fcvtn2 Rd_VPR128.8H, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x16 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPD1 = float2float(Rn_VPR128.4S) (lane size 4 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, TMPD1, 0, 2, 8); - * [register]:2 tmp3 = float2float(* [register]:4 tmp2); - simd_address_at(tmp2, 
Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, TMPD1, 1, 2, 8); - * [register]:2 tmp3 = float2float(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, TMPD1, 2, 2, 8); - * [register]:2 tmp3 = float2float(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, TMPD1, 3, 2, 8); - * [register]:2 tmp3 = float2float(* [register]:4 tmp2); + TMPD1[0,16] = float2float(Rn_VPR128.4S[0,32]); + TMPD1[16,16] = float2float(Rn_VPR128.4S[32,32]); + TMPD1[32,16] = float2float(Rn_VPR128.4S[64,32]); + TMPD1[48,16] = float2float(Rn_VPR128.4S[96,32]); # simd copy Rd_VPR128.8H element 1:1 = TMPD1 (lane size 8) - local tmp4:4 = 0; - simd_address_at(tmp4, Rd_VPR128.8H, 1, 8, 16); - * [register]:8 tmp4 = TMPD1; + Rd_VPR128.8H[64,64] = TMPD1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_FLOAT2FLOAT(Rn_VPR128.4S, 4:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128.8H, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fcvtn2(Rd_VPR128.8H, Rn_VPR128.4S, 4:1); -@endif } # C7.2.88 FCVTXN, FCVTXN2 page C7-1593 line 88766 MATCH x7e216800/mask=xffbffc00 @@ -9030,15 +5557,8 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :fcvtxn Rd_FPR32, Rn_FPR64 is b_2331=0b011111100 & b_22=1 & b_1021=0b100001011010 & Rd_FPR32 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = float2float(Rn_FPR64); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:4 = float2float(Rn_FPR64); - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fcvtxn(Rd_FPR32, Rn_FPR64); -@endif } # C7.2.88 FCVTXN, FCVTXN2 page C7-1593 line 88766 MATCH x2e216800/mask=xbfbffc00 @@ -9051,25 +5571,11 @@ is b_2331=0b011111100 & b_22=1 & b_1021=0b100001011010 & Rd_FPR32 & Rn_FPR64 & Z :fcvtxn Rd_VPR64.2S, Rn_VPR128.2D is b_31=0 & b_30=0 & b_2329=0b1011100 & b_22=1 & b_1021=0b100001011010 & Rd_VPR64.2S & Rd_VPR128 & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = Rn_VPR128.2D; # simd resize Rd_VPR64.2S = float2float(TMPQ1) (lane size 8 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, TMPQ1, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = float2float(* [register]:8 tmp2); - simd_address_at(tmp2, TMPQ1, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = float2float(* [register]:8 tmp2); + Rd_VPR64.2S[0,32] = float2float(TMPQ1[0,64]); + Rd_VPR64.2S[32,32] = float2float(TMPQ1[64,64]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rn_VPR128.2D; - local tmpd:8 = SIMD_FLOAT2FLOAT(tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fcvtxn(Rd_VPR64.2S, Rn_VPR128.2D, 8:1); -@endif } # C7.2.88 FCVTXN, FCVTXN2 page C7-1593 line 88766 MATCH x2e216800/mask=xbfbffc00 @@ -9082,28 +5588,12 @@ is b_31=0 & b_30=0 & b_2329=0b1011100 & b_22=1 & b_1021=0b100001011010 & Rd_VPR6 :fcvtxn2 Rd_VPR128.4S, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2329=0b1011100 & b_22=1 & b_1021=0b100001011010 & Rd_VPR128.4S & Rn_VPR128.2D & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPD1 = float2float(Rn_VPR128.2D) (lane size 8 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, TMPD1, 0, 4, 8); - * [register]:4 tmp3 = float2float(* 
[register]:8 tmp2); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, TMPD1, 1, 4, 8); - * [register]:4 tmp3 = float2float(* [register]:8 tmp2); + TMPD1[0,32] = float2float(Rn_VPR128.2D[0,64]); + TMPD1[32,32] = float2float(Rn_VPR128.2D[64,64]); # simd copy Rd_VPR128.4S element 1:1 = TMPD1 (lane size 8) - local tmp4:4 = 0; - simd_address_at(tmp4, Rd_VPR128.4S, 1, 8, 16); - * [register]:8 tmp4 = TMPD1; + Rd_VPR128.4S[64,64] = TMPD1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_FLOAT2FLOAT(Rn_VPR128.2D, 8:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128.4S, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fcvtxn2(Rd_VPR128.4S, Rn_VPR128.2D, 8:1); -@endif } # C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x5f00fc00/mask=xff80fc00 @@ -9115,10 +5605,8 @@ is b_31=0 & b_30=1 & b_2329=0b1011100 & b_22=1 & b_1021=0b100001011010 & Rd_VPR1 :fcvtzs Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b010111110 & b_22=1 & b_1015=0b111111 & Imm_shr_imm64 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:8 = zext(Imm_shr_imm64); Rd_FPR64 = NEON_fcvtzs(Rn_FPR64, tmp1); -@endif } # C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x5f00fc00/mask=xff80fc00 @@ -9130,9 +5618,7 @@ is b_2331=0b010111110 & b_22=1 & b_1015=0b111111 & Imm_shr_imm64 & Rn_FPR64 & Rd :fcvtzs Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fcvtzs(Rn_FPR32, Imm_shr_imm32:4); -@endif } # C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x5f00fc00/mask=xff80fc00 @@ -9144,9 +5630,7 @@ is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR32 :fcvtzs Rd_FPR16, Rn_FPR16, Imm_shr_imm16 is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b111111 & Imm_shr_imm16 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fcvtzs(Rn_FPR16, Imm_shr_imm16); -@endif } # C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x0f00fc00/mask=xbf80fc00 @@ -9158,10 +5642,8 @@ is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b111111 & Imm_shr_imm16 & Rn_FPR1 :fcvtzs Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_31=0 & b_30=1 & b_2329=0b0011110 & b_22=1 & b_1015=0b111111 & Rd_VPR128.2D & Rn_VPR128.2D & Imm_shr_imm64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:8 = zext(Imm_shr_imm64); Rd_VPR128.2D = NEON_fcvtzs(Rn_VPR128.2D, tmp1, 8:1); -@endif } # C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x0f00fc00/mask=xbf80fc00 @@ -9173,9 +5655,7 @@ is b_31=0 & b_30=1 & b_2329=0b0011110 & b_22=1 & b_1015=0b111111 & Rd_VPR128.2D :fcvtzs Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_31=0 & b_30=0 & b_2329=0b0011110 & b_2122=0b01 & b_1015=0b111111 & Rd_VPR64.2S & Rn_VPR64.2S & Imm_shr_imm32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fcvtzs(Rn_VPR64.2S, Imm_shr_imm32:4, 4:1); -@endif } # C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x0f00fc00/mask=xbf80fc00 @@ -9187,9 +5667,7 @@ is b_31=0 & b_30=0 & b_2329=0b0011110 & b_2122=0b01 & b_1015=0b111111 & Rd_VPR64 :fcvtzs 
Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_31=0 & b_30=1 & b_2329=0b0011110 & b_2122=0b01 & b_1015=0b111111 & Rd_VPR128.4S & Rn_VPR128.4S & Imm_shr_imm32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fcvtzs(Rn_VPR128.4S, Imm_shr_imm32:4, 4:1); -@endif } # C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x0f00fc00/mask=xbf80fc00 @@ -9201,9 +5679,7 @@ is b_31=0 & b_30=1 & b_2329=0b0011110 & b_2122=0b01 & b_1015=0b111111 & Rd_VPR12 :fcvtzs Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_31=0 & b_30=0 & b_2329=0b0011110 & b_2022=0b001 & b_1015=0b111111 & Rd_VPR64.4H & Rn_VPR64.4H & Imm_shr_imm16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fcvtzs(Rn_VPR64.4H, Imm_shr_imm16, 2:1); -@endif } # C7.2.89 FCVTZS (vector, fixed-point) page C7-1595 line 88905 MATCH x0f00fc00/mask=xbf80fc00 @@ -9215,9 +5691,7 @@ is b_31=0 & b_30=0 & b_2329=0b0011110 & b_2022=0b001 & b_1015=0b111111 & Rd_VPR6 :fcvtzs Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_31=0 & b_30=1 & b_2329=0b0011110 & b_2022=0b001 & b_1015=0b111111 & Rd_VPR128.8H & Rn_VPR128.8H & Imm_shr_imm16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fcvtzs(Rn_VPR128.8H, Imm_shr_imm16, 2:1); -@endif } # C7.2.91 FCVTZS (scalar, fixed-point) page C7-1601 line 89240 MATCH x1e180000/mask=x7f3f0000 @@ -9231,17 +5705,9 @@ is b_31=0 & b_30=1 & b_2329=0b0011110 & b_2022=0b001 & b_1015=0b111111 & Rd_VPR1 :fcvtzs Rd_GPR32, Rn_FPR16, FBitsOp is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_1621=0b011000 & b_15=1 & Rd_GPR32 & Rn_FPR16 & FBitsOp & FBits16 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) local tmp1:2 = Rn_FPR16 f* FBits16; Rd_GPR32 = trunc(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmp1:2 = Rn_FPR16 f* FBits16; - local tmpd:4 = trunc(tmp1); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fcvtzs(Rn_FPR16, FBits16); -@endif } # C7.2.91 FCVTZS (scalar, fixed-point) page C7-1601 line 89240 MATCH x1e180000/mask=x7f3f0000 @@ -9254,15 +5720,8 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_1621=0b011000 & b_15=1 & Rd_GPR32 :fcvtzs Rd_GPR64, Rn_FPR16, FBitsOp is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_1621=0b011000 & Rd_GPR64 & Rn_FPR16 & FBitsOp & FBits16 { -@if defined(SEMANTIC_primitive) local tmp1:2 = Rn_FPR16 f* FBitsOp; Rd_GPR64 = trunc(tmp1); -@elif defined(SEMANTIC_pcode) - local tmp1:2 = Rn_FPR16 f* FBitsOp; - Rd_GPR64 = trunc(tmp1); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fcvtzs(Rn_FPR16, FBitsOp); -@endif } # C7.2.91 FCVTZS (scalar, fixed-point) page C7-1601 line 89240 MATCH x1e180000/mask=x7f3f0000 @@ -9275,17 +5734,9 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_1621=0b011000 & Rd_GPR64 & Rn_FPR :fcvtzs Rd_GPR32, Rn_FPR32, FBitsOp is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_1621=0b011000 & b_15=1 & Rd_GPR32 & Rn_FPR32 & FBitsOp & FBits32 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) local tmp1:4 = Rn_FPR32 f* FBits32; Rd_GPR32 = trunc(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Rn_FPR32 f* FBits32; - local tmpd:4 = trunc(tmp1); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fcvtzs(Rn_FPR32, FBits32); -@endif } # C7.2.91 FCVTZS (scalar, fixed-point) page C7-1601 line 89240 MATCH 
x1e180000/mask=x7f3f0000 @@ -9298,15 +5749,8 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_1621=0b011000 & b_15=1 & Rd_GPR32 :fcvtzs Rd_GPR64, Rn_FPR32, FBitsOp is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_1621=0b011000 & Rd_GPR64 & Rn_FPR32 & FBitsOp & FBits32 { -@if defined(SEMANTIC_primitive) local tmp1:4 = Rn_FPR32 f* FBits32; Rd_GPR64 = trunc(tmp1); -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Rn_FPR32 f* FBits32; - Rd_GPR64 = trunc(tmp1); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fcvtzs(Rn_FPR32, FBits32); -@endif } # C7.2.91 FCVTZS (scalar, fixed-point) page C7-1601 line 89240 MATCH x1e180000/mask=x7f3f0000 @@ -9319,17 +5763,9 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_1621=0b011000 & Rd_GPR64 & Rn_FPR :fcvtzs Rd_GPR32, Rn_FPR64, FBitsOp is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_1621=0b011000 & b_15=1 & Rd_GPR32 & Rn_FPR64 & FBitsOp & FBits64 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) local tmp1:8 = Rn_FPR64 f* FBits64; Rd_GPR32 = trunc(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_FPR64 f* FBits64; - local tmpd:4 = trunc(tmp1); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fcvtzs(Rn_FPR64, FBits64); -@endif } # C7.2.91 FCVTZS (scalar, fixed-point) page C7-1601 line 89240 MATCH x1e180000/mask=x7f3f0000 @@ -9342,15 +5778,8 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_1621=0b011000 & b_15=1 & Rd_GPR32 :fcvtzs Rd_GPR64, Rn_FPR64, FBitsOp is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_1621=0b011000 & Rd_GPR64 & Rn_FPR64 & FBitsOp & FBits64 { -@if defined(SEMANTIC_primitive) local tmp1:8 = Rn_FPR64 f* FBits64; Rd_GPR64 = trunc(tmp1); -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_FPR64 f* FBits64; - Rd_GPR64 = trunc(tmp1); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fcvtzs(Rn_FPR64, FBits64); -@endif } # C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x2f00fc00/mask=xbf80fc00 @@ -9361,10 +5790,8 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_1621=0b011000 & Rd_GPR64 & Rn_FPR :fcvtzu Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1f & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:8 = zext(Imm_shr_imm64); Rd_VPR128.2D = NEON_fcvtzu(Rn_VPR128.2D, tmp1, 8:1); -@endif } # C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x2f00fc00/mask=xbf80fc00 @@ -9375,9 +5802,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1f :fcvtzu Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1f & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fcvtzu(Rn_VPR64.2S, Imm_shr_imm32:4, 4:1); -@endif } # C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x2f00fc00/mask=xbf80fc00 @@ -9388,9 +5813,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1f & :fcvtzu Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1f & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fcvtzu(Rn_VPR128.4S, Imm_shr_imm32:4, 4:1); -@endif } # C7.2.93 FCVTZU (vector, 
fixed-point) page C7-1605 line 89490 MATCH x2f00fc00/mask=xbf80fc00 @@ -9401,9 +5824,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1f & :fcvtzu Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm16 & b_1115=0x1f & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fcvtzu(Rn_VPR64.4H, Imm_shr_imm16, 2:1); -@endif } # C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x2f00fc00/mask=xbf80fc00 @@ -9414,9 +5835,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm16 & b_1115=0x1f & :fcvtzu Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm16 & b_1115=0x1f & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fcvtzu(Rn_VPR128.8H, Imm_shr_imm16, 2:1); -@endif } # C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x7f00fc00/mask=xff80fc00 @@ -9429,23 +5848,12 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm16 & b_1115=0x1f & :fcvtzu Rd_FPR16, Rn_FPR16, Imm_shr_imm32 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = 1:2 << Imm_shr_imm32; local tmp2:2 = int2float(tmp1); local tmp3:2 = Rn_FPR16 f* tmp2; local tmp4:2 = abs(tmp3); Rd_FPR16 = trunc(tmp4); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = 1:2 << Imm_shr_imm32; - local tmp2:2 = int2float(tmp1); - local tmp3:2 = Rn_FPR16 f* tmp2; - local tmp4:2 = abs(tmp3); - local tmpd:2 = trunc(tmp4); - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fcvtzu(Rn_FPR16, Imm_shr_imm32); -@endif } # C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x7f00fc00/mask=xff80fc00 @@ -9458,23 +5866,12 @@ is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR1 :fcvtzu Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = 1:4 << Imm_shr_imm32:4; local tmp2:4 = int2float(tmp1); local tmp3:4 = Rn_FPR32 f* tmp2; local tmp4:4 = abs(tmp3); Rd_FPR32 = trunc(tmp4); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = 1:4 << Imm_shr_imm32:4; - local tmp2:4 = int2float(tmp1); - local tmp3:4 = Rn_FPR32 f* tmp2; - local tmp4:4 = abs(tmp3); - local tmpd:4 = trunc(tmp4); - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fcvtzu(Rn_FPR32, Imm_shr_imm32); -@endif } # C7.2.93 FCVTZU (vector, fixed-point) page C7-1605 line 89490 MATCH x7f00fc00/mask=xff80fc00 @@ -9487,7 +5884,6 @@ is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR32 :fcvtzu Rd_FPR64, Rn_FPR64, Imm_shr_imm32 is b_2331=0b011111110 & b_22=1 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm32); local tmp2:8 = 1:8 << tmp1; local tmp3:8 = int2float(tmp2); @@ -9495,17 +5891,6 @@ is b_2331=0b011111110 & b_22=1 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR64 & Rd local tmp5:8 = abs(tmp4); Rd_FPR64 = trunc(tmp5); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 
zext(Imm_shr_imm32); - local tmp2:8 = 1:8 << tmp1; - local tmp3:8 = int2float(tmp2); - local tmp4:8 = Rn_FPR64 f* tmp3; - local tmp5:8 = abs(tmp4); - local tmpd:8 = trunc(tmp5); - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fcvtzu(Rn_FPR64, Imm_shr_imm32); -@endif } # C7.2.95 FCVTZU (scalar, fixed-point) page C7-1611 line 89825 MATCH x1e190000/mask=x7f3f0000 @@ -9517,17 +5902,9 @@ is b_2331=0b011111110 & b_22=1 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR64 & Rd :fcvtzu Rd_GPR32, Rn_FPR16, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=3 & fpOpcode=1 & b_15=1 & FBitsOp & FBits16 & Rn_FPR16 & Rd_GPR32 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) local tmp1:2 = Rn_FPR16 f* FBitsOp; Rd_GPR32 = trunc(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmp1:2 = Rn_FPR16 f* FBitsOp; - local tmpd:4 = trunc(tmp1); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fcvtzu(Rn_FPR16, FBitsOp); -@endif } # C7.2.95 FCVTZU (scalar, fixed-point) page C7-1611 line 89825 MATCH x1e190000/mask=x7f3f0000 @@ -9539,15 +5916,8 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=3 & fpOpcode= :fcvtzu Rd_GPR64, Rn_FPR16, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=3 & fpOpcode=1 & FBitsOp & FBits16 & Rn_FPR16 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) local tmp1:2 = Rn_FPR16 f* FBitsOp; Rd_GPR64 = trunc(tmp1); -@elif defined(SEMANTIC_pcode) - local tmp1:2 = Rn_FPR16 f* FBitsOp; - Rd_GPR64 = trunc(tmp1); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fcvtzu(Rn_FPR16, FBitsOp); -@endif } # C7.2.95 FCVTZU (scalar, fixed-point) page C7-1611 line 89825 MATCH x1e190000/mask=x7f3f0000 @@ -9559,17 +5929,9 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=3 & fpOpcode= :fcvtzu Rd_GPR32, Rn_FPR64, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=3 & fpOpcode=1 & b_15=1 & FBitsOp & FBits64 & Rn_FPR64 & Rd_GPR32 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) local tmp1:8 = Rn_FPR64 f* FBits64; Rd_GPR32 = trunc(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_FPR64 f* FBits64; - local tmpd:4 = trunc(tmp1); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fcvtzu(Rn_FPR64, FBitsOp); -@endif } # C7.2.95 FCVTZU (scalar, fixed-point) page C7-1611 line 89825 MATCH x1e190000/mask=x7f3f0000 @@ -9581,17 +5943,9 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=3 & fpOpcode= :fcvtzu Rd_GPR32, Rn_FPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=3 & fpOpcode=1 & b_15=1 & FBitsOp & FBits32 & Rn_FPR32 & Rd_GPR32 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) local tmp1:4 = Rn_FPR32 f* FBits32; Rd_GPR32 = trunc(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Rn_FPR32 f* FBits32; - local tmpd:4 = trunc(tmp1); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fcvtzu(Rn_FPR32, FBits32); -@endif } # C7.2.95 FCVTZU (scalar, fixed-point) page C7-1611 line 89825 MATCH x1e190000/mask=x7f3f0000 @@ -9603,15 +5957,8 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=3 & fpOpcode= :fcvtzu Rd_GPR64, Rn_FPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=3 & fpOpcode=1 & 
FBitsOp & FBits64 & Rn_FPR64 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) local tmp1:8 = Rn_FPR64 f* FBits64; Rd_GPR64 = trunc(tmp1); -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_FPR64 f* FBits64; - Rd_GPR64 = trunc(tmp1); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fcvtzu(Rn_FPR64, FBits64); -@endif } # C7.2.95 FCVTZU (scalar, fixed-point) page C7-1611 line 89825 MATCH x1e190000/mask=x7f3f0000 @@ -9623,15 +5970,8 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=3 & fpOpcode= :fcvtzu Rd_GPR64, Rn_FPR32, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=3 & fpOpcode=1 & FBitsOp & FBits32 & Rn_FPR32 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) local tmp1:4 = Rn_FPR32 f* FBits32; Rd_GPR64 = trunc(tmp1); -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Rn_FPR32 f* FBits32; - Rd_GPR64 = trunc(tmp1); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fcvtzu(Rn_FPR32, FBits32); -@endif } # C7.2.97 FDIV (vector) page C7-1615 line 90075 MATCH x2e20fc00/mask=xbfa0fc00 @@ -9643,26 +5983,10 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=3 & fpOpcode= :fdiv Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1f & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.2D = Rn_VPR128.2D f/ Rm_VPR128.2D on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) f/ (* [register]:8 tmp2); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) f/ (* [register]:8 tmp2); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f/ Rm_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f/ Rm_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_DIV(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fdiv(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.97 FDIV (vector) page C7-1615 line 90075 MATCH x2e20fc00/mask=xbfa0fc00 @@ -9674,26 +5998,10 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & :fdiv Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1f & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.2S = Rn_VPR64.2S f/ Rm_VPR64.2S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) f/ (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) f/ (* [register]:4 tmp2); + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f/ Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f/ Rm_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_DIV(Rn_VPR64.2S, 
Rm_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fdiv(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.97 FDIV (vector) page C7-1615 line 90075 MATCH x2e20fc00/mask=xbfa0fc00 @@ -9705,34 +6013,12 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :fdiv Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1f & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.4S = Rn_VPR128.4S f/ Rm_VPR128.4S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f/ (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f/ (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f/ (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f/ (* [register]:4 tmp2); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f/ Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f/ Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f/ Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f/ Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_DIV(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fdiv(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.97 FDIV (vector) page C7-1615 line 90075 MATCH x2e403c00/mask=xbfe0fc00 @@ -9745,34 +6031,12 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :fdiv Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.4H = Rn_VPR64.4H f/ Rm_VPR64.4H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f/ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f/ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f/ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f/ (* [register]:2 tmp2); + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f/ Rm_VPR64.4H[0,16]; + 
Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f/ Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f/ Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f/ Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_DIV(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fdiv(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.97 FDIV (vector) page C7-1615 line 90075 MATCH x2e403c00/mask=xbfe0fc00 @@ -9785,50 +6049,16 @@ is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001111 & Rd_VPR64.4H & Rn_VPR :fdiv Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.8H = Rn_VPR128.8H f/ Rm_VPR128.8H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f/ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f/ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f/ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f/ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f/ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f/ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f/ (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f/ (* [register]:2 tmp2); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f/ Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f/ Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f/ Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f/ Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f/ Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f/ Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f/ Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f/ Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_DIV(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fdiv(Rn_VPR128.8H, Rm_VPR128.8H, 
2:1);
-@endif
}
# C7.2.98 FDIV (scalar) page C7-1617 line 90190 MATCH x1e201800/mask=xff20fc00
@@ -9840,15 +6070,8 @@ is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001111 & Rd_VPR128.8H & Rn_VP
:fdiv Rd_FPR64, Rn_FPR64, Rm_FPR64
is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x1 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd
{
-@if defined(SEMANTIC_primitive)
  Rd_FPR64 = Rn_FPR64 f/ Rm_FPR64;
  zext_zd(Zd); # zero upper 24 bytes of Zd
-@elif defined(SEMANTIC_pcode)
- Rd_FPR64 = Rn_FPR64 f/ Rm_FPR64;
- zext_zd(Zd); # zero upper 24 bytes of Zd
-@elif defined(SEMANTIC_pseudo)
- Rd_FPR64 = NEON_fdiv(Rn_FPR64, Rm_FPR64);
-@endif
}
# C7.2.98 FDIV (scalar) page C7-1617 line 90190 MATCH x1e201800/mask=xff20fc00
@@ -9860,15 +6083,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0
:fdiv Rd_FPR32, Rn_FPR32, Rm_FPR32
is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x1 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd
{
-@if defined(SEMANTIC_primitive)
  Rd_FPR32 = Rn_FPR32 f/ Rm_FPR32;
  zext_zs(Zd); # zero upper 28 bytes of Zd
-@elif defined(SEMANTIC_pcode)
- Rd_FPR32 = Rn_FPR32 f/ Rm_FPR32;
- zext_zs(Zd); # zero upper 28 bytes of Zd
-@elif defined(SEMANTIC_pseudo)
- Rd_FPR32 = NEON_fdiv(Rn_FPR32, Rm_FPR32);
-@endif
}
# C7.2.98 FDIV (scalar) page C7-1617 line 90190 MATCH x1e201800/mask=xff20fc00
@@ -9880,15 +6096,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0
:fdiv Rd_FPR16, Rn_FPR16, Rm_FPR16
is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x1 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd
{
-@if defined(SEMANTIC_primitive)
  Rd_FPR16 = Rn_FPR16 f/ Rm_FPR16;
  zext_zh(Zd); # zero upper 30 bytes of Zd
-@elif defined(SEMANTIC_pcode)
- Rd_FPR16 = Rn_FPR16 f/ Rm_FPR16;
- zext_zh(Zd); # zero upper 30 bytes of Zd
-@elif defined(SEMANTIC_pseudo)
- Rd_FPR16 = NEON_fdiv(Rn_FPR16, Rm_FPR16);
-@endif
}
# C7.2.99 FJCVTZS page C7-1619 line 90296 MATCH x1e7e0000/mask=xfffffc00
@@ -9900,15 +6109,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0
:fjcvtzs Rd_GPR32, Rn_FPR64
is b_1031=0b0001111001111110000000 & Rd_GPR32 & Rn_FPR64 & Rd_GPR64
{
-@if defined(SEMANTIC_primitive)
  Rd_GPR32 = trunc(Rn_FPR64);
  zext_rs(Rd_GPR64); # zero upper 4 bytes of Rd_GPR64
-@elif defined(SEMANTIC_pcode)
- local tmpd:4 = trunc(Rn_FPR64);
- Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32
-@elif defined(SEMANTIC_pseudo)
- Rd_GPR32 = NEON_fjcvtzs(Rn_FPR64);
-@endif
}
# C7.2.100 FMADD page C7-1620 line 90360 MATCH x1f000000/mask=xff208000
@@ -9919,9 +6121,7 @@ is b_1031=0b0001111001111110000000 & Rd_GPR32 & Rn_FPR64 & Rd_GPR64
:fmadd Rd_FPR64, Rn_FPR64, Rm_FPR64, Ra_FPR64
is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=0 & Rm_FPR64 & b_15=0 & Ra_FPR64 & Rn_FPR64 & Rd_FPR64 & Zd
{
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
  Rd_FPR64 = NEON_fmadd(Rn_FPR64, Rm_FPR64, Ra_FPR64);
-@endif
}
# C7.2.100 FMADD page C7-1620 line 90360 MATCH x1f000000/mask=xff208000
@@ -9932,9 +6132,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=0 & Rm_FPR64 & b_15=0 & R
:fmadd Rd_FPR32, Rn_FPR32, Rm_FPR32, Ra_FPR32
is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=0 & Rm_FPR32 & b_15=0 & Ra_FPR32 & Rn_FPR32 & Rd_FPR32 & Zd
{
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
  Rd_FPR32 = NEON_fmadd(Rn_FPR32, Rm_FPR32, Ra_FPR32);
-@endif
}
# C7.2.100 FMADD page C7-1620 line 90360 MATCH
x1f000000/mask=xff208000 @@ -9945,9 +6143,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=0 & Rm_FPR32 & b_15=0 & R :fmadd Rd_FPR16, Rn_FPR16, Rm_FPR16, Ra_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=0 & Rm_FPR16 & b_15=0 & Ra_FPR16 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fmadd(Rn_FPR16, Rm_FPR16, Ra_FPR16); -@endif } # C7.2.101 FMAX (vector) page C7-1622 line 90483 MATCH x0e20f400/mask=xbfa0fc00 @@ -9958,9 +6154,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=0 & Rm_FPR16 & b_15=0 & R :fmax Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1e & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fmax(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.101 FMAX (vector) page C7-1622 line 90483 MATCH x0e20f400/mask=xbfa0fc00 @@ -9971,9 +6165,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & :fmax Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1e & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fmax(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.101 FMAX (vector) page C7-1622 line 90483 MATCH x0e20f400/mask=xbfa0fc00 @@ -9984,9 +6176,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :fmax Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1e & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fmax(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.101 FMAX (vector) page C7-1622 line 90483 MATCH x0e403400/mask=xbfe0fc00 @@ -9998,9 +6188,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :fmax Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fmax(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.101 FMAX (vector) page C7-1622 line 90483 MATCH x0e403400/mask=xbfe0fc00 @@ -10012,9 +6200,7 @@ is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR :fmax Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fmax(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.102 FMAX (scalar) page C7-1624 line 90609 MATCH x1e204800/mask=xff20fc00 @@ -10026,23 +6212,12 @@ is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VP :fmax Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x4 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd local tmp1:1 = Rn_FPR64 f> Rm_FPR64; if (tmp1) goto inst_next; Rd_FPR64 = Rm_FPR64; zext_zd(Zd); # zero upper 24 
bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = Rn_FPR64; - Zd = zext(tmpd); # assigning to Rd_FPR64 - local tmp1:1 = Rn_FPR64 f> Rm_FPR64; - if (tmp1) goto inst_next; - tmpd = Rm_FPR64; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fmax(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.102 FMAX (scalar) page C7-1624 line 90609 MATCH x1e204800/mask=xff20fc00 @@ -10054,23 +6229,12 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0 :fmax Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x4 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = Rn_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd local tmp1:1 = Rn_FPR32 f> Rm_FPR32; if (tmp1) goto inst_next; Rd_FPR32 = Rm_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:4 = Rn_FPR32; - Zd = zext(tmpd); # assigning to Rd_FPR32 - local tmp1:1 = Rn_FPR32 f> Rm_FPR32; - if (tmp1) goto inst_next; - tmpd = Rm_FPR32; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fmax(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.102 FMAX (scalar) page C7-1624 line 90609 MATCH x1e204800/mask=xff20fc00 @@ -10081,9 +6245,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0 :fmax Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x4 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fmax(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.103 FMAXNM (vector) page C7-1626 line 90711 MATCH x0e20c400/mask=xbfa0fc00 @@ -10094,9 +6256,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0 :fmaxnm Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x18 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fmaxnm(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.103 FMAXNM (vector) page C7-1626 line 90711 MATCH x0e20c400/mask=xbfa0fc00 @@ -10107,9 +6267,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & :fmaxnm Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x18 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fmaxnm(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.103 FMAXNM (vector) page C7-1626 line 90711 MATCH x0e20c400/mask=xbfa0fc00 @@ -10120,9 +6278,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :fmaxnm Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x18 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fmaxnm(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.103 FMAXNM (vector) page C7-1626 line 90711 MATCH x0e400400/mask=xbfe0fc00 @@ -10134,9 +6290,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :fmaxnm Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & 
b_2129=0b001110010 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fmaxnm(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.103 FMAXNM (vector) page C7-1626 line 90711 MATCH x0e400400/mask=xbfe0fc00 @@ -10148,9 +6302,7 @@ is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR :fmaxnm Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fmaxnm(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.104 FMAXNM (scalar) page C7-1628 line 90842 MATCH x1e206800/mask=xff20fc00 @@ -10162,23 +6314,12 @@ is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VP :fmaxnm Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x6 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd local tmp1:1 = Rn_FPR64 f> Rm_FPR64; if (tmp1) goto inst_next; Rd_FPR64 = Rm_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = Rn_FPR64; - Zd = zext(tmpd); # assigning to Rd_FPR64 - local tmp1:1 = Rn_FPR64 f> Rm_FPR64; - if (tmp1) goto inst_next; - tmpd = Rm_FPR64; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fmaxnm(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.104 FMAXNM (scalar) page C7-1628 line 90842 MATCH x1e206800/mask=xff20fc00 @@ -10190,23 +6331,12 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0 :fmaxnm Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x6 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = Rn_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd local tmp1:1 = Rn_FPR32 f> Rm_FPR32; if (tmp1) goto inst_next; Rd_FPR32 = Rm_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:4 = Rn_FPR32; - Zd = zext(tmpd); # assigning to Rd_FPR32 - local tmp1:1 = Rn_FPR32 f> Rm_FPR32; - if (tmp1) goto inst_next; - tmpd = Rm_FPR32; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fmaxnm(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.104 FMAXNM (scalar) page C7-1628 line 90842 MATCH x1e206800/mask=xff20fc00 @@ -10218,23 +6348,12 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0 :fmaxnm Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x6 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = Rn_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd local tmp1:1 = Rn_FPR16 f> Rm_FPR16; if (tmp1) goto inst_next; Rd_FPR16 = Rm_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:2 = Rn_FPR16; - Zd = zext(tmpd); # assigning to Rd_FPR16 - local tmp1:1 = Rn_FPR16 f> Rm_FPR16; - if (tmp1) goto inst_next; - tmpd = Rm_FPR16; - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fmaxnm(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.105 FMAXNMP (scalar) page C7-1630 line 90948 MATCH x7e30c800/mask=xffbffc00 @@ -10245,9 +6364,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & 
ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0 :fmaxnmp Rd_FPR64, Rn_VPR128.2D is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x38 & b_1216=0xc & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fmaxnmp(Rn_VPR128.2D, 8:1); -@endif } # C7.2.105 FMAXNMP (scalar) page C7-1630 line 90948 MATCH x7e30c800/mask=xffbffc00 @@ -10258,9 +6375,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x38 & b_1216=0xc & b_1011=2 & :fmaxnmp Rd_FPR32, Rn_VPR64.2S is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x18 & b_1216=0xc & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fmaxnmp(Rn_VPR64.2S, 4:1); -@endif } # C7.2.105 FMAXNMP (scalar) page C7-1630 line 90948 MATCH x5e30c800/mask=xfffffc00 @@ -10272,9 +6387,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x18 & b_1216=0xc & b_1011=2 & :fmaxnmp Rd_FPR16, vRn_VPR128^".2H" is b_1031=0b0101111000110000110010 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fmaxnmp(Rn_FPR32, 2:1); -@endif } # C7.2.106 FMAXNMP (vector) page C7-1632 line 91052 MATCH x2e20c400/mask=xbfa0fc00 @@ -10285,9 +6398,7 @@ is b_1031=0b0101111000110000110010 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd :fmaxnmp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x18 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fmaxnmp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.106 FMAXNMP (vector) page C7-1632 line 91052 MATCH x2e20c400/mask=xbfa0fc00 @@ -10298,9 +6409,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & :fmaxnmp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x18 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fmaxnmp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.106 FMAXNMP (vector) page C7-1632 line 91052 MATCH x2e20c400/mask=xbfa0fc00 @@ -10311,9 +6420,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :fmaxnmp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x18 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fmaxnmp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.106 FMAXNMP (vector) page C7-1632 line 91052 MATCH x2e400400/mask=xbfe0fc00 @@ -10325,9 +6432,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :fmaxnmp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fmaxnmp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.106 FMAXNMP (vector) page C7-1632 line 91052 MATCH x2e400400/mask=xbfe0fc00 @@ -10339,9 +6444,7 @@ is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR :fmaxnmp Rd_VPR128.8H, Rn_VPR128.8H, 
Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fmaxnmp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.107 FMAXNMV page C7-1634 line 91185 MATCH x2e30c800/mask=xbfbffc00 @@ -10352,9 +6455,7 @@ is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VP :fmaxnmv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xc & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fmaxnmv(Rn_VPR128.4S, 4:1); -@endif } # C7.2.107 FMAXNMV page C7-1634 line 91185 MATCH x0e30c800/mask=xbffffc00 @@ -10366,9 +6467,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :fmaxnmv Rd_FPR16, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111000110000110010 & Rd_FPR16 & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fmaxnmv(Rn_VPR64.4H, 2:1); -@endif } # C7.2.107 FMAXNMV page C7-1634 line 91185 MATCH x0e30c800/mask=xbffffc00 @@ -10380,9 +6479,7 @@ is b_31=0 & b_30=0 & b_1029=0b00111000110000110010 & Rd_FPR16 & Rn_VPR64.4H & Zd :fmaxnmv Rd_FPR16, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111000110000110010 & Rd_FPR16 & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fmaxnmv(Rn_VPR128.8H, 2:1); -@endif } # C7.2.108 FMAXP (scalar) page C7-1636 line 91293 MATCH x7e30f800/mask=xffbffc00 @@ -10393,9 +6490,7 @@ is b_31=0 & b_30=1 & b_1029=0b00111000110000110010 & Rd_FPR16 & Rn_VPR128.8H & Z :fmaxp Rd_FPR64, Rn_VPR128.2D is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x38 & b_1216=0xf & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fmaxnmv(Rn_VPR128.2D, 8:1); -@endif } # C7.2.108 FMAXP (scalar) page C7-1636 line 91293 MATCH x7e30f800/mask=xffbffc00 @@ -10406,9 +6501,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x38 & b_1216=0xf & b_1011=2 & :fmaxp Rd_FPR32, Rn_VPR64.2S is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x18 & b_1216=0xf & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fmaxp(Rn_VPR64.2S, 4:1); -@endif } # C7.2.108 FMAXP (scalar) page C7-1636 line 91293 MATCH x5e30f800/mask=xfffffc00 @@ -10420,9 +6513,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x18 & b_1216=0xf & b_1011=2 & :fmaxp Rd_FPR16, vRn_VPR128^".2H" is b_1031=0b0101111000110000111110 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fmaxp(Rn_FPR32, 2:1); -@endif } # C7.2.109 FMAXP (vector) page C7-1638 line 91397 MATCH x2e20f400/mask=xbfa0fc00 @@ -10433,9 +6524,7 @@ is b_1031=0b0101111000110000111110 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd :fmaxp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1e & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fmaxp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.109 FMAXP (vector) page 
C7-1638 line 91397 MATCH x2e20f400/mask=xbfa0fc00 @@ -10446,9 +6535,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & :fmaxp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1e & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fmaxp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.109 FMAXP (vector) page C7-1638 line 91397 MATCH x2e20f400/mask=xbfa0fc00 @@ -10459,9 +6546,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :fmaxp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1e & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fmaxp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.109 FMAXP (vector) page C7-1638 line 91397 MATCH x2e403400/mask=xbfe0fc00 @@ -10473,9 +6558,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :fmaxp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fmaxp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.109 FMAXP (vector) page C7-1638 line 91397 MATCH x2e403400/mask=xbfe0fc00 @@ -10487,9 +6570,7 @@ is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR :fmaxp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fmaxp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.110 FMAXV page C7-1640 line 91528 MATCH x2e30f800/mask=xbfbffc00 @@ -10500,9 +6581,7 @@ is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VP :fmaxv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xf & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fmaxv(Rn_VPR128.4S, 4:1); -@endif } # C7.2.110 FMAXV page C7-1640 line 91528 MATCH x0e30f800/mask=xbffffc00 @@ -10514,9 +6593,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :fmaxv Rd_FPR16, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111000110000111110 & Rd_FPR16 & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fmaxv(Rn_VPR64.4H, 2:1); -@endif } # C7.2.110 FMAXV page C7-1640 line 91528 MATCH x0e30f800/mask=xbffffc00 @@ -10528,9 +6605,7 @@ is b_31=0 & b_30=0 & b_1029=0b00111000110000111110 & Rd_FPR16 & Rn_VPR64.4H & Zd :fmaxv Rd_FPR16, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111000110000111110 & Rd_FPR16 & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fmaxv(Rn_VPR128.8H, 2:1); -@endif } # C7.2.111 FMIN (vector) page C7-1642 line 91635 MATCH x0ea0f400/mask=xbfa0fc00 @@ -10541,9 +6616,7 @@ is b_31=0 & b_30=1 & b_1029=0b00111000110000111110 & Rd_FPR16 & Rn_VPR128.8H & Z :fmin Rd_VPR128.2D, 
Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1e & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fmin(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.111 FMIN (vector) page C7-1642 line 91635 MATCH x0ea0f400/mask=xbfa0fc00 @@ -10554,9 +6627,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & :fmin Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1e & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fmin(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.111 FMIN (vector) page C7-1642 line 91635 MATCH x0ea0f400/mask=xbfa0fc00 @@ -10567,9 +6638,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & :fmin Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1e & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fmin(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.111 FMIN (vector) page C7-1642 line 91635 MATCH x0ec03400/mask=xbfe0fc00 @@ -10581,9 +6650,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & :fmin Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fmin(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.111 FMIN (vector) page C7-1642 line 91635 MATCH x0ec03400/mask=xbfe0fc00 @@ -10595,9 +6662,7 @@ is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR :fmin Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fmin(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.112 FMIN (scalar) page C7-1644 line 91761 MATCH x1e205800/mask=xff20fc00 @@ -10608,9 +6673,7 @@ is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VP :fmin Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x5 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fmin(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.112 FMIN (scalar) page C7-1644 line 91761 MATCH x1e205800/mask=xff20fc00 @@ -10621,9 +6684,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0 :fmin Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x5 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fmin(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.112 FMIN (scalar) page C7-1644 line 91761 MATCH x1e205800/mask=xff20fc00 @@ -10634,9 +6695,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0 :fmin Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & 
b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x5 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fmin(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.113 FMINNM (vector) page C7-1646 line 91863 MATCH x0ea0c400/mask=xbfa0fc00 @@ -10647,9 +6706,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0 :fminnm Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x18 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fminnm(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.113 FMINNM (vector) page C7-1646 line 91863 MATCH x0ea0c400/mask=xbfa0fc00 @@ -10660,9 +6717,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & :fminnm Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x18 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fminnm(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.113 FMINNM (vector) page C7-1646 line 91863 MATCH x0ea0c400/mask=xbfa0fc00 @@ -10673,9 +6728,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & :fminnm Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x18 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fminnm(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.113 FMINNM (vector) page C7-1646 line 91863 MATCH x0ec00400/mask=xbfe0fc00 @@ -10687,9 +6740,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & :fminnm Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fminnm(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.113 FMINNM (vector) page C7-1646 line 91863 MATCH x0ec00400/mask=xbfe0fc00 @@ -10701,9 +6752,7 @@ is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR :fminnm Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fminnm(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.114 FMINNM (scalar) page C7-1648 line 91994 MATCH x1e207800/mask=xff20fc00 @@ -10714,9 +6763,7 @@ is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VP :fminnm Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x7 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fminnm(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.114 FMINNM (scalar) page C7-1648 line 91994 MATCH x1e207800/mask=xff20fc00 @@ -10727,9 +6774,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0 :fminnm Rd_FPR32, Rn_FPR32, 
Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x7 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fminnm(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.114 FMINNM (scalar) page C7-1648 line 91994 MATCH x1e207800/mask=xff20fc00 @@ -10740,9 +6785,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0 :fminnm Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x7 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fminnm(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.115 FMINNMP (scalar) page C7-1650 line 92101 MATCH x7eb0c800/mask=xffbffc00 @@ -10753,9 +6796,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0 :fminnmp Rd_FPR64, Rn_VPR128.2D is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x38 & b_1216=0xc & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fminnmp(Rn_VPR128.2D, 8:1); -@endif } # C7.2.115 FMINNMP (scalar) page C7-1650 line 92101 MATCH x7eb0c800/mask=xffbffc00 @@ -10766,9 +6807,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x38 & b_1216=0xc & b_1011=2 & :fminnmp Rd_FPR32, Rn_VPR64.2S is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x18 & b_1216=0xc & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fminnmp(Rn_VPR64.2S, 4:1); -@endif } # C7.2.115 FMINNMP (scalar) page C7-1650 line 92101 MATCH x5eb0c800/mask=xfffffc00 @@ -10780,9 +6819,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x18 & b_1216=0xc & b_1011=2 & :fminnmp Rd_FPR16, vRn_VPR128^".2H" is b_1031=0b0101111010110000110010 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fminnmp(Rn_FPR32, 2:1); -@endif } # C7.2.116 FMINNMP (vector) page C7-1652 line 92205 MATCH x2ea0c400/mask=xbfa0fc00 @@ -10793,9 +6830,7 @@ is b_1031=0b0101111010110000110010 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd :fminnmp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x18 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fminnmp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.116 FMINNMP (vector) page C7-1652 line 92205 MATCH x2ea0c400/mask=xbfa0fc00 @@ -10806,9 +6841,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & :fminnmp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x18 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fminnmp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.116 FMINNMP (vector) page C7-1652 line 92205 MATCH x2ea0c400/mask=xbfa0fc00 @@ -10819,9 +6852,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & :fminnmp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x18 & b_1010=1 & 
Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fminnmp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.116 FMINNMP (vector) page C7-1652 line 92205 MATCH x2ec00400/mask=xbfe0fc00 @@ -10833,9 +6864,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & :fminnmp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fminnmp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.116 FMINNMP (vector) page C7-1652 line 92205 MATCH x2ec00400/mask=xbfe0fc00 @@ -10847,9 +6876,7 @@ is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR :fminnmp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fminnmp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.117 FMINNMV page C7-1654 line 92338 MATCH x2eb0c800/mask=xbfbffc00 @@ -10860,9 +6887,7 @@ is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VP :fminnmv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0xc & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fminnmv(Rn_VPR128.4S, 4:1); -@endif } # C7.2.117 FMINNMV page C7-1654 line 92338 MATCH x0eb0c800/mask=xbffffc00 @@ -10874,9 +6899,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x :fminnmv Rd_FPR16, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111010110000110010 & Rd_FPR16 & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fminnmv(Rn_VPR64.4H, 2:1); -@endif } # C7.2.117 FMINNMV page C7-1654 line 92338 MATCH x0eb0c800/mask=xbffffc00 @@ -10888,9 +6911,7 @@ is b_31=0 & b_30=0 & b_1029=0b00111010110000110010 & Rd_FPR16 & Rn_VPR64.4H & Zd :fminnmv Rd_FPR16, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111010110000110010 & Rd_FPR16 & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fminnmv(Rn_VPR128.8H, 2:1); -@endif } # C7.2.118 FMINP (scalar) page C7-1656 line 92446 MATCH x7eb0f800/mask=xffbffc00 @@ -10901,9 +6922,7 @@ is b_31=0 & b_30=1 & b_1029=0b00111010110000110010 & Rd_FPR16 & Rn_VPR128.8H & Z :fminp Rd_FPR64, Rn_VPR128.2D is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x38 & b_1216=0xf & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fminp(Rn_VPR128.2D, 8:1); -@endif } # C7.2.118 FMINP (scalar) page C7-1656 line 92446 MATCH x7eb0f800/mask=xffbffc00 @@ -10914,9 +6933,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x38 & b_1216=0xf & b_1011=2 & :fminp Rd_FPR32, Rn_VPR64.2S is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x18 & b_1216=0xf & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fminp(Rn_VPR64.2S, 4:1); -@endif } # C7.2.118 FMINP (scalar) page C7-1656 line 92446 MATCH 
x5eb0f800/mask=xfffffc00 @@ -10928,9 +6945,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x18 & b_1216=0xf & b_1011=2 & :fminp Rd_FPR16, vRn_VPR128^".2H" is b_1031=0b0101111010110000111110 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fminp(Rn_FPR32, 2:1); -@endif } # C7.2.119 FMINP (vector) page C7-1658 line 92550 MATCH x2ea0f400/mask=xbfa0fc00 @@ -10941,9 +6956,7 @@ is b_1031=0b0101111010110000111110 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd :fminp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1e & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fminp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.119 FMINP (vector) page C7-1658 line 92550 MATCH x2ea0f400/mask=xbfa0fc00 @@ -10954,9 +6967,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & :fminp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1e & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fminp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.119 FMINP (vector) page C7-1658 line 92550 MATCH x2ea0f400/mask=xbfa0fc00 @@ -10967,9 +6978,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & :fminp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1e & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fminp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.119 FMINP (vector) page C7-1658 line 92550 MATCH x2ec03400/mask=xbfe0fc00 @@ -10981,9 +6990,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & :fminp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fminp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.119 FMINP (vector) page C7-1658 line 92550 MATCH x2ec03400/mask=xbfe0fc00 @@ -10995,9 +7002,7 @@ is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR :fminp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fminp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.120 FMINV page C7-1660 line 92681 MATCH x2eb0f800/mask=xbfbffc00 @@ -11008,9 +7013,7 @@ is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VP :fminv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0xf & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fminv(Rn_VPR128.4S, 4:1); -@endif } # C7.2.120 FMINV page C7-1660 line 92681 MATCH x0eb0f800/mask=xbffffc00 @@ -11022,9 +7025,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & 
advSIMD3.size=2 & b_1721=0x18 & b_1216=0x :fminv Rd_FPR16, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111010110000111110 & Rd_FPR16 & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fminv(Rn_VPR64.4H, 2:1); -@endif } # C7.2.120 FMINV page C7-1660 line 92681 MATCH x0eb0f800/mask=xbffffc00 @@ -11036,9 +7037,7 @@ is b_31=0 & b_30=0 & b_1029=0b00111010110000111110 & Rd_FPR16 & Rn_VPR64.4H & Zd :fminv Rd_FPR16, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111010110000111110 & Rd_FPR16 & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fminv(Rn_VPR128.8H, 2:1); -@endif } # C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x0f801000/mask=xbf80f400 @@ -11050,42 +7049,15 @@ is b_31=0 & b_30=1 & b_1029=0b00111010110000111110 & Rd_FPR16 & Rn_VPR128.8H & Z :fmla Rd_VPR128.2D, Rn_VPR128.2D, Re_VPR128.D.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=3 & b_2121=0 & Re_VPR128.D.vIndex & vIndex & Re_VPR128.D & b_1215=0x1 & b_1010=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.D[vIndex] lane size 8 - simd_address_at(tmp1, Re_VPR128.D, vIndex:4, 8, 16); - local tmp2:8 = * [register]:8 tmp1; - # simd infix TMPQ1 = Rn_VPR128.2D f* tmp2 on lane size 8 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp5, TMPQ1, 0, 8, 16); - * [register]:8 tmp5 = (* [register]:8 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - * [register]:8 tmp5 = (* [register]:8 tmp4) f* tmp2; + local tmp1:8 = Re_VPR128.D.vIndex; + # simd infix TMPQ1 = Rn_VPR128.2D f* tmp1 on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] f* tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] f* tmp1; # simd infix Rd_VPR128.2D = Rd_VPR128.2D f+ TMPQ1 on lane size 8 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp7, TMPQ1, 0, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp6) f+ (* [register]:8 tmp7); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp7, TMPQ1, 1, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp6) f+ (* [register]:8 tmp7); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] f+ TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] f+ TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); - local tmp2:16 = SIMD_FLOAT_MULT(Rn_VPR128.2D, tmp1); - local tmpd:16 = SIMD_FLOAT_ADD(Rd_VPR128.2D, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); - Rd_VPR128.2D = NEON_fmla(Rd_VPR128.2D, Rn_VPR128.2D, tmp1, 8:1); -@endif } # C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x0f801000/mask=xbf80f400 @@ -11097,42 +7069,15 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=3 & b_2121=0 & Re_VPR128.D. 
:fmla Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x1 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix TMPD1 = Rn_VPR64.2S f* tmp2 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp5, TMPD1, 0, 4, 8); - * [register]:4 tmp5 = (* [register]:4 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp5, TMPD1, 1, 4, 8); - * [register]:4 tmp5 = (* [register]:4 tmp4) f* tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPD1 = Rn_VPR64.2S f* tmp1 on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] f* tmp1; + TMPD1[32,32] = Rn_VPR64.2S[32,32] f* tmp1; # simd infix Rd_VPR64.2S = Rd_VPR64.2S f+ TMPD1 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp7, TMPD1, 0, 4, 8); - simd_address_at(tmp8, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp8 = (* [register]:4 tmp6) f+ (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp7, TMPD1, 1, 4, 8); - simd_address_at(tmp8, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp8 = (* [register]:4 tmp6) f+ (* [register]:4 tmp7); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] f+ TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] f+ TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmp2:8 = SIMD_FLOAT_MULT(Rn_VPR64.2S, tmp1); - local tmpd:8 = SIMD_FLOAT_ADD(Rd_VPR64.2S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR64.2S = NEON_fmla(Rd_VPR64.2S, Rn_VPR64.2S, tmp1, 4:1); -@endif } # C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x0f801000/mask=xbf80f400 @@ -11144,56 +7089,19 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :fmla Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x1 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix TMPQ1 = Rn_VPR128.4S f* tmp2 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) f* tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPQ1 = Rn_VPR128.4S f* tmp1 on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] f* tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] f* tmp1; + TMPQ1[64,32] = 
Rn_VPR128.4S[64,32] f* tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] f* tmp1; # simd infix Rd_VPR128.4S = Rd_VPR128.4S f+ TMPQ1 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp7, TMPQ1, 0, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) f+ (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp7, TMPQ1, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) f+ (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp7, TMPQ1, 2, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) f+ (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp7, TMPQ1, 3, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) f+ (* [register]:4 tmp7); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] f+ TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] f+ TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] f+ TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] f+ TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmp2:16 = SIMD_FLOAT_MULT(Rn_VPR128.4S, tmp1); - local tmpd:16 = SIMD_FLOAT_ADD(Rd_VPR128.4S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.4S = NEON_fmla(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1); -@endif } # C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x5f001000/mask=xffc0f400 @@ -11206,23 +7114,11 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :fmla Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2231=0b0101111100 & b_1215=0b0001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - local tmp3:2 = Rn_FPR16 f* tmp2; - Rd_FPR16 = Rd_FPR16 f+ tmp3; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; local tmp2:2 = Rn_FPR16 f* tmp1; - local tmpd:2 = Rd_FPR16 f+ tmp2; - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_FPR16 = NEON_fmla(Rd_FPR16, Rn_FPR16, tmp1, 2:1); -@endif + Rd_FPR16 = Rd_FPR16 f+ tmp2; + zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x5f801000/mask=xff80f400 @@ -11235,23 +7131,11 @@ is b_2231=0b0101111100 & b_1215=0b0001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd :fmla Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex is b_2331=0b010111111 & b_22=0 & b_1215=0b0001 & b_10=0 & Re_VPR128.S & vIndex & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - local tmp3:4 = Rn_FPR32 f* tmp2; - Rd_FPR32 = Rd_FPR32 f+ tmp3; - zext_zs(Zd); # zero upper 28 bytes of Zd 
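
The FMLA (by element) semantics above all reduce to the same shape: read the indexed lane of Vm once, multiply it against every lane of the first source, then accumulate lane-wise into the destination. The C sketch below illustrates that shape for the 4S case only; the function name, the fixed lane count, and the split into a separate multiply then add (mirroring the TMPQ1/TMPD1 temporaries rather than a fused multiply-add) are illustrative assumptions, not Ghidra code or p-code.

/*
 * Illustrative sketch of the "by element" pattern; like the new SLEIGH
 * semantics, it models the operation as a lane-wise multiply into a
 * temporary followed by a lane-wise add, not a fused MAC.
 */
#include <stdio.h>

enum { LANES = 4 };                         /* the .4S case */

static void fmla_by_element(float vd[LANES], const float vn[LANES],
                            const float vm[LANES], int index)
{
    float elem = vm[index];                 /* read the indexed lane once    */
    float tmp[LANES];                       /* plays the role of TMPQ1       */
    for (int i = 0; i < LANES; i++)
        tmp[i] = vn[i] * elem;              /* lane-wise multiply            */
    for (int i = 0; i < LANES; i++)
        vd[i] = vd[i] + tmp[i];             /* lane-wise accumulate into Vd  */
}

int main(void)
{
    float vd[LANES] = { 1.0f, 1.0f, 1.0f, 1.0f };
    float vn[LANES] = { 2.0f, 3.0f, 4.0f, 5.0f };
    float vm[LANES] = { 0.5f, 10.0f, 20.0f, 30.0f };

    fmla_by_element(vd, vn, vm, 0);         /* fmla v0.4s, v1.4s, v2.s[0]    */

    for (int i = 0; i < LANES; i++)
        printf("%g\n", vd[i]);              /* prints 2, 2.5, 3, 3.5         */
    return 0;
}
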
-@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp1:4 = Re_VPR128.S.vIndex; local tmp2:4 = Rn_FPR32 f* tmp1; - local tmpd:4 = Rd_FPR32 f+ tmp2; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_FPR32 = NEON_fmla(Rd_FPR32, Rn_FPR32, tmp1, 4:1); -@endif + Rd_FPR32 = Rd_FPR32 f+ tmp2; + zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x5f801000/mask=xff80f400 @@ -11264,23 +7148,11 @@ is b_2331=0b010111111 & b_22=0 & b_1215=0b0001 & b_10=0 & Re_VPR128.S & vIndex & :fmla Rd_FPR64, Rn_FPR64, Re_VPR128.D.vIndex is b_2331=0b010111111 & b_22=1 & b_21=0 & b_1215=0b0001 & b_10=0 & Re_VPR128.D & vIndex & Rd_FPR64 & Rn_FPR64 & Re_VPR128.D.vIndex & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.D[vIndex] lane size 8 - simd_address_at(tmp1, Re_VPR128.D, vIndex:4, 8, 16); - local tmp2:8 = * [register]:8 tmp1; - local tmp3:8 = Rn_FPR64 f* tmp2; - Rd_FPR64 = Rd_FPR64 f+ tmp3; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); + local tmp1:8 = Re_VPR128.D.vIndex; local tmp2:8 = Rn_FPR64 f* tmp1; - local tmpd:8 = Rd_FPR64 f+ tmp2; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); - Rd_FPR64 = NEON_fmla(Rd_FPR64, Rn_FPR64, tmp1, 8:1); -@endif + Rd_FPR64 = Rd_FPR64 f+ tmp2; + zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x0f001000/mask=xbfc0f400 @@ -11293,56 +7165,19 @@ is b_2331=0b010111111 & b_22=1 & b_21=0 & b_1215=0b0001 & b_10=0 & Re_VPR128.D & :fmla Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2229=0b00111100 & b_1215=0b0001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix TMPD1 = Rn_VPR64.4H f* tmp2 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp5, TMPD1, 0, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp5, TMPD1, 1, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp5, TMPD1, 2, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp5, TMPD1, 3, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD1 = Rn_VPR64.4H f* tmp1 on lane size 2 + TMPD1[0,16] = Rn_VPR64.4H[0,16] f* tmp1; + TMPD1[16,16] = Rn_VPR64.4H[16,16] f* tmp1; + TMPD1[32,16] = Rn_VPR64.4H[32,16] f* tmp1; + TMPD1[48,16] = Rn_VPR64.4H[48,16] f* tmp1; # simd infix Rd_VPR64.4H = Rd_VPR64.4H f+ TMPD1 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp7, TMPD1, 0, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f+ (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp7, TMPD1, 1, 2, 8); - 
simd_address_at(tmp8, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f+ (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp7, TMPD1, 2, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f+ (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp7, TMPD1, 3, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f+ (* [register]:2 tmp7); + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] f+ TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] f+ TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] f+ TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] f+ TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp2:8 = SIMD_FLOAT_MULT(Rn_VPR64.4H, tmp1); - local tmpd:8 = SIMD_FLOAT_ADD(Rd_VPR64.4H, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.4H = NEON_fmla(Rd_VPR64.4H, Rn_VPR64.4H, tmp1, 2:1); -@endif } # C7.2.121 FMLA (by element) page C7-1662 line 92788 MATCH x0f001000/mask=xbfc0f400 @@ -11355,84 +7190,27 @@ is b_31=0 & b_30=0 & b_2229=0b00111100 & b_1215=0b0001 & b_10=0 & Re_VPR128Lo.H :fmla Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2229=0b00111100 & b_1215=0b0001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix TMPQ1 = Rn_VPR128.8H f* tmp2 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPQ1 = Rn_VPR128.8H f* tmp1 on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] f* tmp1; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] f* tmp1; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] f* tmp1; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] f* tmp1; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] f* tmp1; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] f* tmp1; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] f* tmp1; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] f* tmp1; # 
simd infix Rd_VPR128.8H = Rd_VPR128.8H f+ TMPQ1 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp7, TMPQ1, 0, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f+ (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp7, TMPQ1, 1, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f+ (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp7, TMPQ1, 2, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f+ (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp7, TMPQ1, 3, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f+ (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp7, TMPQ1, 4, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f+ (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp7, TMPQ1, 5, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f+ (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp7, TMPQ1, 6, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f+ (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp7, TMPQ1, 7, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f+ (* [register]:2 tmp7); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] f+ TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] f+ TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] f+ TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] f+ TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] f+ TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] f+ TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] f+ TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] f+ TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp2:16 = SIMD_FLOAT_MULT(Rn_VPR128.8H, tmp1); - local tmpd:16 = SIMD_FLOAT_ADD(Rd_VPR128.8H, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.8H = NEON_fmla(Rd_VPR128.8H, Rn_VPR128.8H, tmp1, 2:1); -@endif } # C7.2.122 FMLA (vector) page C7-1666 line 93022 MATCH x0e20cc00/mask=xbfa0fc00 @@ -11444,39 +7222,13 @@ is b_31=0 & b_30=1 & b_2229=0b00111100 & b_1215=0b0001 & b_10=0 & Re_VPR128Lo.H :fmla Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.2D & b_1115=0x19 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.2D f* Rm_VPR128.2D on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) f* (* [register]:8 tmp3); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 
16); - simd_address_at(tmp3, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) f* (* [register]:8 tmp3); + TMPQ1[0,64] = Rn_VPR128.2D[0,64] f* Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] f* Rm_VPR128.2D[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D f+ TMPQ1 on lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp6, TMPQ1, 0, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp5) f+ (* [register]:8 tmp6); - simd_address_at(tmp5, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp6, TMPQ1, 1, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp5) f+ (* [register]:8 tmp6); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] f+ TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] f+ TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_FLOAT_MULT(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - local tmpd:16 = SIMD_FLOAT_ADD(Rd_VPR128.2D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fmla(Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.122 FMLA (vector) page C7-1666 line 93022 MATCH x0e20cc00/mask=xbfa0fc00 @@ -11488,39 +7240,13 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.2D :fmla Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.2S & b_1115=0x19 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Re_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.2S f* Rm_VPR64.2S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); + TMPD1[0,32] = Rn_VPR64.2S[0,32] f* Rm_VPR64.2S[0,32]; + TMPD1[32,32] = Rn_VPR64.2S[32,32] f* Rm_VPR64.2S[32,32]; # simd infix Rd_VPR64.2S = Rd_VPR64.2S f+ TMPD1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPD1, 0, 4, 8); - simd_address_at(tmp7, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) f+ (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPD1, 1, 4, 8); - simd_address_at(tmp7, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) f+ (* [register]:4 tmp6); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] f+ TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] f+ TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_FLOAT_MULT(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - local tmpd:8 = SIMD_FLOAT_ADD(Rd_VPR64.2S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fmla(Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.122 FMLA (vector) page C7-1666 line 93022 MATCH x0e20cc00/mask=xbfa0fc00 @@ -11532,55 +7258,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & 
b_2121=1 & Rm_VPR64.2S :fmla Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.4S & b_1115=0x19 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S f* Rm_VPR128.4S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); + TMPQ1[0,32] = Rn_VPR128.4S[0,32] f* Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] f* Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] f* Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] f* Rm_VPR128.4S[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S f+ TMPQ1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp6, TMPQ1, 0, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f+ (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp6, TMPQ1, 1, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f+ (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp6, TMPQ1, 2, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f+ (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp6, TMPQ1, 3, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f+ (* [register]:4 tmp6); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] f+ TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] f+ TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] f+ TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] f+ TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_FLOAT_MULT(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - local tmpd:16 = SIMD_FLOAT_ADD(Rd_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fmla(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.122 FMLA (vector) page C7-1666 line 93022 MATCH x0e400c00/mask=xbfe0fc00 @@ -11593,39 +7281,13 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.4S :fmla Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.4H f* Rm_VPR64.4H on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, 
Rn_VPR64.4H, 0, 4, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 0, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 4, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); + TMPD1[0,32] = Rn_VPR64.4H[0,32] f* Rm_VPR64.4H[0,32]; + TMPD1[32,32] = Rn_VPR64.4H[32,32] f* Rm_VPR64.4H[32,32]; # simd infix Rd_VPR64.4H = Rd_VPR64.4H f+ TMPD1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR64.4H, 0, 4, 8); - simd_address_at(tmp6, TMPD1, 0, 4, 8); - simd_address_at(tmp7, Rd_VPR64.4H, 0, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) f+ (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR64.4H, 1, 4, 8); - simd_address_at(tmp6, TMPD1, 1, 4, 8); - simd_address_at(tmp7, Rd_VPR64.4H, 1, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) f+ (* [register]:4 tmp6); + Rd_VPR64.4H[0,32] = Rd_VPR64.4H[0,32] f+ TMPD1[0,32]; + Rd_VPR64.4H[32,32] = Rd_VPR64.4H[32,32] f+ TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_FLOAT_MULT(Rn_VPR64.4H, Rm_VPR64.4H, 4:1); - local tmpd:8 = SIMD_FLOAT_ADD(Rd_VPR64.4H, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fmla(Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.122 FMLA (vector) page C7-1666 line 93022 MATCH x0e400c00/mask=xbfe0fc00 @@ -11638,55 +7300,17 @@ is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000011 & Rd_VPR64.4H & Rn_VPR :fmla Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H f* Rm_VPR128.8H on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 4, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 4, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); + TMPQ1[0,32] = Rn_VPR128.8H[0,32] f* Rm_VPR128.8H[0,32]; + TMPQ1[32,32] = Rn_VPR128.8H[32,32] f* Rm_VPR128.8H[32,32]; + TMPQ1[64,32] = Rn_VPR128.8H[64,32] f* Rm_VPR128.8H[64,32]; + TMPQ1[96,32] = Rn_VPR128.8H[96,32] f* Rm_VPR128.8H[96,32]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H f+ TMPQ1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.8H, 0, 4, 16); - simd_address_at(tmp6, TMPQ1, 0, 4, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 0, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f+ (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 1, 4, 16); - simd_address_at(tmp6, TMPQ1, 1, 4, 16); - simd_address_at(tmp7, 
Rd_VPR128.8H, 1, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f+ (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 2, 4, 16); - simd_address_at(tmp6, TMPQ1, 2, 4, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 2, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f+ (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 3, 4, 16); - simd_address_at(tmp6, TMPQ1, 3, 4, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 3, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f+ (* [register]:4 tmp6); + Rd_VPR128.8H[0,32] = Rd_VPR128.8H[0,32] f+ TMPQ1[0,32]; + Rd_VPR128.8H[32,32] = Rd_VPR128.8H[32,32] f+ TMPQ1[32,32]; + Rd_VPR128.8H[64,32] = Rd_VPR128.8H[64,32] f+ TMPQ1[64,32]; + Rd_VPR128.8H[96,32] = Rd_VPR128.8H[96,32] f+ TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_FLOAT_MULT(Rn_VPR128.8H, Rm_VPR128.8H, 4:1); - local tmpd:16 = SIMD_FLOAT_ADD(Rd_VPR128.8H, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fmla(Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.123 FMLAL, FMLAL2 (by element) page C7-1668 line 93140 MATCH x0f800000/mask=xbfc0f400 @@ -11699,56 +7323,19 @@ is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000011 & Rd_VPR128.8H & Rn_VP :fmlal Rd_VPR64.2S, vRn_VPR64^".2H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2329=0b0011111 & b_22=0 & b_1215=0b0000 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR64, 0, 4, 8); - TMPS1 = * [register]:4 tmp1; - local tmp3:4 = 0; + TMPS1 = Rn_VPR64[0,32]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp3, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp4:2 = * [register]:2 tmp3; - # simd infix TMPS2 = TMPS1 f* tmp4 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp6, TMPS1, 0, 2, 4); - simd_address_at(tmp7, TMPS2, 0, 2, 4); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPS1, 1, 2, 4); - simd_address_at(tmp7, TMPS2, 1, 2, 4); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPS2 = TMPS1 f* tmp2 on lane size 2 + TMPS2[0,16] = TMPS1[0,16] f* tmp2; + TMPS2[16,16] = TMPS1[16,16] f* tmp2; # simd resize TMPD3 = float2float(TMPS2) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPS2, 0, 2, 4); - simd_address_at(tmp10, TMPD3, 0, 4, 8); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPS2, 1, 2, 4); - simd_address_at(tmp10, TMPD3, 1, 4, 8); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); + TMPD3[0,32] = float2float(TMPS2[0,16]); + TMPD3[32,32] = float2float(TMPS2[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD3 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp12, TMPD3, 0, 4, 8); - simd_address_at(tmp13, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp12, TMPD3, 1, 4, 8); - simd_address_at(tmp13, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD3[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD3[32,32]; 
zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR64, 0:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp3:4 = SIMD_FLOAT_MULT(tmp1, tmp2); - local tmp4:8 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.2S = NEON_fmlal(Rd_VPR64.2S, Rn_VPR64, tmp1, 4:1); -@endif } # C7.2.123 FMLAL, FMLAL2 (by element) page C7-1668 line 93140 MATCH x0f800000/mask=xbfc0f400 @@ -11761,76 +7348,25 @@ is b_31=0 & b_30=0 & b_2329=0b0011111 & b_22=0 & b_1215=0b0000 & b_10=0 & Re_VPR :fmlal Rd_VPR128.4S, vRn_VPR128^".4H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=0 & b_1215=0b0000 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128, 0, 8, 16); - TMPD1 = * [register]:8 tmp1; - local tmp3:4 = 0; + TMPD1 = Rn_VPR128[0,64]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp3, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp4:2 = * [register]:2 tmp3; - # simd infix TMPD2 = TMPD1 f* tmp4 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp6, TMPD1, 0, 2, 8); - simd_address_at(tmp7, TMPD2, 0, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPD1, 1, 2, 8); - simd_address_at(tmp7, TMPD2, 1, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPD1, 2, 2, 8); - simd_address_at(tmp7, TMPD2, 2, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPD1, 3, 2, 8); - simd_address_at(tmp7, TMPD2, 3, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD2 = TMPD1 f* tmp2 on lane size 2 + TMPD2[0,16] = TMPD1[0,16] f* tmp2; + TMPD2[16,16] = TMPD1[16,16] f* tmp2; + TMPD2[32,16] = TMPD1[32,16] f* tmp2; + TMPD2[48,16] = TMPD1[48,16] f* tmp2; # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD2, 0, 2, 8); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD2, 1, 2, 8); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD2, 2, 2, 8); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD2, 3, 2, 8); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); + TMPQ3[0,32] = float2float(TMPD2[0,16]); + TMPQ3[32,32] = float2float(TMPD2[16,16]); + TMPQ3[64,32] = float2float(TMPD2[32,16]); + TMPQ3[96,32] = float2float(TMPD2[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 
tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, 0:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp3:8 = SIMD_FLOAT_MULT(tmp1, tmp2); - local tmp4:16 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_fmlal(Rd_VPR128.4S, Rn_VPR128, tmp1, 2:1); -@endif } # C7.2.123 FMLAL, FMLAL2 (by element) page C7-1668 line 93140 MATCH x2f808000/mask=xbfc0f400 @@ -11843,56 +7379,19 @@ is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=0 & b_1215=0b0000 & b_10=0 & Re_VPR :fmlal2 Rd_VPR64.2S, vRn_VPR64^".2H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2329=0b1011111 & b_22=0 & b_1215=0b1000 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR64, 1, 4, 8); - TMPS1 = * [register]:4 tmp1; - local tmp3:4 = 0; + TMPS1 = Rn_VPR64[32,32]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp3, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp4:2 = * [register]:2 tmp3; - # simd infix TMPS2 = TMPS1 f* tmp4 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp6, TMPS1, 0, 2, 4); - simd_address_at(tmp7, TMPS2, 0, 2, 4); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPS1, 1, 2, 4); - simd_address_at(tmp7, TMPS2, 1, 2, 4); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPS2 = TMPS1 f* tmp2 on lane size 2 + TMPS2[0,16] = TMPS1[0,16] f* tmp2; + TMPS2[16,16] = TMPS1[16,16] f* tmp2; # simd resize TMPD3 = float2float(TMPS2) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPS2, 0, 2, 4); - simd_address_at(tmp10, TMPD3, 0, 4, 8); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPS2, 1, 2, 4); - simd_address_at(tmp10, TMPD3, 1, 4, 8); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); + TMPD3[0,32] = float2float(TMPS2[0,16]); + TMPD3[32,32] = float2float(TMPS2[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD3 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp12, TMPD3, 0, 4, 8); - simd_address_at(tmp13, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp12, TMPD3, 1, 4, 8); - simd_address_at(tmp13, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); + 
Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD3[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD3[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR64, 1:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp3:4 = SIMD_FLOAT_MULT(tmp1, tmp2); - local tmp4:8 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.2S = NEON_fmlal2(Rd_VPR64.2S, Rn_VPR64, tmp1, 2:1); -@endif } # C7.2.123 FMLAL, FMLAL2 (by element) page C7-1668 line 93140 MATCH x2f808000/mask=xbfc0f400 @@ -11905,76 +7404,25 @@ is b_31=0 & b_30=0 & b_2329=0b1011111 & b_22=0 & b_1215=0b1000 & b_10=0 & Re_VPR :fmlal2 Rd_VPR128.4S, vRn_VPR128^".4H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2329=0b1011111 & b_22=0 & b_1215=0b1000 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; - local tmp3:4 = 0; + TMPD1 = Rn_VPR128[64,64]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp3, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp4:2 = * [register]:2 tmp3; - # simd infix TMPD2 = TMPD1 f* tmp4 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp6, TMPD1, 0, 2, 8); - simd_address_at(tmp7, TMPD2, 0, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPD1, 1, 2, 8); - simd_address_at(tmp7, TMPD2, 1, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPD1, 2, 2, 8); - simd_address_at(tmp7, TMPD2, 2, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPD1, 3, 2, 8); - simd_address_at(tmp7, TMPD2, 3, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD2 = TMPD1 f* tmp2 on lane size 2 + TMPD2[0,16] = TMPD1[0,16] f* tmp2; + TMPD2[16,16] = TMPD1[16,16] f* tmp2; + TMPD2[32,16] = TMPD1[32,16] f* tmp2; + TMPD2[48,16] = TMPD1[48,16] f* tmp2; # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD2, 0, 2, 8); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD2, 1, 2, 8); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD2, 2, 2, 8); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD2, 3, 2, 8); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); + TMPQ3[0,32] = float2float(TMPD2[0,16]); + TMPQ3[32,32] = float2float(TMPD2[16,16]); + TMPQ3[64,32] = float2float(TMPD2[32,16]); + TMPQ3[96,32] = float2float(TMPD2[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 1, 4, 16); - 
simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, 1:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp3:8 = SIMD_FLOAT_MULT(tmp1, tmp2); - local tmp4:16 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_fmlal2(Rd_VPR128.4S, Rn_VPR128, tmp1, 2:1); -@endif } # C7.2.124 FMLAL, FMLAL2 (vector) page C7-1670 line 93272 MATCH x0e20ec00/mask=xbfe0fc00 @@ -11987,57 +7435,18 @@ is b_31=0 & b_30=1 & b_2329=0b1011111 & b_22=0 & b_1215=0b1000 & b_10=0 & Re_VPR :fmlal Rd_VPR64.2S, vRn_VPR64^".2H", vRm_VPR64^".2H" is b_31=0 & b_30=0 & b_2329=0b0011100 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & vRm_VPR64 & Rm_VPR64 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR64, 0, 4, 8); - TMPS1 = * [register]:4 tmp1; - local tmp3:4 = 0; - simd_address_at(tmp3, Rm_VPR64, 0, 4, 8); - TMPS2 = * [register]:4 tmp3; + TMPS1 = Rn_VPR64[0,32]; + TMPS2 = Rm_VPR64[0,32]; # simd infix TMPS3 = TMPS1 f* TMPS2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, TMPS1, 0, 2, 4); - simd_address_at(tmp7, TMPS2, 0, 2, 4); - simd_address_at(tmp8, TMPS3, 0, 2, 4); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPS1, 1, 2, 4); - simd_address_at(tmp7, TMPS2, 1, 2, 4); - simd_address_at(tmp8, TMPS3, 1, 2, 4); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); + TMPS3[0,16] = TMPS1[0,16] f* TMPS2[0,16]; + TMPS3[16,16] = TMPS1[16,16] f* TMPS2[16,16]; # simd resize TMPD4 = float2float(TMPS3) (lane size 2 to 4) - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPS3, 0, 2, 4); - simd_address_at(tmp11, TMPD4, 0, 4, 8); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPS3, 1, 2, 4); - simd_address_at(tmp11, TMPD4, 1, 4, 8); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); + TMPD4[0,32] = float2float(TMPS3[0,16]); + TMPD4[32,32] = float2float(TMPS3[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp13, TMPD4, 0, 4, 8); - simd_address_at(tmp14, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp13, TMPD4, 1, 4, 8); - 
simd_address_at(tmp14, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD4[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD4[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR64, 0:1); - local tmp2:4 = SIMD_PIECE(Rm_VPR64, 0:1); - local tmp3:4 = SIMD_FLOAT_MULT(tmp1, tmp2, 2:1); - local tmp4:8 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fmlal(Rd_VPR64.2S, Rn_VPR64, Rm_VPR64, 2:1); -@endif } # C7.2.124 FMLAL, FMLAL2 (vector) page C7-1670 line 93272 MATCH x0e20ec00/mask=xbfe0fc00 @@ -12050,79 +7459,24 @@ is b_31=0 & b_30=0 & b_2329=0b0011100 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_V :fmlal Rd_VPR128.4S, vRn_VPR128^".4H", Rm_VPR64.4H is b_31=0 & b_30=1 & b_2329=0b0011100 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128, 0, 8, 16); - TMPD1 = * [register]:8 tmp1; - local tmp3:4 = 0; - simd_address_at(tmp3, Rm_VPR64.4H, 0, 8, 8); - TMPD2 = * [register]:8 tmp3; + TMPD1 = Rn_VPR128[0,64]; + TMPD2 = Rm_VPR64.4H[0,64]; # simd infix TMPD3 = TMPD1 f* TMPD2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, TMPD1, 0, 2, 8); - simd_address_at(tmp7, TMPD2, 0, 2, 8); - simd_address_at(tmp8, TMPD3, 0, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPD1, 1, 2, 8); - simd_address_at(tmp7, TMPD2, 1, 2, 8); - simd_address_at(tmp8, TMPD3, 1, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPD1, 2, 2, 8); - simd_address_at(tmp7, TMPD2, 2, 2, 8); - simd_address_at(tmp8, TMPD3, 2, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPD1, 3, 2, 8); - simd_address_at(tmp7, TMPD2, 3, 2, 8); - simd_address_at(tmp8, TMPD3, 3, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); + TMPD3[0,16] = TMPD1[0,16] f* TMPD2[0,16]; + TMPD3[16,16] = TMPD1[16,16] f* TMPD2[16,16]; + TMPD3[32,16] = TMPD1[32,16] f* TMPD2[32,16]; + TMPD3[48,16] = TMPD1[48,16] f* TMPD2[48,16]; # simd resize TMPQ4 = float2float(TMPD3) (lane size 2 to 4) - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPD3, 0, 2, 8); - simd_address_at(tmp11, TMPQ4, 0, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPD3, 1, 2, 8); - simd_address_at(tmp11, TMPQ4, 1, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPD3, 2, 2, 8); - simd_address_at(tmp11, TMPQ4, 2, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPD3, 3, 2, 8); - simd_address_at(tmp11, TMPQ4, 3, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); + TMPQ4[0,32] = float2float(TMPD3[0,16]); + TMPQ4[32,32] = float2float(TMPD3[16,16]); + TMPQ4[64,32] = float2float(TMPD3[32,16]); + TMPQ4[96,32] = float2float(TMPD3[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 
0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, 0:1); - local tmp2:8 = SIMD_PIECE(Rm_VPR64.4H, 0:1); - local tmp3:8 = SIMD_FLOAT_MULT(tmp1, tmp2, 2:1); - local tmp4:16 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fmlal(Rd_VPR128.4S, Rn_VPR128, Rm_VPR64.4H, 2:1); -@endif } # C7.2.124 FMLAL, FMLAL2 (vector) page C7-1670 line 93272 MATCH x2e20cc00/mask=xbfe0fc00 @@ -12135,62 +7489,23 @@ is b_31=0 & b_30=1 & b_2329=0b0011100 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_V :fmlal2 Rd_VPR64.2S, vRn_VPR64^".2H", vRm_VPR128^".2H" is b_31=0 & b_30=0 & b_2329=0b1011100 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & vRm_VPR128 & Rm_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR64, 1, 4, 8); - TMPS1 = * [register]:4 tmp1; - local tmp3:4 = 0; - simd_address_at(tmp3, Rm_VPR128, 1, 4, 16); - TMPS2 = * [register]:4 tmp3; + TMPS1 = Rn_VPR64[32,32]; + TMPS2 = Rm_VPR128[32,32]; # simd infix TMPS3 = TMPS1 f* TMPS2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, TMPS1, 0, 2, 4); - simd_address_at(tmp7, TMPS2, 0, 2, 4); - simd_address_at(tmp8, TMPS3, 0, 2, 4); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPS1, 1, 2, 4); - simd_address_at(tmp7, TMPS2, 1, 2, 4); - simd_address_at(tmp8, TMPS3, 1, 2, 4); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); + TMPS3[0,16] = TMPS1[0,16] f* TMPS2[0,16]; + TMPS3[16,16] = TMPS1[16,16] f* TMPS2[16,16]; # simd resize TMPD4 = float2float(TMPS3) (lane size 2 to 4) - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPS3, 0, 2, 4); - simd_address_at(tmp11, TMPD4, 0, 4, 8); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPS3, 1, 2, 4); - simd_address_at(tmp11, TMPD4, 1, 4, 8); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); + TMPD4[0,32] = float2float(TMPS3[0,16]); + TMPD4[32,32] = float2float(TMPS3[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp13, TMPD4, 0, 4, 8); - simd_address_at(tmp14, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - 
simd_address_at(tmp12, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp13, TMPD4, 1, 4, 8); - simd_address_at(tmp14, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD4[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD4[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR64, 1:1); - local tmp2:4 = SIMD_PIECE(Rm_VPR128, 1:1); - local tmp3:4 = SIMD_FLOAT_MULT(tmp1, tmp2, 2:1); - local tmp4:8 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fmlal2(Rd_VPR64.2S, Rn_VPR64, Rm_VPR128, 2:1); -@endif } # C7.2.124 FMLAL, FMLAL2 (vector) page C7-1670 line 93272 MATCH x2e20cc00/mask=xbfe0fc00 # CONSTRUCT x6e20cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES -# SMACRO ARG1 ARG2[1]:8 ARG3[1]:8 $f*@2 $float2float@2:16 &=$+@4 +# SMACRO ARG1 ARG2[1]:8 ARG3 $f*@2 $float2float@2:16 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal2/3@2 # AUNIT --inst x6e20cc00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 4S when Q = 1 @@ -12198,79 +7513,23 @@ is b_31=0 & b_30=0 & b_2329=0b1011100 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_V :fmlal2 Rd_VPR128.4S, vRn_VPR128^".4H", Rm_VPR64.4H is b_31=0 & b_30=1 & b_2329=0b1011100 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; - local tmp3:4 = 0; - simd_address_at(tmp3, Rm_VPR64.4H, 1, 8, 8); - TMPD2 = * [register]:8 tmp3; - # simd infix TMPD3 = TMPD1 f* TMPD2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, TMPD1, 0, 2, 8); - simd_address_at(tmp7, TMPD2, 0, 2, 8); - simd_address_at(tmp8, TMPD3, 0, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPD1, 1, 2, 8); - simd_address_at(tmp7, TMPD2, 1, 2, 8); - simd_address_at(tmp8, TMPD3, 1, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPD1, 2, 2, 8); - simd_address_at(tmp7, TMPD2, 2, 2, 8); - simd_address_at(tmp8, TMPD3, 2, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPD1, 3, 2, 8); - simd_address_at(tmp7, TMPD2, 3, 2, 8); - simd_address_at(tmp8, TMPD3, 3, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - # simd resize TMPQ4 = float2float(TMPD3) (lane size 2 to 4) - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPD3, 0, 2, 8); - simd_address_at(tmp11, TMPQ4, 0, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPD3, 1, 2, 8); - simd_address_at(tmp11, TMPQ4, 1, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPD3, 2, 2, 8); - simd_address_at(tmp11, TMPQ4, 2, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPD3, 3, 2, 8); - simd_address_at(tmp11, TMPQ4, 3, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 
16); - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); + TMPD1 = Rn_VPR128[64,64]; + # simd infix TMPD2 = TMPD1 f* Rm_VPR64.4H on lane size 2 + TMPD2[0,16] = TMPD1[0,16] f* Rm_VPR64.4H[0,16]; + TMPD2[16,16] = TMPD1[16,16] f* Rm_VPR64.4H[16,16]; + TMPD2[32,16] = TMPD1[32,16] f* Rm_VPR64.4H[32,16]; + TMPD2[48,16] = TMPD1[48,16] f* Rm_VPR64.4H[48,16]; + # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) + TMPQ3[0,32] = float2float(TMPD2[0,16]); + TMPQ3[32,32] = float2float(TMPD2[16,16]); + TMPQ3[64,32] = float2float(TMPD2[32,16]); + TMPQ3[96,32] = float2float(TMPD2[48,16]); + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, 1:1); - local tmp2:8 = SIMD_PIECE(Rm_VPR64.4H, 1:1); - local tmp3:8 = SIMD_FLOAT_MULT(tmp1, tmp2, 2:1); - local tmp4:16 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fmlal2(Rd_VPR128.4S, Rn_VPR128, Rm_VPR64.4H, 2:1); -@endif } # C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x0f805000/mask=xbf80f400 @@ -12282,42 +7541,15 @@ is b_31=0 & b_30=1 & b_2329=0b1011100 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_V :fmls Rd_VPR128.2D, Rn_VPR128.2D, Re_VPR128.D.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=3 & b_2121=0 & Re_VPR128.D.vIndex & vIndex & Re_VPR128.D & b_1215=0x5 & b_1010=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.D[vIndex] lane size 8 - simd_address_at(tmp1, Re_VPR128.D, vIndex:4, 8, 16); - local tmp2:8 = * [register]:8 tmp1; - # simd infix TMPQ1 = Rn_VPR128.2D f* tmp2 on lane size 8 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp5, TMPQ1, 0, 8, 16); - * [register]:8 tmp5 = (* [register]:8 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - * [register]:8 tmp5 = (* [register]:8 tmp4) f* tmp2; + local tmp1:8 = Re_VPR128.D.vIndex; + # simd infix TMPQ1 = Rn_VPR128.2D f* tmp1 on lane size 8 + TMPQ1[0,64] = Rn_VPR128.2D[0,64] f* tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] f* tmp1; # simd infix Rd_VPR128.2D = Rd_VPR128.2D f- TMPQ1 on lane size 8 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp7, TMPQ1, 0, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 0, 8, 16); - * 
[register]:8 tmp8 = (* [register]:8 tmp6) f- (* [register]:8 tmp7); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp7, TMPQ1, 1, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp6) f- (* [register]:8 tmp7); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] f- TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] f- TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); - local tmp2:16 = SIMD_FLOAT_MULT(Rn_VPR128.2D, tmp1); - local tmpd:16 = SIMD_FLOAT_SUB(Rd_VPR128.2D, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); - Rd_VPR128.2D = NEON_fmls(Rd_VPR128.2D, Rn_VPR128.2D, tmp1, 8:1); -@endif } # C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x0f805000/mask=xbf80f400 @@ -12329,42 +7561,15 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=3 & b_2121=0 & Re_VPR128.D. :fmls Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x5 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix TMPD1 = Rn_VPR64.2S f* tmp2 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp5, TMPD1, 0, 4, 8); - * [register]:4 tmp5 = (* [register]:4 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp5, TMPD1, 1, 4, 8); - * [register]:4 tmp5 = (* [register]:4 tmp4) f* tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPD1 = Rn_VPR64.2S f* tmp1 on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] f* tmp1; + TMPD1[32,32] = Rn_VPR64.2S[32,32] f* tmp1; # simd infix Rd_VPR64.2S = Rd_VPR64.2S f- TMPD1 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp7, TMPD1, 0, 4, 8); - simd_address_at(tmp8, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp8 = (* [register]:4 tmp6) f- (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp7, TMPD1, 1, 4, 8); - simd_address_at(tmp8, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp8 = (* [register]:4 tmp6) f- (* [register]:4 tmp7); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] f- TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] f- TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmp2:8 = SIMD_FLOAT_MULT(Rn_VPR64.2S, tmp1); - local tmpd:8 = SIMD_FLOAT_SUB(Rd_VPR64.2S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR64.2S = NEON_fmls(Rd_VPR64.2S, Rn_VPR64.2S, tmp1, 4:1); -@endif } # C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x0f805000/mask=xbf80f400 @@ -12376,56 +7581,19 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :fmls Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x5 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # 
simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix TMPQ1 = Rn_VPR128.4S f* tmp2 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) f* tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPQ1 = Rn_VPR128.4S f* tmp1 on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] f* tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] f* tmp1; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] f* tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] f* tmp1; # simd infix Rd_VPR128.4S = Rd_VPR128.4S f- TMPQ1 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp7, TMPQ1, 0, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) f- (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp7, TMPQ1, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) f- (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp7, TMPQ1, 2, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) f- (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp7, TMPQ1, 3, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) f- (* [register]:4 tmp7); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] f- TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] f- TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] f- TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] f- TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmp2:16 = SIMD_FLOAT_MULT(Rn_VPR128.4S, tmp1); - local tmpd:16 = SIMD_FLOAT_SUB(Rd_VPR128.4S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.4S = NEON_fmls(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1); -@endif } # C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x5f005000/mask=xffc0f400 @@ -12438,23 +7606,11 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :fmls Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2231=0b0101111100 & b_1215=0b0101 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - local tmp3:2 = Rn_FPR16 f* tmp2; - Rd_FPR16 = Rd_FPR16 f- tmp3; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp1:2 = 
Re_VPR128Lo.H.vIndexHLM; local tmp2:2 = Rn_FPR16 f* tmp1; - local tmpd:2 = Rd_FPR16 f- tmp2; - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_FPR16 = NEON_fmls(Rd_FPR16, Rn_FPR16, tmp1, 2:1); -@endif + Rd_FPR16 = Rd_FPR16 f- tmp2; + zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x5f805000/mask=xff80f400 @@ -12467,23 +7623,11 @@ is b_2231=0b0101111100 & b_1215=0b0101 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd :fmls Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex is b_2331=0b010111111 & b_22=0 & b_1215=0b0101 & b_10=0 & Re_VPR128.S & vIndex & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - local tmp3:4 = Rn_FPR32 f* tmp2; - Rd_FPR32 = Rd_FPR32 f- tmp3; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp1:4 = Re_VPR128.S.vIndex; local tmp2:4 = Rn_FPR32 f* tmp1; - local tmpd:4 = Rd_FPR32 f- tmp2; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_FPR32 = NEON_fmls(Rd_FPR32, Rn_FPR32, tmp1, 4:1); -@endif + Rd_FPR32 = Rd_FPR32 f- tmp2; + zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x5f805000/mask=xff80f400 @@ -12496,23 +7640,11 @@ is b_2331=0b010111111 & b_22=0 & b_1215=0b0101 & b_10=0 & Re_VPR128.S & vIndex & :fmls Rd_FPR64, Rn_FPR64, Re_VPR128.D.vIndex is b_2331=0b010111111 & b_22=1 & b_21=0 & b_1215=0b0101 & b_10=0 & Re_VPR128.D & vIndex & Rd_FPR64 & Rn_FPR64 & Re_VPR128.D.vIndex & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.D[vIndex] lane size 8 - simd_address_at(tmp1, Re_VPR128.D, vIndex:4, 8, 16); - local tmp2:8 = * [register]:8 tmp1; - local tmp3:8 = Rn_FPR64 f* tmp2; - Rd_FPR64 = Rd_FPR64 f- tmp3; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); + local tmp1:8 = Re_VPR128.D.vIndex; local tmp2:8 = Rn_FPR64 f* tmp1; - local tmpd:8 = Rd_FPR64 f- tmp2; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); - Rd_FPR64 = NEON_fmls(Rd_FPR64, Rn_FPR64, tmp1, 8:1); -@endif + Rd_FPR64 = Rd_FPR64 f- tmp2; + zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x0f005000/mask=xbfc0f400 @@ -12525,56 +7657,19 @@ is b_2331=0b010111111 & b_22=1 & b_21=0 & b_1215=0b0101 & b_10=0 & Re_VPR128.D & :fmls Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2229=0b00111100 & b_1215=0b0101 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix TMPD1 = Rn_VPR64.4H f* tmp2 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp5, TMPD1, 0, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp5, TMPD1, 1, 2, 8); - * 
[register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp5, TMPD1, 2, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp5, TMPD1, 3, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD1 = Rn_VPR64.4H f* tmp1 on lane size 2 + TMPD1[0,16] = Rn_VPR64.4H[0,16] f* tmp1; + TMPD1[16,16] = Rn_VPR64.4H[16,16] f* tmp1; + TMPD1[32,16] = Rn_VPR64.4H[32,16] f* tmp1; + TMPD1[48,16] = Rn_VPR64.4H[48,16] f* tmp1; # simd infix Rd_VPR64.4H = Rd_VPR64.4H f- TMPD1 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp7, TMPD1, 0, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f- (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp7, TMPD1, 1, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f- (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp7, TMPD1, 2, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f- (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp7, TMPD1, 3, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f- (* [register]:2 tmp7); + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] f- TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] f- TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] f- TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] f- TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp2:8 = SIMD_FLOAT_MULT(Rn_VPR64.4H, tmp1, 2:1); - local tmpd:8 = SIMD_FLOAT_SUB(Rd_VPR64.4H, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.4H = NEON_fmls(Rd_VPR64.4H, Rn_VPR64.4H, tmp1, 2:1); -@endif } # C7.2.125 FMLS (by element) page C7-1672 line 93397 MATCH x0f005000/mask=xbfc0f400 @@ -12587,84 +7682,27 @@ is b_31=0 & b_30=0 & b_2229=0b00111100 & b_1215=0b0101 & b_10=0 & Re_VPR128Lo.H :fmls Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2229=0b00111100 & b_1215=0b0101 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix TMPQ1 = Rn_VPR128.8H f* tmp2 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - 
simd_address_at(tmp4, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) f* tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPQ1 = Rn_VPR128.8H f* tmp1 on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] f* tmp1; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] f* tmp1; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] f* tmp1; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] f* tmp1; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] f* tmp1; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] f* tmp1; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] f* tmp1; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] f* tmp1; # simd infix Rd_VPR128.8H = Rd_VPR128.8H f- TMPQ1 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp7, TMPQ1, 0, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f- (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp7, TMPQ1, 1, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f- (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp7, TMPQ1, 2, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f- (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp7, TMPQ1, 3, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f- (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp7, TMPQ1, 4, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f- (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp7, TMPQ1, 5, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f- (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp7, TMPQ1, 6, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f- (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp7, TMPQ1, 7, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) f- (* [register]:2 tmp7); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] f- TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] f- TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] f- TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] f- TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] f- TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] f- TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] f- TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] f- TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp2:16 = SIMD_FLOAT_MULT(Rn_VPR128.8H, tmp1, 2:1); - 
local tmpd:16 = SIMD_FLOAT_SUB(Rd_VPR128.8H, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.8H = NEON_fmls(Rd_VPR128.8H, Rn_VPR128.8H, tmp1, 2:1); -@endif } # C7.2.126 FMLS (vector) page C7-1676 line 93631 MATCH x0ea0cc00/mask=xbfa0fc00 @@ -12676,39 +7714,13 @@ is b_31=0 & b_30=1 & b_2229=0b00111100 & b_1215=0b0101 & b_10=0 & Re_VPR128Lo.H :fmls Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x19 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.2D f* Rd_VPR128.2D on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) f* (* [register]:8 tmp3); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) f* (* [register]:8 tmp3); + TMPQ1[0,64] = Rn_VPR128.2D[0,64] f* Rd_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] f* Rd_VPR128.2D[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D f- TMPQ1 on lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp6, TMPQ1, 0, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp5) f- (* [register]:8 tmp6); - simd_address_at(tmp5, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp6, TMPQ1, 1, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp5) f- (* [register]:8 tmp6); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] f- TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] f- TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_FLOAT_MULT(Rn_VPR128.2D, Rd_VPR128.2D, 8:1); - local tmpd:16 = SIMD_FLOAT_SUB(Rd_VPR128.2D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fmls(Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.126 FMLS (vector) page C7-1676 line 93631 MATCH x0ea0cc00/mask=xbfa0fc00 @@ -12720,28 +7732,11 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :fmls Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x19 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.2S f* Rm_VPR64.2S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); + TMPD1[0,32] = Rn_VPR64.2S[0,32] f* Rm_VPR64.2S[0,32]; + TMPD1[32,32] = Rn_VPR64.2S[32,32] f* Rm_VPR64.2S[32,32]; Rd_VPR64.2S = Rd_VPR64.2S f- TMPD1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 
= SIMD_FLOAT_MULT(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - local tmpd:8 = Rd_VPR64.2S f- tmp1; - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fmls(Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.126 FMLS (vector) page C7-1676 line 93631 MATCH x0ea0cc00/mask=xbfa0fc00 @@ -12753,55 +7748,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :fmls Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x19 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S f* Rm_VPR128.4S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); + TMPQ1[0,32] = Rn_VPR128.4S[0,32] f* Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] f* Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] f* Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] f* Rm_VPR128.4S[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S f- TMPQ1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp6, TMPQ1, 0, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f- (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp6, TMPQ1, 1, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f- (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp6, TMPQ1, 2, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f- (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp6, TMPQ1, 3, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f- (* [register]:4 tmp6); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] f- TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] f- TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] f- TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] f- TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_FLOAT_MULT(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - local tmpd:16 = SIMD_FLOAT_SUB(Rd_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fmls(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.126 FMLS (vector) page C7-1676 line 93631 MATCH x0ec00c00/mask=xbfe0fc00 @@ -12814,39 +7771,13 @@ is 
b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :fmls Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b000011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.4H f* Rm_VPR64.4H on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 4, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 0, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 4, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); + TMPD1[0,32] = Rn_VPR64.4H[0,32] f* Rm_VPR64.4H[0,32]; + TMPD1[32,32] = Rn_VPR64.4H[32,32] f* Rm_VPR64.4H[32,32]; # simd infix Rd_VPR64.4H = Rd_VPR64.4H f- TMPD1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR64.4H, 0, 4, 8); - simd_address_at(tmp6, TMPD1, 0, 4, 8); - simd_address_at(tmp7, Rd_VPR64.4H, 0, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) f- (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR64.4H, 1, 4, 8); - simd_address_at(tmp6, TMPD1, 1, 4, 8); - simd_address_at(tmp7, Rd_VPR64.4H, 1, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) f- (* [register]:4 tmp6); + Rd_VPR64.4H[0,32] = Rd_VPR64.4H[0,32] f- TMPD1[0,32]; + Rd_VPR64.4H[32,32] = Rd_VPR64.4H[32,32] f- TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_FLOAT_MULT(Rn_VPR64.4H, Rm_VPR64.4H, 4:1); - local tmpd:8 = SIMD_FLOAT_SUB(Rd_VPR64.4H, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fmls(Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.126 FMLS (vector) page C7-1676 line 93631 MATCH x0ec00c00/mask=xbfe0fc00 @@ -12859,55 +7790,17 @@ is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b000011 & Rd_VPR64.4H & Rn_VPR :fmls Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b000011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H f* Rm_VPR128.8H on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 4, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 4, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) f* (* [register]:4 tmp3); + TMPQ1[0,32] = Rn_VPR128.8H[0,32] f* Rm_VPR128.8H[0,32]; + TMPQ1[32,32] = Rn_VPR128.8H[32,32] f* Rm_VPR128.8H[32,32]; + TMPQ1[64,32] = Rn_VPR128.8H[64,32] f* Rm_VPR128.8H[64,32]; + TMPQ1[96,32] = Rn_VPR128.8H[96,32] f* Rm_VPR128.8H[96,32]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H f- 
TMPQ1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.8H, 0, 4, 16); - simd_address_at(tmp6, TMPQ1, 0, 4, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 0, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f- (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 1, 4, 16); - simd_address_at(tmp6, TMPQ1, 1, 4, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 1, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f- (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 2, 4, 16); - simd_address_at(tmp6, TMPQ1, 2, 4, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 2, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f- (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 3, 4, 16); - simd_address_at(tmp6, TMPQ1, 3, 4, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 3, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) f- (* [register]:4 tmp6); + Rd_VPR128.8H[0,32] = Rd_VPR128.8H[0,32] f- TMPQ1[0,32]; + Rd_VPR128.8H[32,32] = Rd_VPR128.8H[32,32] f- TMPQ1[32,32]; + Rd_VPR128.8H[64,32] = Rd_VPR128.8H[64,32] f- TMPQ1[64,32]; + Rd_VPR128.8H[96,32] = Rd_VPR128.8H[96,32] f- TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_FLOAT_MULT(Rn_VPR128.8H, Rm_VPR128.8H, 4:1); - local tmpd:16 = SIMD_FLOAT_SUB(Rd_VPR128.8H, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fmls(Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.127 FMLSL, FMLSL2 (by element) page C7-1678 line 93750 MATCH x0f804000/mask=xbfc0f400 @@ -12920,56 +7813,19 @@ is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b000011 & Rd_VPR128.8H & Rn_VP :fmlsl Rd_VPR64.2S, vRn_VPR64^".2H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2329=0b0011111 & b_22=0 & b_1215=0b0100 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR64, 0, 4, 8); - TMPS1 = * [register]:4 tmp1; - local tmp3:4 = 0; + TMPS1 = Rn_VPR64[0,32]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp3, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp4:2 = * [register]:2 tmp3; - # simd infix TMPS2 = TMPS1 f* tmp4 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp6, TMPS1, 0, 2, 4); - simd_address_at(tmp7, TMPS2, 0, 2, 4); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPS1, 1, 2, 4); - simd_address_at(tmp7, TMPS2, 1, 2, 4); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPS2 = TMPS1 f* tmp2 on lane size 2 + TMPS2[0,16] = TMPS1[0,16] f* tmp2; + TMPS2[16,16] = TMPS1[16,16] f* tmp2; # simd resize TMPD3 = float2float(TMPS2) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPS2, 0, 2, 4); - simd_address_at(tmp10, TMPD3, 0, 4, 8); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPS2, 1, 2, 4); - simd_address_at(tmp10, TMPD3, 1, 4, 8); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); + TMPD3[0,32] = float2float(TMPS2[0,16]); + TMPD3[32,32] = float2float(TMPS2[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD3 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp12, TMPD3, 0, 4, 8); - simd_address_at(tmp13, Rd_VPR64.2S, 0, 4, 
8); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp12, TMPD3, 1, 4, 8); - simd_address_at(tmp13, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD3[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD3[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR64, 0:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp3:4 = SIMD_FLOAT_MULT(tmp1, tmp2); - local tmp4:8 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:8 = SIMD_INT_SUB(Rd_VPR64.2S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.2S = NEON_fmlsl(Rd_VPR64.2S, Rn_VPR64, tmp1, 2:1); -@endif } # C7.2.127 FMLSL, FMLSL2 (by element) page C7-1678 line 93750 MATCH x0f804000/mask=xbfc0f400 @@ -12982,76 +7838,25 @@ is b_31=0 & b_30=0 & b_2329=0b0011111 & b_22=0 & b_1215=0b0100 & b_10=0 & Re_VPR :fmlsl Rd_VPR128.4S, vRn_VPR128^".4H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=0 & b_1215=0b0100 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128, 0, 8, 16); - TMPD1 = * [register]:8 tmp1; - local tmp3:4 = 0; + TMPD1 = Rn_VPR128[0,64]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp3, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp4:2 = * [register]:2 tmp3; - # simd infix TMPD2 = TMPD1 f* tmp4 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp6, TMPD1, 0, 2, 8); - simd_address_at(tmp7, TMPD2, 0, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPD1, 1, 2, 8); - simd_address_at(tmp7, TMPD2, 1, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPD1, 2, 2, 8); - simd_address_at(tmp7, TMPD2, 2, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPD1, 3, 2, 8); - simd_address_at(tmp7, TMPD2, 3, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD2 = TMPD1 f* tmp2 on lane size 2 + TMPD2[0,16] = TMPD1[0,16] f* tmp2; + TMPD2[16,16] = TMPD1[16,16] f* tmp2; + TMPD2[32,16] = TMPD1[32,16] f* tmp2; + TMPD2[48,16] = TMPD1[48,16] f* tmp2; # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD2, 0, 2, 8); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD2, 1, 2, 8); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD2, 2, 2, 8); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD2, 3, 2, 8); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); + TMPQ3[0,32] = float2float(TMPD2[0,16]); + TMPQ3[32,32] = float2float(TMPD2[16,16]); + TMPQ3[64,32] = float2float(TMPD2[32,16]); + TMPQ3[96,32] = float2float(TMPD2[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; 
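The replacement lines above address vector lanes with SLEIGH bit-range syntax, reg[bitoffset,bitwidth], so lane i of an esize-byte element sits at bit i*esize*8 from the least-significant end of the register. A minimal Python sketch of that mapping (the helper name is illustrative, not from the spec); it reproduces the TMPD2 half-precision offsets 0/16/32/48 and the TMPQ3 single-precision offsets 0/32/64/96 used in these bodies:

    # Illustrative only: compute the (bit offset, bit width) pair for lane i
    # of an element of `element_bytes` bytes, as used by the rewritten bodies.
    def lane_bits(lane_index, element_bytes):
        width = element_bytes * 8
        return lane_index * width, width

    # 4 half-precision lanes of a 64-bit temp, e.g. TMPD2[0,16] .. TMPD2[48,16]
    assert [lane_bits(i, 2) for i in range(4)] == [(0, 16), (16, 16), (32, 16), (48, 16)]
    # 4 single-precision lanes of a 128-bit temp, e.g. TMPQ3[0,32] .. TMPQ3[96,32]
    assert [lane_bits(i, 4) for i in range(4)] == [(0, 32), (32, 32), (64, 32), (96, 32)]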
- simd_address_at(tmp11, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, 0:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp3:8 = SIMD_FLOAT_MULT(tmp1, tmp2); - local tmp4:16 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_fmlsl(Rd_VPR128.4S, Rn_VPR128, tmp1, 2:1); -@endif } # C7.2.127 FMLSL, FMLSL2 (by element) page C7-1678 line 93750 MATCH x2f80c000/mask=xbfc0f400 @@ -13064,56 +7869,19 @@ is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=0 & b_1215=0b0100 & b_10=0 & Re_VPR :fmlsl2 Rd_VPR64.2S, vRn_VPR64^".2H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2329=0b1011111 & b_22=0 & b_1215=0b1100 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR64, 1, 4, 8); - TMPS1 = * [register]:4 tmp1; - local tmp3:4 = 0; + TMPS1 = Rn_VPR64[32,32]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp3, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp4:2 = * [register]:2 tmp3; - # simd infix TMPS2 = TMPS1 f* tmp4 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp6, TMPS1, 0, 2, 4); - simd_address_at(tmp7, TMPS2, 0, 2, 4); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPS1, 1, 2, 4); - simd_address_at(tmp7, TMPS2, 1, 2, 4); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPS2 = TMPS1 f* tmp2 on lane size 2 + TMPS2[0,16] = TMPS1[0,16] f* tmp2; + TMPS2[16,16] = TMPS1[16,16] f* tmp2; # simd resize TMPD3 = float2float(TMPS2) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPS2, 0, 2, 4); - simd_address_at(tmp10, TMPD3, 0, 4, 8); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPS2, 1, 2, 4); - simd_address_at(tmp10, TMPD3, 1, 4, 8); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); + TMPD3[0,32] = float2float(TMPS2[0,16]); + TMPD3[32,32] = float2float(TMPS2[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD3 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, 
Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp12, TMPD3, 0, 4, 8); - simd_address_at(tmp13, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp12, TMPD3, 1, 4, 8); - simd_address_at(tmp13, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD3[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD3[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR64, 1:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp3:4 = SIMD_FLOAT_MULT(tmp1, tmp2); - local tmp4:8 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:8 = SIMD_INT_SUB(Rd_VPR64.2S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.2S = NEON_fmlsl2(Rd_VPR64.2S, Rn_VPR64, tmp1, 2:1); -@endif } # C7.2.127 FMLSL, FMLSL2 (by element) page C7-1678 line 93750 MATCH x2f80c000/mask=xbfc0f400 @@ -13126,76 +7894,25 @@ is b_31=0 & b_30=0 & b_2329=0b1011111 & b_22=0 & b_1215=0b1100 & b_10=0 & Re_VPR :fmlsl2 Rd_VPR128.4S, vRn_VPR128^".4H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2329=0b1011111 & b_22=0 & b_1215=0b1100 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; - local tmp3:4 = 0; + TMPD1 = Rn_VPR128[64,64]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp3, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp4:2 = * [register]:2 tmp3; - # simd infix TMPD2 = TMPD1 f* tmp4 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp6, TMPD1, 0, 2, 8); - simd_address_at(tmp7, TMPD2, 0, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPD1, 1, 2, 8); - simd_address_at(tmp7, TMPD2, 1, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPD1, 2, 2, 8); - simd_address_at(tmp7, TMPD2, 2, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; - simd_address_at(tmp6, TMPD1, 3, 2, 8); - simd_address_at(tmp7, TMPD2, 3, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp6) f* tmp4; + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD2 = TMPD1 f* tmp2 on lane size 2 + TMPD2[0,16] = TMPD1[0,16] f* tmp2; + TMPD2[16,16] = TMPD1[16,16] f* tmp2; + TMPD2[32,16] = TMPD1[32,16] f* tmp2; + TMPD2[48,16] = TMPD1[48,16] f* tmp2; # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD2, 0, 2, 8); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD2, 1, 2, 8); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD2, 2, 2, 8); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD2, 3, 2, 8); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = float2float(* [register]:2 tmp9); + TMPQ3[0,32] = float2float(TMPD2[0,16]); + TMPQ3[32,32] = float2float(TMPD2[16,16]); + TMPQ3[64,32] = float2float(TMPD2[32,16]); + TMPQ3[96,32] = float2float(TMPD2[48,16]); # simd infix 
Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, 1:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp3:8 = SIMD_FLOAT_MULT(tmp1, tmp2); - local tmp4:16 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_fmlsl2(Rd_VPR128.4S, Rn_VPR128, tmp1, 2:1); -@endif } # C7.2.128 FMLSL, FMLSL2 (vector) page C7-1680 line 93882 MATCH x0ea0ec00/mask=xbfe0fc00 @@ -13208,57 +7925,18 @@ is b_31=0 & b_30=1 & b_2329=0b1011111 & b_22=0 & b_1215=0b1100 & b_10=0 & Re_VPR :fmlsl Rd_VPR64.2S, vRn_VPR64^".2H", vRm_VPR64^".2H" is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & vRm_VPR64 & Rm_VPR64 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR64, 0, 4, 8); - TMPS1 = * [register]:4 tmp1; - local tmp3:4 = 0; - simd_address_at(tmp3, Rm_VPR64, 0, 4, 8); - TMPS2 = * [register]:4 tmp3; + TMPS1 = Rn_VPR64[0,32]; + TMPS2 = Rm_VPR64[0,32]; # simd infix TMPS3 = TMPS1 f* TMPS2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, TMPS1, 0, 2, 4); - simd_address_at(tmp7, TMPS2, 0, 2, 4); - simd_address_at(tmp8, TMPS3, 0, 2, 4); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPS1, 1, 2, 4); - simd_address_at(tmp7, TMPS2, 1, 2, 4); - simd_address_at(tmp8, TMPS3, 1, 2, 4); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); + TMPS3[0,16] = TMPS1[0,16] f* TMPS2[0,16]; + TMPS3[16,16] = TMPS1[16,16] f* TMPS2[16,16]; # simd resize TMPD4 = float2float(TMPS3) (lane size 2 to 4) - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPS3, 0, 2, 4); - simd_address_at(tmp11, TMPD4, 0, 4, 8); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPS3, 1, 2, 4); - simd_address_at(tmp11, TMPD4, 1, 4, 8); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); + TMPD4[0,32] = float2float(TMPS3[0,16]); + TMPD4[32,32] = float2float(TMPS3[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD4 on lane size 4 - 
local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp13, TMPD4, 0, 4, 8); - simd_address_at(tmp14, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp13, TMPD4, 1, 4, 8); - simd_address_at(tmp14, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD4[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD4[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR64, 0:1); - local tmp2:4 = SIMD_PIECE(Rm_VPR64, 0:1); - local tmp3:4 = SIMD_FLOAT_MULT(tmp1, tmp2, 2:1); - local tmp4:8 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:8 = SIMD_INT_SUB(Rd_VPR64.2S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fmlsl(Rd_VPR64.2S, Rn_VPR64, Rm_VPR64, 2:1); -@endif } # C7.2.128 FMLSL, FMLSL2 (vector) page C7-1680 line 93882 MATCH x0ea0ec00/mask=xbfe0fc00 @@ -13271,79 +7949,24 @@ is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_V :fmlsl Rd_VPR128.4S, vRn_VPR128^".4H", Rm_VPR64.4H is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128, 0, 8, 16); - TMPD1 = * [register]:8 tmp1; - local tmp3:4 = 0; - simd_address_at(tmp3, Rm_VPR64.4H, 0, 8, 8); - TMPD2 = * [register]:8 tmp3; + TMPD1 = Rn_VPR128[0,64]; + TMPD2 = Rm_VPR64.4H[0,64]; # simd infix TMPD3 = TMPD1 f* TMPD2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, TMPD1, 0, 2, 8); - simd_address_at(tmp7, TMPD2, 0, 2, 8); - simd_address_at(tmp8, TMPD3, 0, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPD1, 1, 2, 8); - simd_address_at(tmp7, TMPD2, 1, 2, 8); - simd_address_at(tmp8, TMPD3, 1, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPD1, 2, 2, 8); - simd_address_at(tmp7, TMPD2, 2, 2, 8); - simd_address_at(tmp8, TMPD3, 2, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPD1, 3, 2, 8); - simd_address_at(tmp7, TMPD2, 3, 2, 8); - simd_address_at(tmp8, TMPD3, 3, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); + TMPD3[0,16] = TMPD1[0,16] f* TMPD2[0,16]; + TMPD3[16,16] = TMPD1[16,16] f* TMPD2[16,16]; + TMPD3[32,16] = TMPD1[32,16] f* TMPD2[32,16]; + TMPD3[48,16] = TMPD1[48,16] f* TMPD2[48,16]; # simd resize TMPQ4 = float2float(TMPD3) (lane size 2 to 4) - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPD3, 0, 2, 8); - simd_address_at(tmp11, TMPQ4, 0, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPD3, 1, 2, 8); - simd_address_at(tmp11, TMPQ4, 1, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPD3, 2, 2, 8); - simd_address_at(tmp11, TMPQ4, 2, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPD3, 3, 2, 8); - simd_address_at(tmp11, TMPQ4, 3, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); + TMPQ4[0,32] = float2float(TMPD3[0,16]); + TMPQ4[32,32] = 
float2float(TMPD3[16,16]); + TMPQ4[64,32] = float2float(TMPD3[32,16]); + TMPQ4[96,32] = float2float(TMPD3[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, 0:1); - local tmp2:8 = SIMD_PIECE(Rm_VPR64.4H, 0:1); - local tmp3:8 = SIMD_FLOAT_MULT(tmp1, tmp2, 2:1); - local tmp4:16 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fmlsl(Rd_VPR128.4S, Rn_VPR128, Rm_VPR64.4H, 2:1); -@endif } # C7.2.128 FMLSL, FMLSL2 (vector) page C7-1680 line 93882 MATCH x2ea0cc00/mask=xbfe0fc00 @@ -13356,62 +7979,23 @@ is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_V :fmlsl2 Rd_VPR64.2S, vRn_VPR64^".2H", vRm_VPR128^".2H" is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & vRm_VPR128 & Rm_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR64, 1, 4, 8); - TMPS1 = * [register]:4 tmp1; - local tmp3:4 = 0; - simd_address_at(tmp3, Rm_VPR128, 1, 4, 16); - TMPS2 = * [register]:4 tmp3; + TMPS1 = Rn_VPR64[32,32]; + TMPS2 = Rm_VPR128[32,32]; # simd infix TMPS3 = TMPS1 f* TMPS2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, TMPS1, 0, 2, 4); - simd_address_at(tmp7, TMPS2, 0, 2, 4); - simd_address_at(tmp8, TMPS3, 0, 2, 4); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPS1, 1, 2, 4); - simd_address_at(tmp7, TMPS2, 1, 2, 4); - simd_address_at(tmp8, TMPS3, 1, 2, 4); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); + TMPS3[0,16] = TMPS1[0,16] f* TMPS2[0,16]; + TMPS3[16,16] = TMPS1[16,16] f* TMPS2[16,16]; # simd resize TMPD4 = float2float(TMPS3) (lane size 2 to 4) - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPS3, 0, 2, 4); - simd_address_at(tmp11, TMPD4, 0, 4, 8); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPS3, 1, 2, 4); - simd_address_at(tmp11, TMPD4, 1, 4, 8); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); + TMPD4[0,32] = float2float(TMPS3[0,16]); + TMPD4[32,32] = 
float2float(TMPS3[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp13, TMPD4, 0, 4, 8); - simd_address_at(tmp14, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp13, TMPD4, 1, 4, 8); - simd_address_at(tmp14, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD4[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD4[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR64, 1:1); - local tmp2:4 = SIMD_PIECE(Rm_VPR128, 1:1); - local tmp3:4 = SIMD_FLOAT_MULT(tmp1, tmp2, 2:1); - local tmp4:8 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:8 = SIMD_INT_SUB(Rd_VPR64.2S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fmlsl2(Rd_VPR64.2S, Rn_VPR64, Rm_VPR128, 2:1); -@endif } # C7.2.128 FMLSL, FMLSL2 (vector) page C7-1680 line 93882 MATCH x2ea0cc00/mask=xbfe0fc00 # CONSTRUCT x6ea0cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES -# SMACRO ARG1 ARG2[1]:8 ARG3[1]:8 $f*@2 $float2float@2:16 &=$-@4 +# SMACRO ARG1 ARG2[1]:8 ARG3 $f*@2 $float2float@2:16 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl2/3@2 # AUNIT --inst x6ea0cc00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 4S when Q = 1 @@ -13419,79 +8003,23 @@ is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_V :fmlsl2 Rd_VPR128.4S, vRn_VPR128^".4H", Rm_VPR64.4H is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; - local tmp3:4 = 0; - simd_address_at(tmp3, Rm_VPR64.4H, 1, 8, 8); - TMPD2 = * [register]:8 tmp3; - # simd infix TMPD3 = TMPD1 f* TMPD2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, TMPD1, 0, 2, 8); - simd_address_at(tmp7, TMPD2, 0, 2, 8); - simd_address_at(tmp8, TMPD3, 0, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPD1, 1, 2, 8); - simd_address_at(tmp7, TMPD2, 1, 2, 8); - simd_address_at(tmp8, TMPD3, 1, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPD1, 2, 2, 8); - simd_address_at(tmp7, TMPD2, 2, 2, 8); - simd_address_at(tmp8, TMPD3, 2, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - simd_address_at(tmp6, TMPD1, 3, 2, 8); - simd_address_at(tmp7, TMPD2, 3, 2, 8); - simd_address_at(tmp8, TMPD3, 3, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) f* (* [register]:2 tmp7); - # simd resize TMPQ4 = float2float(TMPD3) (lane size 2 to 4) - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPD3, 0, 2, 8); - simd_address_at(tmp11, TMPQ4, 0, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPD3, 1, 2, 8); - simd_address_at(tmp11, TMPQ4, 1, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - simd_address_at(tmp10, TMPD3, 2, 2, 8); - simd_address_at(tmp11, TMPQ4, 2, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 
tmp10); - simd_address_at(tmp10, TMPD3, 3, 2, 8); - simd_address_at(tmp11, TMPQ4, 3, 4, 16); - * [register]:4 tmp11 = float2float(* [register]:2 tmp10); - # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); + TMPD1 = Rn_VPR128[64,64]; + # simd infix TMPD2 = TMPD1 f* Rm_VPR64.4H on lane size 2 + TMPD2[0,16] = TMPD1[0,16] f* Rm_VPR64.4H[0,16]; + TMPD2[16,16] = TMPD1[16,16] f* Rm_VPR64.4H[16,16]; + TMPD2[32,16] = TMPD1[32,16] f* Rm_VPR64.4H[32,16]; + TMPD2[48,16] = TMPD1[48,16] f* Rm_VPR64.4H[48,16]; + # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) + TMPQ3[0,32] = float2float(TMPD2[0,16]); + TMPQ3[32,32] = float2float(TMPD2[16,16]); + TMPQ3[64,32] = float2float(TMPD2[32,16]); + TMPQ3[96,32] = float2float(TMPD2[48,16]); + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, 1:1); - local tmp2:8 = SIMD_PIECE(Rm_VPR64.4H, 1:1); - local tmp3:8 = SIMD_FLOAT_MULT(tmp1, tmp2, 2:1); - local tmp4:16 = SIMD_FLOAT2FLOAT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fmlsl2(Rd_VPR128.4S, Rn_VPR128, Rm_VPR64.4H, 2:1); -@endif } # C7.2.129 FMOV (vector, immediate) page C7-1682 line 94007 MATCH x0f00f400/mask=x9ff8fc00 @@ -13505,9 +8033,7 @@ is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_V :fmov Rd_VPR128.2D, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & cmode=0xf & b_1011=1 & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fmov(Imm_neon_uimm8Shift, 8:1); -@endif } # C7.2.129 FMOV (vector, immediate) page C7-1682 line 94007 MATCH x0f00f400/mask=x9ff8fc00 @@ -13520,9 +8046,7 @@ is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & cmo :fmov Rd_VPR64.2S, Imm_neon_uimm8Shift is b_3131=0 & q=0 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & cmode=0xf & b_1011=1 & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fmov(Imm_neon_uimm8Shift:4, 4:1); -@endif } # C7.2.129 FMOV (vector, immediate) page C7-1682 line 94007 MATCH 
x0f00f400/mask=x9ff8fc00 @@ -13535,9 +8059,7 @@ is b_3131=0 & q=0 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & cmo :fmov Rd_VPR128.4S, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & cmode=0xf & b_1011=1 & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fmov(Imm_neon_uimm8Shift:4, 4:1); -@endif } # C7.2.129 FMOV (vector, immediate) page C7-1682 line 94007 MATCH x0f00fc00/mask=xbff8fc00 @@ -13551,26 +8073,13 @@ is b_3131=0 & q=1 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & cmo :fmov Rd_VPR64.4H, Imm_neon_uimm8Shift is b_31=0 & b_30=0 & b_1929=0b00111100000 & b_1015=0b111111 & Rd_VPR64.4H & Imm_neon_uimm8Shift & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = int2float(Imm_neon_uimm8Shift); # simd duplicate Rd_VPR64.4H = all elements tmp1 (lane size 2) - local tmp2:4 = 0; - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = tmp1; + Rd_VPR64.4H[0,16] = tmp1; + Rd_VPR64.4H[16,16] = tmp1; + Rd_VPR64.4H[32,16] = tmp1; + Rd_VPR64.4H[48,16] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = int2float(Imm_neon_uimm8Shift); - local tmpd:8 = SIMD_COPY(Rd_VPR64.4H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fmov(Imm_neon_uimm8Shift, 2:1); -@endif } # C7.2.129 FMOV (vector, immediate) page C7-1682 line 94007 MATCH x0f00fc00/mask=xbff8fc00 @@ -13584,34 +8093,17 @@ is b_31=0 & b_30=0 & b_1929=0b00111100000 & b_1015=0b111111 & Rd_VPR64.4H & Imm_ :fmov Rd_VPR128.8H, Imm_neon_uimm8Shift is b_31=0 & b_30=1 & b_1929=0b00111100000 & b_1015=0b111111 & Rd_VPR128.8H & Imm_neon_uimm8Shift & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = int2float(Imm_neon_uimm8Shift); # simd duplicate Rd_VPR128.8H = all elements tmp1 (lane size 2) - local tmp2:4 = 0; - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = tmp1; + Rd_VPR128.8H[0,16] = tmp1; + Rd_VPR128.8H[16,16] = tmp1; + Rd_VPR128.8H[32,16] = tmp1; + Rd_VPR128.8H[48,16] = tmp1; + Rd_VPR128.8H[64,16] = tmp1; + Rd_VPR128.8H[80,16] = tmp1; + Rd_VPR128.8H[96,16] = tmp1; + Rd_VPR128.8H[112,16] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = int2float(Imm_neon_uimm8Shift); - local tmpd:16 = SIMD_COPY(Rd_VPR128.8H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fmov(Imm_neon_uimm8Shift, 2:1); -@endif } # C7.2.130 FMOV (register) page C7-1684 line 94119 MATCH x1e204000/mask=xff3ffc00 @@ -13624,15 +8116,8 @@ is b_31=0 & b_30=1 & b_1929=0b00111100000 & b_1015=0b111111 & Rd_VPR128.8H & Imm 
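The FMOV (register) bodies in the hunk below collapse to a plain scalar copy followed by a zext_zh/zext_zs/zext_zd call, whose comments describe zeroing the upper 30/28/24 bytes of the 32-byte Zd destination. A rough Python model of that zero-extension step, assuming a little-endian 32-byte register image (the helper and the sample value are illustrative, not from the patch):

    # Rough model: an n-byte scalar result is placed in the low bytes of the
    # destination and the remaining bytes are cleared, matching the
    # "zero upper N bytes of Zd" comments on zext_zh/zext_zs/zext_zd.
    def write_scalar(value_bytes: bytes, reg_size: int = 32) -> bytes:
        return value_bytes + b"\x00" * (reg_size - len(value_bytes))

    reg = write_scalar(b"\x00\x3c")          # e.g. FP16 1.0 (0x3C00) in the low 2 bytes
    assert len(reg) == 32 and reg[2:] == b"\x00" * 30   # upper 30 bytes cleared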
:fmov Rd_FPR16, Rn_FPR16 is b_2431=0b00011110 & b_2223=0b11 & b_1021=0b100000010000 & Rd_FPR16 & Rn_FPR16 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = Rn_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = Rn_FPR16; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fmov(Rn_FPR16); -@endif } # C7.2.130 FMOV (register) page C7-1684 line 94119 MATCH x1e204000/mask=xff3ffc00 @@ -13645,15 +8130,8 @@ is b_2431=0b00011110 & b_2223=0b11 & b_1021=0b100000010000 & Rd_FPR16 & Rn_FPR16 :fmov Rd_FPR32, Rn_FPR32 is b_2431=0b00011110 & b_2223=0b00 & b_1021=0b100000010000 & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = Rn_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = Rn_FPR32; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fmov(Rn_FPR32); -@endif } # C7.2.130 FMOV (register) page C7-1684 line 94119 MATCH x1e204000/mask=xff3ffc00 @@ -13666,15 +8144,8 @@ is b_2431=0b00011110 & b_2223=0b00 & b_1021=0b100000010000 & Rd_FPR32 & Rn_FPR32 :fmov Rd_FPR64, Rn_FPR64 is b_2431=0b00011110 & b_2223=0b01 & b_1021=0b100000010000 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = Rn_FPR64; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fmov(Rn_FPR64); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13687,15 +8158,8 @@ is b_2431=0b00011110 & b_2223=0b01 & b_1021=0b100000010000 & Rd_FPR64 & Rn_FPR64 :fmov Rd_GPR32, Rn_FPR64 is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR32 & Rn_FPR64 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) Rd_GPR32 = float2float(Rn_FPR64); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - Rd_GPR32 = float2float(Rn_FPR64); - zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fmov(Rn_FPR64); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13708,13 +8172,7 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b110 :fmov Rd_GPR64, Rn_FPR32 is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR64 & Rn_FPR32 { -@if defined(SEMANTIC_primitive) Rd_GPR64 = float2float(Rn_FPR32); -@elif defined(SEMANTIC_pcode) - Rd_GPR64 = float2float(Rn_FPR32); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fmov(Rn_FPR32); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13727,15 +8185,8 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b110 :fmov Rd_FPR64, Rn_GPR32 is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR64 & Rn_GPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = float2float(Rn_GPR32); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = float2float(Rn_GPR32); - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fmov(Rn_GPR32); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13748,15 +8199,8 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b111 
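Several FMOV (general) forms below move bits between general and SIMD registers; the V.D[1] variants now read or write bits [64,128) of the 128-bit register directly (Rd_VPR128[64,64] = Rn_GPR64 and Rd_GPR64 = Rn_VPR128[64,64]). A small Python sketch of that upper-half insert and extract on a 128-bit value (helper names are illustrative):

    MASK64 = (1 << 64) - 1

    def set_d1(v128: int, x64: int) -> int:
        """Model Rd_VPR128[64,64] = Rn_GPR64: replace the upper 64 bits."""
        return (v128 & MASK64) | ((x64 & MASK64) << 64)

    def get_d1(v128: int) -> int:
        """Model Rd_GPR64 = Rn_VPR128[64,64]: extract the upper 64 bits."""
        return (v128 >> 64) & MASK64

    v = set_d1(0x1111_2222_3333_4444_5555_6666_7777_8888, 0xDEADBEEF)
    assert get_d1(v) == 0xDEADBEEF
    assert v & MASK64 == 0x5555_6666_7777_8888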
:fmov Rd_FPR32, Rn_GPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR32 & Rn_GPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = float2float(Rn_GPR64); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = float2float(Rn_GPR64); - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fmov(Rn_GPR64); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13769,15 +8213,8 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b111 :fmov Rd_GPR32, Rn_FPR16 is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR32 & Rn_FPR16 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) Rd_GPR32 = float2float(Rn_FPR16); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - Rd_GPR32 = float2float(Rn_FPR16); - zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fmov(Rn_FPR16); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13790,13 +8227,7 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b110 :fmov Rd_GPR64, Rn_FPR16 is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR64 & Rn_FPR16 { -@if defined(SEMANTIC_primitive) Rd_GPR64 = float2float(Rn_FPR16); -@elif defined(SEMANTIC_pcode) - Rd_GPR64 = float2float(Rn_FPR16); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fmov(Rn_FPR16); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13809,15 +8240,8 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b110 :fmov Rd_FPR16, Rn_GPR32 is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR16 & Rn_GPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = float2float(Rn_GPR32); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = float2float(Rn_GPR32); - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fmov(Rn_GPR32); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13830,15 +8254,8 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b111 :fmov Rd_FPR32, Rn_GPR32 is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR32 & Rn_GPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = Rn_GPR32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = Rn_GPR32; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fmov(Rn_GPR32); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13851,15 +8268,8 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b111 :fmov Rd_GPR32, Rn_FPR32 is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR32 & Rn_FPR32 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) Rd_GPR32 = Rn_FPR32; zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - Rd_GPR32 = Rn_FPR32; - zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pseudo) - Rd_GPR32 = NEON_fmov(Rn_FPR32); -@endif } # C7.2.131 FMOV 
(general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13872,15 +8282,8 @@ is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b110 :fmov Rd_FPR16, Rn_GPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR16 & Rn_GPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = float2float(Rn_GPR64); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = float2float(Rn_GPR64); - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fmov(Rn_GPR64); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13893,15 +8296,8 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b111 :fmov Rd_FPR64, Rn_GPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR64 & Rn_GPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = Rn_GPR64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = Rn_GPR64; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fmov(Rn_GPR64); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13914,18 +8310,9 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b111 :fmov vRd_VPR128^".D[1]", Rn_GPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b10 & b_21=1 & b_1920=0b01 & b_1618=0b111 & b_1015=0b000000 & vRd_VPR128 & Rd_VPR128 & Rn_GPR64 & Zd { -@if defined(SEMANTIC_primitive) # simd copy Rd_VPR128 element 1:1 = Rn_GPR64 (lane size 8) - local tmp1:4 = 0; - simd_address_at(tmp1, Rd_VPR128, 1, 8, 16); - * [register]:8 tmp1 = Rn_GPR64; + Rd_VPR128[64,64] = Rn_GPR64; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_COPY(Rd_VPR128, Rn_GPR64, 1:1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128 -@elif defined(SEMANTIC_pseudo) - Rd_VPR128 = NEON_fmov(Rd_VPR128, Rn_GPR64); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13938,13 +8325,7 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b10 & b_21=1 & b_1920=0b01 & b_1618=0b111 :fmov Rd_GPR64, Rn_FPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR64 & Rn_FPR64 { -@if defined(SEMANTIC_primitive) Rd_GPR64 = Rn_FPR64; -@elif defined(SEMANTIC_pcode) - Rd_GPR64 = Rn_FPR64; -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fmov(Rn_FPR64); -@endif } # C7.2.131 FMOV (general) page C7-1686 line 94209 MATCH x1e260000/mask=x7f36fc00 @@ -13957,15 +8338,7 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b110 :fmov Rd_GPR64, vRn_VPR128^".D[1]" is b_31=1 & b_2430=0b0011110 & b_2223=0b10 & b_21=1 & b_1920=0b01 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR64 & vRn_VPR128 & Rn_VPR128 { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128, 1, 8, 16); - Rd_GPR64 = * [register]:8 tmp1; -@elif defined(SEMANTIC_pcode) - Rd_GPR64 = SIMD_PIECE(Rn_VPR128, 1:1); -@elif defined(SEMANTIC_pseudo) - Rd_GPR64 = NEON_fmov(Rn_VPR128, 8:1); -@endif + Rd_GPR64 = Rn_VPR128[64,64]; } # C7.2.132 FMOV (scalar, immediate) page C7-1689 line 94427 MATCH x1e201000/mask=xff201fe0 @@ -13977,15 +8350,8 @@ is b_31=1 & b_2430=0b0011110 & b_2223=0b10 & b_21=1 & b_1920=0b01 & b_1618=0b110 :fmov Rd_FPR64, Imm8_fmov64_operand is ImmS_ImmR_TestSet=1 & m=0 & b_3030=0 & s=0 & 
b_2428=0x1e & ftype=1 & b_2121=1 & Imm8_fmov64_operand & b_1012=4 & imm5=0x0 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = Imm8_fmov64_operand:8; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = Imm8_fmov64_operand:8; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fmov(Imm8_fmov64_operand); -@endif } # C7.2.132 FMOV (scalar, immediate) page C7-1689 line 94427 MATCH x1e201000/mask=xff201fe0 @@ -13997,15 +8363,8 @@ is ImmS_ImmR_TestSet=1 & m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 :fmov Rd_FPR32, Imm8_fmov32_operand is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Imm8_fmov32_operand & b_1012=4 & imm5=0x0 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = Imm8_fmov32_operand:4; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = Imm8_fmov32_operand:4; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fmov(Imm8_fmov32_operand); -@endif } # C7.2.132 FMOV (scalar, immediate) page C7-1689 line 94427 MATCH x1e201000/mask=xff201fe0 @@ -14017,15 +8376,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Imm8_fmov32_operand :fmov Rd_FPR16, Imm8_fmov16_operand is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Imm8_fmov16_operand & b_1012=4 & imm5=0x0 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = Imm8_fmov16_operand:2; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = Imm8_fmov16_operand:2; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fmov(Imm8_fmov16_operand); -@endif } # C7.2.133 FMSUB page C7-1691 line 94515 MATCH x1f008000/mask=xff208000 @@ -14036,9 +8388,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Imm8_fmov16_operand :fmsub Rd_FPR64, Rn_FPR64, Rm_FPR64, Ra_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=0 & Rm_FPR64 & b_15=1 & Ra_FPR64 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fmsub(Rn_FPR64, Rm_FPR64, Ra_FPR64); -@endif } # C7.2.133 FMSUB page C7-1691 line 94515 MATCH x1f008000/mask=xff208000 @@ -14049,9 +8399,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=0 & Rm_FPR64 & b_15=1 & R :fmsub Rd_FPR32, Rn_FPR32, Rm_FPR32, Ra_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=0 & Rm_FPR32 & b_15=1 & Ra_FPR32 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fmsub(Rn_FPR32, Rm_FPR32, Ra_FPR32); -@endif } # C7.2.133 FMSUB page C7-1691 line 94515 MATCH x1f008000/mask=xff208000 @@ -14062,9 +8410,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=0 & Rm_FPR32 & b_15=1 & R :fmsub Rd_FPR16, Rn_FPR16, Rm_FPR16, Ra_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=0 & Rm_FPR16 & b_15=1 & Ra_FPR16 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fmsub(Rn_FPR16, Rm_FPR16, Ra_FPR16); -@endif } # C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x5f009000/mask=xffc0f400 @@ -14077,21 +8423,10 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=0 & Rm_FPR16 & b_15=1 & R :fmul Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2231=0b0101111100 & b_1215=0b1001 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd 
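# Annotation (editor's note, not part of the patch): in the rewritten FMUL/FMULX (by element)
# bodies that follow, the indexed-element operands (Re_VPR128Lo.H.vIndexHLM, Re_VPR128.S.vIndex,
# Re_VPR128.D.vIndex) resolve directly to the selected lane, so the element is read with a single
# assignment instead of an address computation plus dereference. Illustrative sketch only,
# assuming a 16-bit lane, mirroring the removed/added lines of the hunks below:
#
#   before:  local tmp1:4 = 0;
#            simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16);
#            local tmp2:2 = * [register]:2 tmp1;
#   after:   local tmp1:2 = Re_VPR128Lo.H.vIndexHLM;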
{ -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - Rd_FPR16 = Rn_FPR16 f* tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + Rd_FPR16 = Rn_FPR16 f* tmp1; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmpd:2 = Rn_FPR16 f* tmp1; - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_FPR16 = NEON_fmul(Rn_FPR16, tmp1, 2:1); -@endif } # C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x5f809000/mask=xff80f400 @@ -14104,21 +8439,10 @@ is b_2231=0b0101111100 & b_1215=0b1001 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR12 :fmul Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex is b_2331=0b010111111 & b_22=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.S & vIndex & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - Rd_FPR32 = Rn_FPR32 f* tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + Rd_FPR32 = Rn_FPR32 f* tmp1; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmpd:4 = Rn_FPR32 f* tmp1; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_FPR32 = NEON_fmul(Rn_FPR32, tmp1, 4:1); -@endif } # C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x5f809000/mask=xff80f400 @@ -14131,21 +8455,10 @@ is b_2331=0b010111111 & b_22=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.S & vIndex & :fmul Rd_FPR64, Rn_FPR64, Re_VPR128.D.vIndex is b_2331=0b010111111 & b_22=1 & b_21=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.D & vIndex & Rd_FPR64 & Rn_FPR64 & Re_VPR128.D.vIndex & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.D[vIndex] lane size 8 - simd_address_at(tmp1, Re_VPR128.D, vIndex:4, 8, 16); - local tmp2:8 = * [register]:8 tmp1; - Rd_FPR64 = Rn_FPR64 f* tmp2; + local tmp1:8 = Re_VPR128.D.vIndex; + Rd_FPR64 = Rn_FPR64 f* tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); - local tmpd:8 = Rn_FPR64 f* tmp1; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); - Rd_FPR64 = NEON_fmul(Rn_FPR64, tmp1, 8:1); -@endif } # C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x0f009000/mask=xbfc0f400 @@ -14158,35 +8471,14 @@ is b_2331=0b010111111 & b_22=1 & b_21=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.D & :fmul Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_31=0 &b_30=0 & b_2229=0b00111100 & b_1215=0b1001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* tmp2 on lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR64.4H, 
1, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* tmp1 on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f* tmp1; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f* tmp1; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f* tmp1; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f* tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmpd:8 = SIMD_FLOAT_MULT(Rn_VPR64.4H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.4H = NEON_fmul(Rn_VPR64.4H, tmp1, 2:1); -@endif } # C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x0f009000/mask=xbfc0f400 @@ -14199,47 +8491,18 @@ is b_31=0 &b_30=0 & b_2229=0b00111100 & b_1215=0b1001 & b_10=0 & Re_VPR128Lo.H & :fmul Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_31=0 &b_30=1 & b_2229=0b00111100 & b_1215=0b1001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128Lo.H.vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* tmp2 on lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* tmp1 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f* tmp1; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f* tmp1; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f* tmp1; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f* tmp1; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f* tmp1; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f* tmp1; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f* tmp1; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] 
f* tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmpd:16 = SIMD_FLOAT_MULT(Rn_VPR128.8H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.8H = NEON_fmul(Rn_VPR128.8H, tmp1, 2:1); -@endif } # C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x0f809000/mask=xbf80f400 @@ -14252,29 +8515,12 @@ is b_31=0 &b_30=1 & b_2229=0b00111100 & b_1215=0b1001 & b_10=0 & Re_VPR128Lo.H & :fmul Rd_VPR128.2D, Rn_VPR128.2D, Re_VPR128.D.vIndex is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=1 & b_21=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.D.vIndex & vIndex & Re_VPR128.D & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.D[vIndex] lane size 8 - simd_address_at(tmp1, Re_VPR128.D, vIndex:4, 8, 16); - local tmp2:8 = * [register]:8 tmp1; - # simd infix Rd_VPR128.2D = Rn_VPR128.2D f* tmp2 on lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) f* tmp2; + local tmp1:8 = Re_VPR128.D.vIndex; + # simd infix Rd_VPR128.2D = Rn_VPR128.2D f* tmp1 on lane size 8 + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f* tmp1; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f* tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); - local tmpd:16 = SIMD_FLOAT_MULT(Rn_VPR128.2D, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); - Rd_VPR128.2D = NEON_fmul(Rn_VPR128.2D, tmp1, 8:1); -@endif } # C7.2.134 FMUL (by element) page C7-1693 line 94639 MATCH x0f809000/mask=xbf80f400 @@ -14287,29 +8533,12 @@ is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=1 & b_21=0 & b_1215=0b1001 & b_10=0 :fmul Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_31=0 & b_30=0 & b_2329=0b0011111 & b_22=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix Rd_VPR64.2S = Rn_VPR64.2S f* tmp2 on lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp3) f* tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix Rd_VPR64.2S = Rn_VPR64.2S f* tmp1 on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f* tmp1; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f* tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmpd:8 = SIMD_FLOAT_MULT(Rn_VPR64.2S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR64.2S = NEON_fmul(Rn_VPR64.2S, tmp1, 4:1); -@endif } # C7.2.134 FMUL 
(by element) page C7-1693 line 94639 MATCH x0f809000/mask=xbf80f400 @@ -14322,35 +8551,14 @@ is b_31=0 & b_30=0 & b_2329=0b0011111 & b_22=0 & b_1215=0b1001 & b_10=0 & Re_VPR :fmul Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix Rd_VPR128.4S = Rn_VPR128.4S f* tmp2 on lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) f* tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix Rd_VPR128.4S = Rn_VPR128.4S f* tmp1 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f* tmp1; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f* tmp1; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f* tmp1; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f* tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmpd:16 = SIMD_FLOAT_MULT(Rn_VPR128.4S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.4S = NEON_fmul(Rn_VPR128.4S, tmp1, 4:1); -@endif } # C7.2.135 FMUL (vector) page C7-1697 line 94875 MATCH x2e20dc00/mask=xbfa0fc00 @@ -14362,26 +8570,10 @@ is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=0 & b_1215=0b1001 & b_10=0 & Re_VPR :fmul Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1b & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.2D = Rn_VPR128.2D f* Rm_VPR128.2D on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) f* (* [register]:8 tmp2); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) f* (* [register]:8 tmp2); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f* Rm_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f* Rm_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_MULT(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fmul(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.135 FMUL (vector) page C7-1697 line 94875 MATCH x2e20dc00/mask=xbfa0fc00 @@ -14393,26 +8585,10 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & 
Rm_VPR128.2D & :fmul Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1b & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.2S = Rn_VPR64.2S f* Rm_VPR64.2S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) f* (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) f* (* [register]:4 tmp2); + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f* Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f* Rm_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_MULT(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fmul(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.135 FMUL (vector) page C7-1697 line 94875 MATCH x2e20dc00/mask=xbfa0fc00 @@ -14424,34 +8600,12 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :fmul Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1b & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.4S = Rn_VPR128.4S f* Rm_VPR128.4S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f* (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f* (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f* (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f* (* [register]:4 tmp2); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f* Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f* Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f* Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f* Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_MULT(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fmul(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.135 FMUL (vector) page C7-1697 line 94875 MATCH x2e401c00/mask=xbfe0fc00 @@ -14464,34 +8618,12 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :fmul Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix 
Rd_VPR64.4H = Rn_VPR64.4H f* Rm_VPR64.4H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f* Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f* Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f* Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f* Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_MULT(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fmul(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.135 FMUL (vector) page C7-1697 line 94875 MATCH x2e401c00/mask=xbfe0fc00 @@ -14504,50 +8636,16 @@ is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000111 & Rd_VPR64.4H & Rn_VPR :fmul Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b000111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* Rm_VPR128.8H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - 
simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f* Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f* Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f* Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f* Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f* Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f* Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f* Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f* Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_MULT(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fmul(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.136 FMUL (scalar) page C7-1699 line 94990 MATCH x1e200800/mask=xff20fc00 @@ -14559,15 +8657,8 @@ is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b000111 & Rd_VPR128.8H & Rn_VP :fmul Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x0 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = Rn_FPR64 f* Rm_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = Rn_FPR64 f* Rm_FPR64; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fmul(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.136 FMUL (scalar) page C7-1699 line 94990 MATCH x1e200800/mask=xff20fc00 @@ -14579,15 +8670,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0 :fmul Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x0 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = Rn_FPR32 f* Rm_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = Rn_FPR32 f* Rm_FPR32; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fmul(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.136 FMUL (scalar) page C7-1699 line 94990 MATCH x1e200800/mask=xff20fc00 @@ -14599,15 +8683,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0 :fmul Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x0 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = Rn_FPR16 f* Rm_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = Rn_FPR16 f* Rm_FPR16; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fmul(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x2f809000/mask=xbf80f400 @@ -14618,10 +8695,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0 :fmulx Rd_VPR128.2D, Rn_VPR128.2D, Re_VPR128.D.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=3 & b_2121=0 & Re_VPR128.D.vIndex & vIndex & Re_VPR128.D & b_1215=0x9 & b_1010=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); Rd_VPR128.2D = 
NEON_fmulx(Rn_VPR128.2D, tmp1, 8:1); -@endif } # C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x2f809000/mask=xbf80f400 @@ -14633,29 +8708,12 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=3 & b_2121=0 & Re_VPR128.D. :fmulx Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x9 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix Rd_VPR64.2S = Rn_VPR64.2S f* tmp2 on lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp3) f* tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix Rd_VPR64.2S = Rn_VPR64.2S f* tmp1 on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f* tmp1; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f* tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmpd:8 = SIMD_FLOAT_MULT(Rn_VPR64.2S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR64.2S = NEON_fmulx(Rn_VPR64.2S, tmp1, 4:1); -@endif } # C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x2f809000/mask=xbf80f400 @@ -14666,10 +8724,8 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :fmulx Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x9 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); Rd_VPR128.4S = NEON_fmulx(Rn_VPR128.4S, tmp1, 4:1); -@endif } # C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x7f009000/mask=xffc0f400 @@ -14682,21 +8738,10 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :fmulx Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2231=0b0111111100 & b_1215=0b1001 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - Rd_FPR16 = Rn_FPR16 f* tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + Rd_FPR16 = Rn_FPR16 f* tmp1; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmpd:2 = Rn_FPR16 f* tmp1; - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_FPR16 = NEON_fmulx(Rn_FPR16, tmp1, 2:1); -@endif } # C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x7f809000/mask=xff80f400 @@ -14709,21 +8754,10 @@ is b_2231=0b0111111100 & b_1215=0b1001 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR12 :fmulx Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex is b_2331=0b011111111 & b_22=0 & b_1215=0b1001 & b_10=0 & Rd_FPR32 & 
Rn_FPR32 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - Rd_FPR32 = Rn_FPR32 f* tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + Rd_FPR32 = Rn_FPR32 f* tmp1; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmpd:4 = Rn_FPR32 f* tmp1; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_FPR32 = NEON_fmulx(Rn_FPR32, tmp1, 4:1); -@endif } # C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x7f809000/mask=xff80f400 @@ -14736,21 +8770,10 @@ is b_2331=0b011111111 & b_22=0 & b_1215=0b1001 & b_10=0 & Rd_FPR32 & Rn_FPR32 & :fmulx Rd_FPR64, Rn_FPR64, Re_VPR128.D.vIndex is b_2331=0b011111111 & b_22=1 & b_21=0 & b_1215=0b1001 & b_10=0 & Rd_FPR64 & Rn_FPR64 & Re_VPR128.D.vIndex & Re_VPR128.D & vIndex & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.D[vIndex] lane size 8 - simd_address_at(tmp1, Re_VPR128.D, vIndex:4, 8, 16); - local tmp2:8 = * [register]:8 tmp1; - Rd_FPR64 = Rn_FPR64 f* tmp2; + local tmp1:8 = Re_VPR128.D.vIndex; + Rd_FPR64 = Rn_FPR64 f* tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); - local tmpd:8 = Rn_FPR64 f* tmp1; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - local tmp1:8 = SIMD_PIECE(Re_VPR128.D, vIndex:1); - Rd_FPR64 = NEON_fmulx(Rn_FPR64, tmp1, 8:1); -@endif } # C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x2f009000/mask=xbfc0f400 @@ -14763,35 +8786,14 @@ is b_2331=0b011111111 & b_22=1 & b_21=0 & b_1215=0b1001 & b_10=0 & Rd_FPR64 & Rn :fmulx Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2229=0b10111100 & b_1215=0b1001 & b_10=0 & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* tmp2 on lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* tmp1 on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f* tmp1; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f* tmp1; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f* tmp1; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f* tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmpd:8 = SIMD_FLOAT_MULT(Rn_VPR64.4H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif 
defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.4H = NEON_fmulx(Rn_VPR64.4H, tmp1, 2:1); -@endif } # C7.2.137 FMULX (by element) page C7-1701 line 95093 MATCH x2f009000/mask=xbfc0f400 @@ -14804,47 +8806,18 @@ is b_31=0 & b_30=0 & b_2229=0b10111100 & b_1215=0b1001 & b_10=0 & Rd_VPR64.4H & :fmulx Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2229=0b10111100 & b_1215=0b1001 & b_10=0 & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* tmp2 on lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) f* tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* tmp1 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f* tmp1; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f* tmp1; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f* tmp1; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f* tmp1; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f* tmp1; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f* tmp1; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f* tmp1; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f* tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmpd:16 = SIMD_FLOAT_MULT(Rn_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.8H = NEON_fmulx(Rn_VPR128.8H, tmp1, 2:1); -@endif } # C7.2.138 FMULX page C7-1705 line 95331 MATCH x5e20dc00/mask=xffa0fc00 @@ -14855,9 +8828,7 @@ is b_31=0 & b_30=1 & b_2229=0b10111100 & b_1215=0b1001 & b_10=0 & Rd_VPR128.8H & :fmulx Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=3 & Rm_FPR64 & b_1115=0x1b & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fmulx(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.138 FMULX page C7-1705 line 95331 MATCH x5e20dc00/mask=xffa0fc00 @@ 
-14868,9 +8839,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=3 & Rm_FPR64 & b_1115=0x1 :fmulx Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=1 & Rm_FPR32 & b_1115=0x1b & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fmulx(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.138 FMULX page C7-1705 line 95331 MATCH x0e20dc00/mask=xbfa0fc00 @@ -14881,9 +8850,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=1 & Rm_FPR32 & b_1115=0x1 :fmulx Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1b & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fmulx(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.138 FMULX page C7-1705 line 95331 MATCH x0e20dc00/mask=xbfa0fc00 @@ -14894,9 +8861,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & :fmulx Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1b & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fmulx(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.138 FMULX page C7-1705 line 95331 MATCH x0e20dc00/mask=xbfa0fc00 @@ -14907,9 +8872,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :fmulx Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1b & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fmulx(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.138 FMULX page C7-1705 line 95331 MATCH x5e401c00/mask=xffe0fc00 @@ -14922,15 +8885,8 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :fmulx Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01011110010 & b_1015=0b000111 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = Rn_FPR16 f* Rm_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:2 = Rn_FPR16 f* Rm_FPR16; - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fmulx(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.138 FMULX page C7-1705 line 95331 MATCH x0e401c00/mask=xbfe0fc00 @@ -14943,34 +8899,12 @@ is b_2131=0b01011110010 & b_1015=0b000111 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd :fmulx Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* Rm_VPR64.4H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, 
Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f* Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f* Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f* Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f* Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_MULT(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fmulx(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.138 FMULX page C7-1705 line 95331 MATCH x0e401c00/mask=xbfe0fc00 @@ -14983,50 +8917,16 @@ is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000111 & Rd_VPR64.4H & Rn_VPR :fmulx Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* Rm_VPR128.8H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f* (* [register]:2 tmp2); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f* Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f* Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f* Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f* Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f* Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f* Rm_VPR128.8H[80,16]; 
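# Annotation (editor's note, not part of the patch): the per-lane assignments above and below use
# SLEIGH bit-range selection, Vn[bitoffset,bitwidth], in place of the unrolled simd_address_at
# sequences. For a .8H arrangement the eight 16-bit lanes sit at bit offsets 0,16,...,112; a .4S
# arrangement uses offsets 0,32,64,96 with width 32, and .2D uses offsets 0 and 64 with width 64.
# The 4H/8H FMULX vector forms are expanded lane by lane with f* here, while the 2S/4S/2D vector
# forms above keep the NEON_fmulx pseudo-op. The remaining lanes of this body continue directly
# below.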
+ Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f* Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f* Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_MULT(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fmulx(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.139 FNEG (vector) page C7-1708 line 95520 MATCH x2ea0f800/mask=xbfbffc00 @@ -15038,23 +8938,10 @@ is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000111 & Rd_VPR128.8H & Rn_VP :fneg Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.2D = f-(Rn_VPR128.2D) on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp2 = f-(* [register]:8 tmp1); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp2 = f-(* [register]:8 tmp1); + Rd_VPR128.2D[0,64] = f-(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = f-(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_NEG(Rn_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fneg(Rn_VPR128.2D, 8:1); -@endif } # C7.2.139 FNEG (vector) page C7-1708 line 95520 MATCH x2ea0f800/mask=xbfbffc00 @@ -15066,23 +8953,10 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x :fneg Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.2S = f-(Rn_VPR64.2S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp2 = f-(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp2 = f-(* [register]:4 tmp1); + Rd_VPR64.2S[0,32] = f-(Rn_VPR64.2S[0,32]); + Rd_VPR64.2S[32,32] = f-(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_NEG(Rn_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fneg(Rn_VPR64.2S, 4:1); -@endif } # C7.2.139 FNEG (vector) page C7-1708 line 95520 MATCH x2ea0f800/mask=xbfbffc00 @@ -15094,29 +8968,12 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :fneg Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.4S = f-(Rn_VPR128.4S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp2 = f-(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp2 = f-(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 2, 4, 16); - * 
[register]:4 tmp2 = f-(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp2 = f-(* [register]:4 tmp1); + Rd_VPR128.4S[0,32] = f-(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = f-(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = f-(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = f-(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_NEG(Rn_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fneg(Rn_VPR128.4S, 4:1); -@endif } # C7.2.139 FNEG (vector) page C7-1708 line 95520 MATCH x2ef8f800/mask=xbffffc00 @@ -15129,29 +8986,12 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :fneg Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b10111011111000111110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.4H = f-(Rn_VPR64.4H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = f-(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = f-(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp2 = f-(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = f-(* [register]:2 tmp1); + Rd_VPR64.4H[0,16] = f-(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = f-(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = f-(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = f-(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_NEG(Rn_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fneg(Rn_VPR64.4H, 2:1); -@endif } # C7.2.139 FNEG (vector) page C7-1708 line 95520 MATCH x2ef8f800/mask=xbffffc00 @@ -15164,41 +9004,16 @@ is b_31=0 & b_30=0 & b_1029=0b10111011111000111110 & Rd_VPR64.4H & Rn_VPR64.4H & :fneg Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b10111011111000111110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.8H = f-(Rn_VPR128.8H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp2 = f-(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = f-(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = f-(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = f-(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = f-(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = f-(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = f-(* 
[register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = f-(* [register]:2 tmp1); + Rd_VPR128.8H[0,16] = f-(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = f-(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = f-(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = f-(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = f-(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = f-(Rn_VPR128.8H[80,16]); + Rd_VPR128.8H[96,16] = f-(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = f-(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_NEG(Rn_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fneg(Rn_VPR128.8H, 2:1); -@endif } # C7.2.140 FNEG (scalar) page C7-1710 line 95628 MATCH x1e214000/mask=xff3ffc00 @@ -15210,15 +9025,8 @@ is b_31=0 & b_30=1 & b_1029=0b10111011111000111110 & Rd_VPR128.8H & Rn_VPR128.8H :fneg Rd_FPR64, Rn_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x2 & b_1014=0x10 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = f- Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = f- Rn_FPR64; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fneg(Rn_FPR64); -@endif } # C7.2.140 FNEG (scalar) page C7-1710 line 95628 MATCH x1e214000/mask=xff3ffc00 @@ -15230,15 +9038,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x2 & b_ :fneg Rd_FPR32, Rn_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x2 & b_1014=0x10 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = f- Rn_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = f- Rn_FPR32; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fneg(Rn_FPR32); -@endif } # C7.2.140 FNEG (scalar) page C7-1710 line 95628 MATCH x1e214000/mask=xff3ffc00 @@ -15250,15 +9051,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x2 & b_ :fneg Rd_FPR16, Rn_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x2 & b_1014=0x10 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = f- Rn_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = f- Rn_FPR16; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fneg(Rn_FPR16); -@endif } # C7.2.141 FNMADD page C7-1712 line 95720 MATCH x1f200000/mask=xff208000 @@ -15269,10 +9063,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x2 & b_ :fnmadd Rd_FPR64, Rn_FPR64, Rm_FPR64, Ra_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=1 & Rm_FPR64 & b_15=0 & Ra_FPR64 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:8 = f- Ra_FPR64; Rd_FPR64 = NEON_fnmadd(Rn_FPR64, Rm_FPR64, tmp1); -@endif } # C7.2.141 FNMADD page C7-1712 line 95720 MATCH x1f200000/mask=xff208000 @@ -15283,10 +9075,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=1 & Rm_FPR64 & b_15=0 & R :fnmadd Rd_FPR32, Rn_FPR32, Rm_FPR32, Ra_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=1 & Rm_FPR32 & b_15=0 & Ra_FPR32 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || 
defined(SEMANTIC_pcode) local tmp1:4 = f- Ra_FPR32; Rd_FPR32 = NEON_fnmadd(Rn_FPR32, Rm_FPR32, tmp1); -@endif } # C7.2.141 FNMADD page C7-1712 line 95720 MATCH x1f200000/mask=xff208000 @@ -15297,10 +9087,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=1 & Rm_FPR32 & b_15=0 & R :fnmadd Rd_FPR16, Rn_FPR16, Rm_FPR16, Ra_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=1 & Rm_FPR16 & b_15=0 & Ra_FPR16 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:2 = f- Ra_FPR16; Rd_FPR16 = NEON_fnmadd(Rn_FPR16, Rm_FPR16, tmp1); -@endif } # C7.2.142 FNMSUB page C7-1714 line 95845 MATCH x1f208000/mask=xff208000 @@ -15311,9 +9099,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=1 & Rm_FPR16 & b_15=0 & R :fnmsub Rd_FPR64, Rn_FPR64, Rm_FPR64, Ra_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=1 & Rm_FPR64 & b_15=1 & Ra_FPR64 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_fnmsub(Rn_FPR64, Rm_FPR64, Ra_FPR64); -@endif } # C7.2.142 FNMSUB page C7-1714 line 95845 MATCH x1f208000/mask=xff208000 @@ -15324,9 +9110,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=1 & Rm_FPR64 & b_15=1 & R :fnmsub Rd_FPR32, Rn_FPR32, Rm_FPR32, Ra_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=1 & Rm_FPR32 & b_15=1 & Ra_FPR32 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_fnmsub(Rn_FPR32, Rm_FPR32, Ra_FPR32); -@endif } # C7.2.142 FNMSUB page C7-1714 line 95845 MATCH x1f208000/mask=xff208000 @@ -15337,9 +9121,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=1 & Rm_FPR32 & b_15=1 & R :fnmsub Rd_FPR16, Rn_FPR16, Rm_FPR16, Ra_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=1 & Rm_FPR16 & b_15=1 & Ra_FPR16 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_fnmsub(Rn_FPR16, Rm_FPR16, Ra_FPR16); -@endif } # C7.2.143 FNMUL (scalar) page C7-1716 line 95969 MATCH x1e208800/mask=xff20fc00 @@ -15351,17 +9133,9 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=1 & Rm_FPR16 & b_15=1 & R :fnmul Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x8 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = Rn_FPR64 f* Rm_FPR64; Rd_FPR64 = f- tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_FPR64 f* Rm_FPR64; - local tmpd:8 = f- tmp1; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fnmul(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.143 FNMUL (scalar) page C7-1716 line 95969 MATCH x1e208800/mask=xff20fc00 @@ -15373,17 +9147,9 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0 :fnmul Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x8 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Rn_FPR32 f* Rm_FPR32; Rd_FPR32 = f- tmp1; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Rn_FPR32 f* Rm_FPR32; - local tmpd:4 = f- tmp1; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fnmul(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.143 FNMUL (scalar) page C7-1716 
line 95969 MATCH x1e208800/mask=xff20fc00 @@ -15395,17 +9161,9 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0 :fnmul Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x8 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = Rn_FPR16 f* Rm_FPR16; Rd_FPR16 = f- tmp1; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = Rn_FPR16 f* Rm_FPR16; - local tmpd:2 = f- tmp1; - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fnmul(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.144 FRECPE page C7-1718 line 96074 MATCH x0ea1d800/mask=xbfbffc00 @@ -15416,9 +9174,7 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0 :frecpe Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=1 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_frecpe(Rn_VPR128.2D, 8:1); -@endif } # C7.2.144 FRECPE page C7-1718 line 96074 MATCH x0ea1d800/mask=xbfbffc00 @@ -15429,9 +9185,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=1 & b_1722=0x30 & b_1216=0x1d & :frecpe Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & size_high=1 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_frecpe(Rn_VPR64.2S, 4:1); -@endif } # C7.2.144 FRECPE page C7-1718 line 96074 MATCH x0ea1d800/mask=xbfbffc00 @@ -15442,9 +9196,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & size_high=1 & b_1722=0x10 & b_1216=0x1d & :frecpe Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=1 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_frecpe(Rn_VPR128.4S, 4:1); -@endif } # C7.2.144 FRECPE page C7-1718 line 96074 MATCH x5ea1d800/mask=xffbffc00 @@ -15455,9 +9207,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=1 & b_1722=0x10 & b_1216=0x1d & :frecpe Rd_FPR64, Rn_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & size_high=1 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_frecpe(Rn_FPR64); -@endif } # C7.2.144 FRECPE page C7-1718 line 96074 MATCH x5ea1d800/mask=xffbffc00 @@ -15468,9 +9218,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & size_high=1 & b_1722=0x30 & b_1216=0x1d & b_10 :frecpe Rd_FPR32, Rn_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & size_high=1 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_frecpe(Rn_FPR32); -@endif } # C7.2.144 FRECPE page C7-1718 line 96074 MATCH x5ef9d800/mask=xfffffc00 @@ -15482,9 +9230,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & size_high=1 & b_1722=0x10 & b_1216=0x1d & b_10 :frecpe Rd_FPR16, Rn_FPR16 is b_1031=0b0101111011111001110110 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_frecpe(Rn_FPR16); -@endif } # C7.2.144 FRECPE page C7-1718 line 96074 MATCH x0ef9d800/mask=xbffffc00 @@ -15496,9 +9242,7 @@ is b_1031=0b0101111011111001110110 & Rd_FPR16 & 
Rn_FPR16 & Zd :frecpe Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111011111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_frecpe(Rn_VPR64.4H, 2:1); -@endif } # C7.2.144 FRECPE page C7-1718 line 96074 MATCH x0ef9d800/mask=xbffffc00 @@ -15510,9 +9254,7 @@ is b_31=0 & b_30=0 & b_1029=0b00111011111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & :frecpe Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111011111001110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_frecpe(Rn_VPR128.8H, 2:1); -@endif } # C7.2.145 FRECPS page C7-1721 line 96253 MATCH x5e20fc00/mask=xffa0fc00 @@ -15523,9 +9265,7 @@ is b_31=0 & b_30=1 & b_1029=0b00111011111001110110 & Rd_VPR128.8H & Rn_VPR128.8H :frecps Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=3 & Rm_FPR64 & b_1115=0x1f & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_frecps(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.145 FRECPS page C7-1721 line 96253 MATCH x5e20fc00/mask=xffa0fc00 @@ -15536,9 +9276,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=3 & Rm_FPR64 & b_1115=0x1 :frecps Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=1 & Rm_FPR32 & b_1115=0x1f & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_frecps(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.145 FRECPS page C7-1721 line 96253 MATCH x0e20fc00/mask=xbfa0fc00 @@ -15549,9 +9287,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=1 & Rm_FPR32 & b_1115=0x1 :frecps Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1f & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_frecps(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.145 FRECPS page C7-1721 line 96253 MATCH x0e20fc00/mask=xbfa0fc00 @@ -15562,9 +9298,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & :frecps Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1f & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_frecps(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.145 FRECPS page C7-1721 line 96253 MATCH x0e20fc00/mask=xbfa0fc00 @@ -15575,9 +9309,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & :frecps Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1f & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_frecps(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.145 FRECPS page C7-1721 line 96253 MATCH x5e403c00/mask=xffe0fc00 @@ -15589,9 +9321,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & :frecps Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01011110010 & b_1015=0b001111 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { -@if 
defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_frecps(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.145 FRECPS page C7-1721 line 96253 MATCH x0e403c00/mask=xbfe0fc00 @@ -15603,9 +9333,7 @@ is b_2131=0b01011110010 & b_1015=0b001111 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd :frecps Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b001111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_frecps(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.145 FRECPS page C7-1721 line 96253 MATCH x0e403c00/mask=xbfe0fc00 @@ -15617,9 +9345,7 @@ is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b001111 & Rd_VPR64.4H & Rn_VPR :frecps Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b001111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_frecps(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.146 FRECPX page C7-1724 line 96442 MATCH x5ef9f800/mask=xfffffc00 @@ -15631,9 +9357,7 @@ is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b001111 & Rd_VPR128.8H & Rn_VP :frecpx Rd_FPR16, Rn_FPR16 is b_1031=0b0101111011111001111110 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_frecpx(Rn_FPR16); -@endif } # C7.2.146 FRECPX page C7-1724 line 96442 MATCH x5ea1f800/mask=xffbffc00 @@ -15645,9 +9369,7 @@ is b_1031=0b0101111011111001111110 & Rd_FPR16 & Rn_FPR16 & Zd :frecpx Rd_FPR32, Rn_FPR32 is b_2331=0b010111101 & b_22=0 & b_1021=0b100001111110 & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_frecpx(Rn_FPR32); -@endif } # C7.2.146 FRECPX page C7-1724 line 96442 MATCH x5ea1f800/mask=xffbffc00 @@ -15659,9 +9381,7 @@ is b_2331=0b010111101 & b_22=0 & b_1021=0b100001111110 & Rd_FPR32 & Rn_FPR32 & Z :frecpx Rd_FPR64, Rn_FPR64 is b_2331=0b010111101 & b_22=1 & b_1021=0b100001111110 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_frecpx(Rn_FPR64); -@endif } # C7.2.140 FRINTA (vector) page C7-1313 line 76386 KEEPWITH @@ -15690,29 +9410,12 @@ frint_vmode: "z" is b_29=0 & b_23=1 & b_12=1 { } :frint^frint_vmode Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_29 & b_2428=0b01110 & b_23 & b_1322=0b1111001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.4H = trunc(Rn_VPR64.4H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); + Rd_VPR64.4H[0,16] = trunc(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = trunc(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = trunc(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = 
trunc(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_TRUNC(Rn_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_frint_aimnpxz(Rn_VPR64.4H, 2:1); -@endif } # C7.2.155 FRINTA (vector) page C7-1742 line 97273 MATCH x2e798800/mask=xbffffc00 @@ -15731,41 +9434,16 @@ is b_31=0 & b_30=0 & b_29 & b_2428=0b01110 & b_23 & b_1322=0b1111001100 & b_12 & :frint^frint_vmode Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_29 & b_2428=0b01110 & b_23 & b_1322=0b1111001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.8H = trunc(Rn_VPR128.8H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = trunc(* [register]:2 tmp1); + Rd_VPR128.8H[0,16] = trunc(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = trunc(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = trunc(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = trunc(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = trunc(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = trunc(Rn_VPR128.8H[80,16]); + Rd_VPR128.8H[96,16] = trunc(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = trunc(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_TRUNC(Rn_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_frint_aimnpxz(Rn_VPR128.8H, 2:1); -@endif } # C7.2.155 FRINTA (vector) page C7-1742 line 97273 MATCH x2e218800/mask=xbfbffc00 @@ -15784,23 +9462,10 @@ is b_31=0 & b_30=1 & b_29 & b_2428=0b01110 & b_23 & b_1322=0b1111001100 & b_12 & :frint^frint_vmode Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_29 & b_2428=0b01110 & b_23 & b_22=0b0 & b_1321=0b100001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.2S = trunc(Rn_VPR64.2S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); + Rd_VPR64.2S[0,32] = trunc(Rn_VPR64.2S[0,32]); + 
Rd_VPR64.2S[32,32] = trunc(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_TRUNC(Rn_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_frint_aimnpxz(Rn_VPR64.2S, 4:1); -@endif } # C7.2.155 FRINTA (vector) page C7-1742 line 97273 MATCH x2e218800/mask=xbfbffc00 @@ -15819,29 +9484,12 @@ is b_31=0 & b_30=0 & b_29 & b_2428=0b01110 & b_23 & b_22=0b0 & b_1321=0b10000110 :frint^frint_vmode Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_29 & b_2428=0b01110 & b_23 & b_22=0b0 & b_1321=0b100001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.4S = trunc(Rn_VPR128.4S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp2 = trunc(* [register]:4 tmp1); + Rd_VPR128.4S[0,32] = trunc(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = trunc(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = trunc(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = trunc(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_TRUNC(Rn_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_frint_aimnpxz(Rn_VPR128.4S, 4:1); -@endif } # C7.2.155 FRINTA (vector) page C7-1742 line 97273 MATCH x2e218800/mask=xbfbffc00 @@ -15860,23 +9508,10 @@ is b_31=0 & b_30=1 & b_29 & b_2428=0b01110 & b_23 & b_22=0b0 & b_1321=0b10000110 :frint^frint_vmode Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_29 & b_2428=0b01110 & b_23 & b_22=0b1 & b_1321=0b100001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR128.2D & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.2D = trunc(Rn_VPR128.2D) on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp2 = trunc(* [register]:8 tmp1); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp2 = trunc(* [register]:8 tmp1); + Rd_VPR128.2D[0,64] = trunc(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = trunc(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_TRUNC(Rn_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_frint_aimnpxz(Rn_VPR128.2D, 8:1); -@endif } # C7.2.141 FRINTA (scalar) page C7-1315 line 76515 KEEPWITH @@ -15906,15 +9541,8 @@ frint_smode: "z" is b_1517=0b011 { } :frint^frint_smode Rd_FPR16, Rn_FPR16 is b_2431=0b00011110 & b_2223=0b11 & b_1821=0b1001 & b_1517 & b_1014=0b10000 & frint_smode & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = trunc(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:2 = trunc(Rn_FPR16); - Zd = 
zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_frint_aimnpxz(Rn_FPR16); -@endif } # C7.2.156 FRINTA (scalar) page C7-1744 line 97402 MATCH x1e264000/mask=xff3ffc00 @@ -15933,15 +9561,8 @@ is b_2431=0b00011110 & b_2223=0b11 & b_1821=0b1001 & b_1517 & b_1014=0b10000 & f :frint^frint_smode Rd_FPR32, Rn_FPR32 is b_2431=0b00011110 & b_2223=0b00 & b_1821=0b1001 & b_1517 & b_1014=0b10000 & frint_smode & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = trunc(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:4 = trunc(Rn_FPR32); - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_frint_aimnpxz(Rn_FPR32); -@endif } # C7.2.156 FRINTA (scalar) page C7-1744 line 97402 MATCH x1e264000/mask=xff3ffc00 @@ -15960,15 +9581,8 @@ is b_2431=0b00011110 & b_2223=0b00 & b_1821=0b1001 & b_1517 & b_1014=0b10000 & f :frint^frint_smode Rd_FPR64, Rn_FPR64 is b_2431=0b00011110 & b_2223=0b01 & b_1821=0b1001 & b_1517 & b_1014=0b10000 & frint_smode & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = trunc(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = trunc(Rn_FPR64); - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_frint_aimnpxz(Rn_FPR64); -@endif } # C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x7ef9d800/mask=xfffffc00 @@ -15980,9 +9594,7 @@ is b_2431=0b00011110 & b_2223=0b01 & b_1821=0b1001 & b_1517 & b_1014=0b10000 & f :frsqrte Rd_FPR16, Rn_FPR16 is b_31=0 & b_30=1 & b_2329=0b1111101 & b_22=1 & b_1021=0b111001110110 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_frsqrte(Rn_FPR16); -@endif } # C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x7ea1d800/mask=xffbffc00 @@ -15994,9 +9606,7 @@ is b_31=0 & b_30=1 & b_2329=0b1111101 & b_22=1 & b_1021=0b111001110110 & Rd_FPR1 :frsqrte Rd_FPR32, Rn_FPR32 is b_31=0 & b_30=1 & b_2329=0b1111101 & b_22=0 & b_1021=0b100001110110 & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_frsqrte(Rn_FPR32); -@endif } # C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x7ea1d800/mask=xffbffc00 @@ -16008,9 +9618,7 @@ is b_31=0 & b_30=1 & b_2329=0b1111101 & b_22=0 & b_1021=0b100001110110 & Rd_FPR3 :frsqrte Rd_FPR64, Rn_FPR64 is b_31=0 & b_30=1 & b_2329=0b1111101 & b_22=1 & b_1021=0b100001110110 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_frsqrte(Rn_FPR64); -@endif } # C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x2ef9d800/mask=xbffffc00 @@ -16022,9 +9630,7 @@ is b_31=0 & b_30=1 & b_2329=0b1111101 & b_22=1 & b_1021=0b100001110110 & Rd_FPR6 :frsqrte Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_frsqrte(Rn_VPR64.4H, 2:1); -@endif } # C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x2ef9d800/mask=xbffffc00 @@ -16036,9 +9642,7 @@ is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001110110 & Rd_VPR6 :frsqrte Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || 
defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_frsqrte(Rn_VPR128.8H, 2:1); -@endif } # C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x2ea1d800/mask=xbfbffc00 @@ -16050,9 +9654,7 @@ is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001110110 & Rd_VPR1 :frsqrte Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_frsqrte(Rn_VPR64.2S, 4:1); -@endif } # C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x2ea1d800/mask=xbfbffc00 @@ -16064,9 +9666,7 @@ is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110110 & Rd_VPR6 :frsqrte Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_frsqrte(Rn_VPR128.4S, 4:1); -@endif } # C7.2.169 FRSQRTE page C7-1770 line 98898 MATCH x2ea1d800/mask=xbfbffc00 @@ -16078,9 +9678,7 @@ is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110110 & Rd_VPR1 :frsqrte Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b100001110110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_frsqrte(Rn_VPR128.2D, 8:1); -@endif } # C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x5ec03c00/mask=xffe0fc00 @@ -16092,9 +9690,7 @@ is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b100001110110 & Rd_VPR1 :frsqrts Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_31=0 & b_30=1 & b_2329=0b0111101 & b_22=1 & b_21=0 & b_1015=0b001111 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_frsqrts(Rn_FPR16); -@endif } # C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x5ea0fc00/mask=xffa0fc00 @@ -16106,9 +9702,7 @@ is b_31=0 & b_30=1 & b_2329=0b0111101 & b_22=1 & b_21=0 & b_1015=0b001111 & Rd_F :frsqrts Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_31=0 & b_30=1 & b_2329=0b0111101 & b_22=0 & b_21=1 & b_1015=0b111111 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_frsqrts(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x5ea0fc00/mask=xffa0fc00 @@ -16120,9 +9714,7 @@ is b_31=0 & b_30=1 & b_2329=0b0111101 & b_22=0 & b_21=1 & b_1015=0b111111 & Rd_F :frsqrts Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_31=0 & b_30=1 & b_2329=0b0111101 & b_22=1 & b_21=1 & b_1015=0b111111 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_frsqrts(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x0ec03c00/mask=xbfe0fc00 @@ -16134,9 +9726,7 @@ is b_31=0 & b_30=1 & b_2329=0b0111101 & b_22=1 & b_21=1 & b_1015=0b111111 & Rd_F :frsqrts Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=1 & b_21=0 & b_1015=0b001111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_frsqrts(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x0ec03c00/mask=xbfe0fc00 @@ -16148,9 +9738,7 @@ is b_31=0 & b_30=0 & 
b_2329=0b0011101 & b_22=1 & b_21=0 & b_1015=0b001111 & Rd_V :frsqrts Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=1 & b_21=0 & b_1015=0b001111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_frsqrts(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x0ea0fc00/mask=xbfa0fc00 @@ -16162,9 +9750,7 @@ is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=1 & b_21=0 & b_1015=0b001111 & Rd_V :frsqrts Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111111 & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_frsqrts(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x0ea0fc00/mask=xbfa0fc00 @@ -16176,9 +9762,7 @@ is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111111 & Rd_V :frsqrts Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111111 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_frsqrts(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.170 FRSQRTS page C7-1773 line 99077 MATCH x0ea0fc00/mask=xbfa0fc00 @@ -16190,9 +9774,7 @@ is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111111 & Rd_V :frsqrts Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=1 & b_21=1 & b_1015=0b111111 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_frsqrts(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.171 FSQRT (vector) page C7-1776 line 99266 MATCH x2ef9f800/mask=xbffffc00 @@ -16204,9 +9786,7 @@ is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=1 & b_21=1 & b_1015=0b111111 & Rd_V :fsqrt Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001111110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_fsqrt(Rn_VPR64.4H, 2:1); -@endif } # C7.2.171 FSQRT (vector) page C7-1776 line 99266 MATCH x2ef9f800/mask=xbffffc00 @@ -16218,9 +9798,7 @@ is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001111110 & Rd_VPR6 :fsqrt Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001111110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_fsqrt(Rn_VPR128.8H, 2:1); -@endif } # C7.2.171 FSQRT (vector) page C7-1776 line 99266 MATCH x2ea1f800/mask=xbfbffc00 @@ -16232,9 +9810,7 @@ is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001111110 & Rd_VPR1 :fsqrt Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001111110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_fsqrt(Rn_VPR64.2S, 4:1); -@endif } # C7.2.171 FSQRT (vector) page C7-1776 line 99266 MATCH x2ea1f800/mask=xbfbffc00 @@ -16246,9 +9822,7 @@ is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001111110 & Rd_VPR6 :fsqrt Rd_VPR128.4S, Rn_VPR128.4S is 
b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001111110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_fsqrt(Rn_VPR128.4S, 4:1); -@endif } # C7.2.171 FSQRT (vector) page C7-1776 line 99266 MATCH x2ea1f800/mask=xbfbffc00 @@ -16260,9 +9834,7 @@ is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001111110 & Rd_VPR1 :fsqrt Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b100001111110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_fsqrt(Rn_VPR128.2D, 8:1); -@endif } # C7.2.172 FSQRT (scalar) page C7-1778 line 99375 MATCH x1e21c000/mask=xff3ffc00 @@ -16275,15 +9847,8 @@ is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b100001111110 & Rd_VPR1 :fsqrt Rd_FPR16, Rn_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x3 & b_1014=0x10 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = sqrt(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:2 = sqrt(Rn_FPR16); - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_fsqrt(Rn_FPR16); -@endif } # C7.2.172 FSQRT (scalar) page C7-1778 line 99375 MATCH x1e21c000/mask=xff3ffc00 @@ -16296,15 +9861,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x3 & b_ :fsqrt Rd_FPR32, Rn_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x3 & b_1014=0x10 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = sqrt(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:4 = sqrt(Rn_FPR32); - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fsqrt(Rn_FPR32); -@endif } # C7.2.172 FSQRT (scalar) page C7-1778 line 99375 MATCH x1e21c000/mask=xff3ffc00 @@ -16317,15 +9875,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x3 & b_ :fsqrt Rd_FPR64, Rn_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x3 & b_1014=0x10 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = sqrt(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = sqrt(Rn_FPR64); - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fsqrt(Rn_FPR64); -@endif } # C7.2.173 FSUB (vector) page C7-1780 line 99472 MATCH x0ea0d400/mask=xbfa0fc00 @@ -16337,26 +9888,10 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x3 & b_ :fsub Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1a & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.2D = Rn_VPR128.2D f- Rm_VPR128.2D on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) f- (* [register]:8 tmp2); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) f- (* [register]:8 tmp2); + 
Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f- Rm_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f- Rm_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_SUB(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_fsub(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.173 FSUB (vector) page C7-1780 line 99472 MATCH x0ea0d400/mask=xbfa0fc00 @@ -16368,26 +9903,10 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & :fsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1a & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.2S = Rn_VPR64.2S f- Rm_VPR64.2S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) f- (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) f- (* [register]:4 tmp2); + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f- Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f- Rm_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_SUB(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_fsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.173 FSUB (vector) page C7-1780 line 99472 MATCH x0ea0d400/mask=xbfa0fc00 @@ -16399,34 +9918,12 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & :fsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1a & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.4S = Rn_VPR128.4S f- Rm_VPR128.4S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f- (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f- (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f- (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) f- (* [register]:4 tmp2); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f- Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f- Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f- Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f- Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local 
tmpd:16 = SIMD_FLOAT_SUB(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_fsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.173 FSUB (vector) page C7-1780 line 99472 MATCH x0ec01400/mask=xbfe0fc00 @@ -16439,34 +9936,12 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & :fsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.4H = Rn_VPR64.4H f- Rm_VPR64.4H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f- (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f- (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f- (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) f- (* [register]:2 tmp2); + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f- Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f- Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f- Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f- Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_FLOAT_SUB(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_fsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.173 FSUB (vector) page C7-1780 line 99472 MATCH x0ec01400/mask=xbfe0fc00 @@ -16479,50 +9954,16 @@ is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR :fsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b000101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.8H = Rn_VPR128.8H f- Rm_VPR128.8H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f- (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f- (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f- (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f- (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - 
simd_address_at(tmp2, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f- (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f- (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f- (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) f- (* [register]:2 tmp2); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f- Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f- Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f- Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f- Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f- Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f- Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f- Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f- Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_FLOAT_SUB(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_fsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.174 FSUB (scalar) page C7-1782 line 99588 MATCH x1e203800/mask=xff20fc00 @@ -16534,15 +9975,8 @@ is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b000101 & Rd_VPR128.8H & Rn_VP :fsub Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x3 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = Rn_FPR64 f- Rm_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = Rn_FPR64 f- Rm_FPR64; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_fsub(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.174 FSUB (scalar) page C7-1782 line 99588 MATCH x1e203800/mask=xff20fc00 @@ -16554,15 +9988,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0 :fsub Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x3 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = Rn_FPR32 f- Rm_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = Rn_FPR32 f- Rm_FPR32; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_fsub(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.174 FSUB (scalar) page C7-1782 line 99588 MATCH x1e203800/mask=xff20fc00 @@ -16574,15 +10001,8 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0 :fsub Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x3 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = Rn_FPR16 f- Rm_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = Rn_FPR16 f- Rm_FPR16; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = 
NEON_fsub(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.189 LDNP (SIMD&FP) page C7-1829 line 102510 MATCH x2c400000/mask=x3fc00000 @@ -16594,22 +10014,11 @@ is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0 :ldnp Rt_FPR32, Rt2_FPR32, addrPairIndexed is b_3031=0b00 & b_2229=0b10110001 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 & Zt & Zt2 { -@if defined(SEMANTIC_primitive) Rt_FPR32 = * addrPairIndexed; zext_zs(Zt); # zero upper 28 bytes of Zt local tmp1:8 = addrPairIndexed + 4; Rt2_FPR32 = * tmp1; zext_zs(Zt2); # zero upper 28 bytes of Zt2 -@elif defined(SEMANTIC_pcode) - local tmpt:4 = * addrPairIndexed; - Zt = zext(tmpt); # assigning to Rt_FPR32 - local tmp1:8 = addrPairIndexed + 4; - local tmpt2:4 = * tmp1; - Zt2 = zext(tmpt2); # assigning to Rt2_FPR32 -@elif defined(SEMANTIC_pseudo) - Rt_FPR32 = NEON_ldnp1(addrPairIndexed); - Rt2_FPR32 = NEON_ldnp2(addrPairIndexed); -@endif } # C7.2.189 LDNP (SIMD&FP) page C7-1829 line 102510 MATCH x2c400000/mask=x3fc00000 @@ -16621,22 +10030,11 @@ is b_3031=0b00 & b_2229=0b10110001 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 & Zt :ldnp Rt_FPR64, Rt2_FPR64, addrPairIndexed is b_3031=0b01 & b_2229=0b10110001 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64 & Zt & Zt2 { -@if defined(SEMANTIC_primitive) Rt_FPR64 = * addrPairIndexed; zext_zd(Zt); # zero upper 24 bytes of Zt local tmp1:8 = addrPairIndexed + 8; Rt2_FPR64 = * tmp1; zext_zd(Zt2); # zero upper 24 bytes of Zt2 -@elif defined(SEMANTIC_pcode) - local tmpt:8 = * addrPairIndexed; - Zt = zext(tmpt); # assigning to Rt_FPR64 - local tmp1:8 = addrPairIndexed + 8; - local tmpt2:8 = * tmp1; - Zt2 = zext(tmpt2); # assigning to Rt2_FPR64 -@elif defined(SEMANTIC_pseudo) - Rt_FPR64 = NEON_ldnp1(addrPairIndexed); - Rt2_FPR64 = NEON_ldnp2(addrPairIndexed); -@endif } # C7.2.189 LDNP (SIMD&FP) page C7-1829 line 102510 MATCH x2c400000/mask=x3fc00000 @@ -16648,22 +10046,11 @@ is b_3031=0b01 & b_2229=0b10110001 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64 & Zt :ldnp Rt_FPR128, Rt2_FPR128, addrPairIndexed is b_3031=0b10 & b_2229=0b10110001 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128 & Zt & Zt2 { -@if defined(SEMANTIC_primitive) Rt_FPR128 = * addrPairIndexed; zext_zq(Zt); # zero upper 16 bytes of Zt local tmp1:8 = addrPairIndexed + 16; Rt2_FPR128 = * tmp1; zext_zq(Zt2); # zero upper 16 bytes of Zt2 -@elif defined(SEMANTIC_pcode) - local tmpt:16 = * addrPairIndexed; - Zt = zext(tmpt); # assigning to Rt_FPR128 - local tmp1:8 = addrPairIndexed + 16; - local tmpt2:16 = * tmp1; - Zt2 = zext(tmpt2); # assigning to Rt2_FPR128 -@elif defined(SEMANTIC_pseudo) - Rt_FPR128 = NEON_ldnp1(addrPairIndexed); - Rt2_FPR128 = NEON_ldnp2(addrPairIndexed); -@endif } # C7.2.190 LDP (SIMD&FP) page C7-1831 line 102650 MATCH x2cc00000/mask=x3fc00000 @@ -16678,22 +10065,11 @@ is b_3031=0b10 & b_2229=0b10110001 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128 & :ldp Rt_FPR128, Rt2_FPR128, addrPairIndexed is b_3031=0b10 & b_2529=0b10110 & b_22=0b1 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128 & Zt & Zt2 { -@if defined(SEMANTIC_primitive) Rt_FPR128 = * addrPairIndexed; zext_zq(Zt); # zero upper 16 bytes of Zt local tmp1:8 = addrPairIndexed + 16; Rt2_FPR128 = * tmp1; zext_zq(Zt2); # zero upper 16 bytes of Zt2 -@elif defined(SEMANTIC_pcode) - Rt_FPR128 = * addrPairIndexed; - zext_zq(Zt); # zero upper 16 bytes of Zt - local tmp1:8 = addrPairIndexed + 16; - Rt2_FPR128 = * tmp1; - zext_zq(Zt2); # zero upper 16 bytes of Zt2 -@elif defined(SEMANTIC_pseudo) - Rt_FPR128 = NEON_ldp1(addrPairIndexed); - Rt2_FPR128 = NEON_ldp2(addrPairIndexed); -@endif } # 
C7.2.190 LDP (SIMD&FP) page C7-1831 line 102650 MATCH x2cc00000/mask=x3fc00000 @@ -16708,22 +10084,11 @@ is b_3031=0b10 & b_2529=0b10110 & b_22=0b1 & Rt2_FPR128 & addrPairIndexed & Rt_F :ldp Rt_FPR32, Rt2_FPR32, addrPairIndexed is b_3031=0b00 & b_2529=0b10110 & b_22=0b1 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 & Zt & Zt2 { -@if defined(SEMANTIC_primitive) Rt_FPR32 = * addrPairIndexed; zext_zs(Zt); # zero upper 28 bytes of Zt local tmp1:8 = addrPairIndexed + 4; Rt2_FPR32 = * tmp1; zext_zs(Zt2); # zero upper 28 bytes of Zt2 -@elif defined(SEMANTIC_pcode) - Rt_FPR32 = * addrPairIndexed; - zext_zs(Zt); # zero upper 28 bytes of Zt - local tmp1:8 = addrPairIndexed + 4; - Rt2_FPR32 = * tmp1; - zext_zs(Zt2); # zero upper 28 bytes of Zt2 -@elif defined(SEMANTIC_pseudo) - Rt_FPR32 = NEON_ldp1(addrPairIndexed); - Rt2_FPR32 = NEON_ldp2(addrPairIndexed); -@endif } # C7.2.190 LDP (SIMD&FP) page C7-1831 line 102650 MATCH x2cc00000/mask=x3fc00000 @@ -16738,22 +10103,11 @@ is b_3031=0b00 & b_2529=0b10110 & b_22=0b1 & Rt2_FPR32 & addrPairIndexed & Rt_FP :ldp Rt_FPR64, Rt2_FPR64, addrPairIndexed is b_3031=0b01 & b_2529=0b10110 & b_22=0b1 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64 & Zt & Zt2 { -@if defined(SEMANTIC_primitive) Rt_FPR64 = * addrPairIndexed; zext_zd(Zt); # zero upper 24 bytes of Zt local tmp1:8 = addrPairIndexed + 8; Rt2_FPR64 = * tmp1; zext_zd(Zt2); # zero upper 24 bytes of Zt2 -@elif defined(SEMANTIC_pcode) - Rt_FPR64 = * addrPairIndexed; - zext_zd(Zt); # zero upper 24 bytes of Zt - local tmp1:8 = addrPairIndexed + 8; - Rt2_FPR64 = * tmp1; - zext_zd(Zt2); # zero upper 24 bytes of Zt2 -@elif defined(SEMANTIC_pseudo) - Rt_FPR64 = NEON_ldp1(addrPairIndexed); - Rt2_FPR64 = NEON_ldp2(addrPairIndexed); -@endif } # C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400400/mask=x3f600c00 @@ -16767,15 +10121,8 @@ is b_3031=0b01 & b_2529=0b10110 & b_22=0b1 & Rt2_FPR64 & addrPairIndexed & Rt_FP :ldr Rt_FPR8, addrIndexed is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR8 & addrIndexed & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR8 = * addrIndexed; zext_zb(Zt); # zero upper 31 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR8 = * addrIndexed; - zext_zb(Zt); # zero upper 31 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR8 = NEON_ldr(addrIndexed); -@endif } # C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400400/mask=x3f600c00 @@ -16789,15 +10136,8 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR8 & add :ldr Rt_FPR16, addrIndexed is b_3031=0b01 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR16 & addrIndexed & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR16 = * addrIndexed; zext_zh(Zt); # zero upper 30 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR16 = * addrIndexed; - zext_zh(Zt); # zero upper 30 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR16 = NEON_ldr(addrIndexed); -@endif } # C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400400/mask=x3f600c00 @@ -16811,15 +10151,8 @@ is b_3031=0b01 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR16 & ad :ldr Rt_FPR32, addrIndexed is b_3031=0b10 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR32 & addrIndexed & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR32 = * addrIndexed; zext_zs(Zt); # zero upper 28 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR32 = * addrIndexed; - zext_zs(Zt); # zero upper 28 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR32 = NEON_ldr(addrIndexed); -@endif } # C7.2.191 LDR 
(immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400400/mask=x3f600c00 @@ -16833,15 +10166,8 @@ is b_3031=0b10 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR32 & ad :ldr Rt_FPR64, addrIndexed is b_3031=0b11 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR64 & addrIndexed & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR64 = * addrIndexed; zext_zd(Zt); # zero upper 24 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR64 = * addrIndexed; - zext_zd(Zt); # zero upper 24 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR64 = NEON_ldr(addrIndexed); -@endif } # C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3c400400/mask=x3f600c00 @@ -16855,15 +10181,8 @@ is b_3031=0b11 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR64 & ad :ldr Rt_FPR128, addrIndexed is b_3031=0b00 & b_2429=0b111100 & b_2223=0b11 & b_21=0 & b_10=1 & Rt_FPR128 & addrIndexed & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR128 = * addrIndexed; zext_zq(Zt); # zero upper 16 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR128 = * addrIndexed; - zext_zq(Zt); # zero upper 16 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR128 = NEON_ldr(addrIndexed); -@endif } # C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3d400000/mask=x3f400000 @@ -16876,15 +10195,8 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b11 & b_21=0 & b_10=1 & Rt_FPR128 & a :ldr Rt_FPR8, addrUIMM is b_3031=0b00 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR8 & addrUIMM & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR8 = * addrUIMM; zext_zb(Zt); # zero upper 31 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR8 = * addrUIMM; - zext_zb(Zt); # zero upper 31 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR8 = NEON_ldr(addrUIMM); -@endif } # C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3d400000/mask=x3f400000 @@ -16897,15 +10209,8 @@ is b_3031=0b00 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR8 & addrUIMM & Zt :ldr Rt_FPR16, addrUIMM is b_3031=0b01 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR16 & addrUIMM & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR16 = * addrUIMM; zext_zh(Zt); # zero upper 30 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR16 = * addrUIMM; - zext_zh(Zt); # zero upper 30 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR16 = NEON_ldr(addrUIMM); -@endif } # C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3d400000/mask=x3f400000 @@ -16918,15 +10223,8 @@ is b_3031=0b01 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR16 & addrUIMM & Zt :ldr Rt_FPR32, addrUIMM is b_3031=0b10 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR32 & addrUIMM & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR32 = * addrUIMM; zext_zs(Zt); # zero upper 28 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR32 = * addrUIMM; - zext_zs(Zt); # zero upper 28 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR32 = NEON_ldr(addrUIMM); -@endif } # C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3d400000/mask=x3f400000 @@ -16939,15 +10237,8 @@ is b_3031=0b10 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR32 & addrUIMM & Zt :ldr Rt_FPR64, addrUIMM is b_3031=0b11 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR64 & addrUIMM & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR64 = * addrUIMM; zext_zd(Zt); # zero upper 24 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR64 = * addrUIMM; - zext_zd(Zt); # zero upper 24 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR64 = NEON_ldr(addrUIMM); -@endif } # C7.2.191 LDR (immediate, SIMD&FP) page C7-1835 line 102884 MATCH x3d400000/mask=x3f400000 @@ -16960,15 
+10251,8 @@ is b_3031=0b11 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR64 & addrUIMM & Zt :ldr Rt_FPR128, addrUIMM is b_3031=0b00 & b_2429=0b111101 & b_2223=0b11 & Rt_FPR128 & addrUIMM & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR128 = * addrUIMM; zext_zq(Zt); # zero upper 16 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR128 = * addrUIMM; - zext_zq(Zt); # zero upper 16 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR128 = NEON_ldr(addrUIMM); -@endif } # C7.2.192 LDR (literal, SIMD&FP) page C7-1839 line 103142 MATCH x1c000000/mask=x3f000000 @@ -16980,15 +10264,8 @@ is b_3031=0b00 & b_2429=0b111101 & b_2223=0b11 & Rt_FPR128 & addrUIMM & Zt :ldr Rt_FPR64, AddrLoc19 is size.ldstr=1 & b_2729=3 & v=1 & b_2425=0 & AddrLoc19 & Rt_FPR64 & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR64 = *:8 AddrLoc19; zext_zd(Zt); # zero upper 24 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR64 = *:8 AddrLoc19; - zext_zd(Zt); # zero upper 24 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR64 = NEON_ldr(AddrLoc19); -@endif } # C7.2.192 LDR (literal, SIMD&FP) page C7-1839 line 103142 MATCH x1c000000/mask=x3f000000 @@ -17000,15 +10277,8 @@ is size.ldstr=1 & b_2729=3 & v=1 & b_2425=0 & AddrLoc19 & Rt_FPR64 & Zt :ldr Rt_FPR128, AddrLoc19 is size.ldstr=2 & b_2729=3 & v=1 & b_2425=0 & AddrLoc19 & Rt_FPR128 & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR128 = *:16 AddrLoc19; zext_zq(Zt); # zero upper 16 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR128 = *:16 AddrLoc19; - zext_zq(Zt); # zero upper 16 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR128 = NEON_ldr(AddrLoc19); -@endif } # C7.2.192 LDR (literal, SIMD&FP) page C7-1839 line 103142 MATCH x1c000000/mask=x3f000000 @@ -17020,15 +10290,8 @@ is size.ldstr=2 & b_2729=3 & v=1 & b_2425=0 & AddrLoc19 & Rt_FPR128 & Zt :ldr Rt_FPR32, AddrLoc19 is size.ldstr=0 & b_2729=3 & v=1 & b_2425=0 & AddrLoc19 & Rt_FPR32 & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR32 = *:4 AddrLoc19; zext_zs(Zt); # zero upper 28 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR32 = *:4 AddrLoc19; - zext_zs(Zt); # zero upper 28 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR32 = NEON_ldr(AddrLoc19); -@endif } # C7.2.178 LDR (register, SIMD&FP) page C7-1411 line 82199 KEEPWITH @@ -17060,19 +10323,10 @@ extend_spec: "" is b_1315=0b011 & b_12=0 & Rm_GPR64 { export Rm_GPR64; } # same :ldr Rt_FPR8, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; Rt_FPR8 = * tmp2; zext_zb(Zt); # zero upper 31 bytes of Zt -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - Rt_FPR8 = * tmp2; - zext_zb(Zt); # zero upper 31 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR8 = NEON_ldr(Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 @@ -17085,19 +10339,10 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & :ldr Rt_FPR8, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; Rt_FPR8 = * tmp2; 
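# A minimal sketch (illustrative only; Rt_FPRn and zext_zn stand in for the size-specific
# destination register and zero-extension macro) of the register-offset pattern that every
# LDR (register, SIMD&FP) body in these hunks keeps as its single unconditional semantic:
#   local tmp1:8 = extend_spec << extend_amount;   # extended index register, scaled
#   local tmp2:8 = Rn_GPR64xsp + tmp1;             # effective address
#   Rt_FPRn = *tmp2;                               # scalar load into the FP/SIMD register
#   zext_zn(Zt);                                   # clear the unused upper bytes of Zt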
zext_zb(Zt); # zero upper 31 bytes of Zt -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - Rt_FPR8 = * tmp2; - zext_zb(Zt); # zero upper 31 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR8 = NEON_ldr(Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 @@ -17110,19 +10355,10 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & :ldr Rt_FPR8, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_1315=0b011 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; Rt_FPR8 = * tmp2; zext_zb(Zt); # zero upper 31 bytes of Zt -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - Rt_FPR8 = * tmp2; - zext_zb(Zt); # zero upper 31 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR8 = NEON_ldr(Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 @@ -17135,19 +10371,10 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_1315=0b011 & b_1011= :ldr Rt_FPR16, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b01 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR16 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; Rt_FPR16 = * tmp2; zext_zh(Zt); # zero upper 30 bytes of Zt -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - Rt_FPR16 = * tmp2; - zext_zh(Zt); # zero upper 30 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR16 = NEON_ldr(Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 @@ -17160,19 +10387,10 @@ is b_3031=0b01 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & :ldr Rt_FPR16, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b01 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR16 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; Rt_FPR16 = * tmp2; zext_zh(Zt); # zero upper 30 bytes of Zt -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - Rt_FPR16 = * tmp2; - zext_zh(Zt); # zero upper 30 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR16 = NEON_ldr(Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 @@ -17185,19 +10403,10 @@ is b_3031=0b01 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & :ldr Rt_FPR32, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b10 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR32 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; Rt_FPR32 = * tmp2; zext_zs(Zt); # zero upper 28 bytes of Zt -@elif 
defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - Rt_FPR32 = * tmp2; - zext_zs(Zt); # zero upper 28 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR32 = NEON_ldr(Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 @@ -17210,19 +10419,10 @@ is b_3031=0b10 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & :ldr Rt_FPR32, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b10 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR32 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; Rt_FPR32 = * tmp2; zext_zs(Zt); # zero upper 28 bytes of Zt -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - Rt_FPR32 = * tmp2; - zext_zs(Zt); # zero upper 28 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR32 = NEON_ldr(Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 @@ -17235,19 +10435,10 @@ is b_3031=0b10 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & :ldr Rt_FPR64, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b11 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR64 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; Rt_FPR64 = * tmp2; zext_zd(Zt); # zero upper 24 bytes of Zt -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - Rt_FPR64 = * tmp2; - zext_zd(Zt); # zero upper 24 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR64 = NEON_ldr(Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 @@ -17260,19 +10451,10 @@ is b_3031=0b11 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & :ldr Rt_FPR64, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b11 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR64 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; Rt_FPR64 = * tmp2; zext_zd(Zt); # zero upper 24 bytes of Zt -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - Rt_FPR64 = * tmp2; - zext_zd(Zt); # zero upper 24 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR64 = NEON_ldr(Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 @@ -17285,19 +10467,10 @@ is b_3031=0b11 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & :ldr Rt_FPR128, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b11 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR128 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; Rt_FPR128 = * tmp2; zext_zq(Zt); # zero upper 16 bytes of Zt -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << 
extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - Rt_FPR128 = * tmp2; - zext_zq(Zt); # zero upper 16 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR128 = NEON_ldr(Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.193 LDR (register, SIMD&FP) page C7-1841 line 103241 MATCH x3c600800/mask=x3f600c00 @@ -17310,19 +10483,10 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b11 & b_21=1 & b_13=0 & b_1011=0b10 & :ldr Rt_FPR128, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b11 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR128 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; Rt_FPR128 = * tmp2; zext_zq(Zt); # zero upper 16 bytes of Zt -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - Rt_FPR128 = * tmp2; - zext_zq(Zt); # zero upper 16 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR128 = NEON_ldr(Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.194 LDUR (SIMD&FP) page C7-1844 line 103424 MATCH x3c400000/mask=x3f600c00 @@ -17334,15 +10498,8 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b11 & b_21=1 & b_13=1 & b_1011=0b10 & :ldur Rt_FPR128, addrIndexed is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=1 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR128 & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR128 = * addrIndexed; zext_zq(Zt); # zero upper 16 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR128 = * addrIndexed; - zext_zq(Zt); # zero upper 16 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR128 = NEON_ldur(addrIndexed); -@endif } # C7.2.194 LDUR (SIMD&FP) page C7-1844 line 103424 MATCH x3c400000/mask=x3f600c00 @@ -17354,15 +10511,8 @@ is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=1 & b_2222=1 & b_2121=0 & b_1 :ldur Rt_FPR16, addrIndexed is size.ldstr=1 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR16 & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR16 = * addrIndexed; zext_zh(Zt); # zero upper 30 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR16 = * addrIndexed; - zext_zh(Zt); # zero upper 30 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR16 = NEON_ldur(addrIndexed); -@endif } # C7.2.194 LDUR (SIMD&FP) page C7-1844 line 103424 MATCH x3c400000/mask=x3f600c00 @@ -17374,15 +10524,8 @@ is size.ldstr=1 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1 :ldur Rt_FPR32, addrIndexed is size.ldstr=2 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR32 & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR32 = * addrIndexed; zext_zs(Zt); # zero upper 28 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR32 = * addrIndexed; - zext_zs(Zt); # zero upper 28 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR32 = NEON_ldur(addrIndexed); -@endif } # C7.2.194 LDUR (SIMD&FP) page C7-1844 line 103424 MATCH x3c400000/mask=x3f600c00 @@ -17394,15 +10537,8 @@ is size.ldstr=2 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1 :ldur Rt_FPR64, addrIndexed is size.ldstr=3 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR64 & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR64 = * addrIndexed; zext_zd(Zt); # zero upper 24 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR64 = * addrIndexed; - zext_zd(Zt); # zero upper 24 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR64 = 
NEON_ldur(addrIndexed); -@endif } # C7.2.194 LDUR (SIMD&FP) page C7-1844 line 103424 MATCH x3c400000/mask=x3f600c00 @@ -17414,15 +10550,8 @@ is size.ldstr=3 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1 :ldur Rt_FPR8, addrIndexed is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR8 & Zt { -@if defined(SEMANTIC_primitive) Rt_FPR8 = * addrIndexed; zext_zb(Zt); # zero upper 31 bytes of Zt -@elif defined(SEMANTIC_pcode) - Rt_FPR8 = * addrIndexed; - zext_zb(Zt); # zero upper 31 bytes of Zt -@elif defined(SEMANTIC_pseudo) - Rt_FPR8 = NEON_ldur(addrIndexed); -@endif } # C7.2.195 MLA (by element) page C7-1846 line 103549 MATCH x2f000000/mask=xbf00f400 @@ -17434,42 +10563,15 @@ is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1 :mla Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & vIndex & Re_VPR128.S & b_1215=0x0 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix TMPD1 = Rn_VPR64.2S * tmp2 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp5, TMPD1, 0, 4, 8); - * [register]:4 tmp5 = (* [register]:4 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp5, TMPD1, 1, 4, 8); - * [register]:4 tmp5 = (* [register]:4 tmp4) * tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPD1 = Rn_VPR64.2S * tmp1 on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] * tmp1; + TMPD1[32,32] = Rn_VPR64.2S[32,32] * tmp1; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp7, TMPD1, 0, 4, 8); - simd_address_at(tmp8, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp7, TMPD1, 1, 4, 8); - simd_address_at(tmp8, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmp2:8 = SIMD_INT_MULT(Rn_VPR64.2S, tmp1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR64.2S = NEON_mla(Rd_VPR64.2S, Rn_VPR64.2S, tmp1, 4:1); -@endif } # C7.2.195 MLA (by element) page C7-1846 line 103549 MATCH x2f000000/mask=xbf00f400 @@ -17481,56 +10583,19 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & vI :mla Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x0 & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix TMPD1 = Rn_VPR64.4H * tmp2 on lane size 2 - 
local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp5, TMPD1, 0, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp5, TMPD1, 1, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp5, TMPD1, 2, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp5, TMPD1, 3, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD1 = Rn_VPR64.4H * tmp1 on lane size 2 + TMPD1[0,16] = Rn_VPR64.4H[0,16] * tmp1; + TMPD1[16,16] = Rn_VPR64.4H[16,16] * tmp1; + TMPD1[32,16] = Rn_VPR64.4H[32,16] * tmp1; + TMPD1[48,16] = Rn_VPR64.4H[48,16] * tmp1; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp7, TMPD1, 0, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp7, TMPD1, 1, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp7, TMPD1, 2, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp7, TMPD1, 3, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp2:8 = SIMD_INT_MULT(Rn_VPR64.4H, tmp1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.4H, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.4H = NEON_mla(Rd_VPR64.4H, Rn_VPR64.4H, tmp1, 2:1); -@endif } # C7.2.195 MLA (by element) page C7-1846 line 103549 MATCH x2f000000/mask=xbf00f400 @@ -17542,56 +10607,19 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :mla Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x0 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix TMPQ1 = Rn_VPR128.4S * tmp2 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp5, 
TMPQ1, 2, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) * tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPQ1 = Rn_VPR128.4S * tmp1 on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] * tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] * tmp1; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] * tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] * tmp1; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp7, TMPQ1, 0, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp7, TMPQ1, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp7, TMPQ1, 2, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp7, TMPQ1, 3, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmp2:16 = SIMD_INT_MULT(Rn_VPR128.4S, tmp1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.4S = NEON_mla(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1); -@endif } # C7.2.195 MLA (by element) page C7-1846 line 103549 MATCH x2f000000/mask=xbf00f400 @@ -17603,84 +10631,27 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :mla Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x0 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix TMPQ1 = Rn_VPR128.8H * tmp2 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - * [register]:2 tmp5 = (* 
[register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPQ1 = Rn_VPR128.8H * tmp1 on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] * tmp1; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] * tmp1; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] * tmp1; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] * tmp1; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] * tmp1; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] * tmp1; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] * tmp1; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] * tmp1; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp7, TMPQ1, 0, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp7, TMPQ1, 1, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp7, TMPQ1, 2, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp7, TMPQ1, 3, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp7, TMPQ1, 4, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp7, TMPQ1, 5, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp7, TMPQ1, 6, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp7, TMPQ1, 7, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp2:16 = SIMD_INT_MULT(Rn_VPR128.8H, tmp1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = 
SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.8H = NEON_mla(Rd_VPR128.8H, Rn_VPR128.8H, tmp1, 2:1); -@endif } # C7.2.196 MLA (vector) page C7-1848 line 103681 MATCH x0e209400/mask=xbf20fc00 @@ -17692,151 +10663,41 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :mla Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x12 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.16B * Rm_VPR128.16B on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp4, TMPQ1, 0, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp4, TMPQ1, 1, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp4, TMPQ1, 2, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp4, TMPQ1, 3, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp4, TMPQ1, 4, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp4, TMPQ1, 5, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp4, TMPQ1, 6, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp4, TMPQ1, 7, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp4, TMPQ1, 8, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp4, TMPQ1, 9, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp4, TMPQ1, 10, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp4, TMPQ1, 11, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp4, TMPQ1, 12, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 13, 1, 16); - 
simd_address_at(tmp4, TMPQ1, 13, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp4, TMPQ1, 14, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp4, TMPQ1, 15, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); + TMPQ1[0,8] = Rn_VPR128.16B[0,8] * Rm_VPR128.16B[0,8]; + TMPQ1[8,8] = Rn_VPR128.16B[8,8] * Rm_VPR128.16B[8,8]; + TMPQ1[16,8] = Rn_VPR128.16B[16,8] * Rm_VPR128.16B[16,8]; + TMPQ1[24,8] = Rn_VPR128.16B[24,8] * Rm_VPR128.16B[24,8]; + TMPQ1[32,8] = Rn_VPR128.16B[32,8] * Rm_VPR128.16B[32,8]; + TMPQ1[40,8] = Rn_VPR128.16B[40,8] * Rm_VPR128.16B[40,8]; + TMPQ1[48,8] = Rn_VPR128.16B[48,8] * Rm_VPR128.16B[48,8]; + TMPQ1[56,8] = Rn_VPR128.16B[56,8] * Rm_VPR128.16B[56,8]; + TMPQ1[64,8] = Rn_VPR128.16B[64,8] * Rm_VPR128.16B[64,8]; + TMPQ1[72,8] = Rn_VPR128.16B[72,8] * Rm_VPR128.16B[72,8]; + TMPQ1[80,8] = Rn_VPR128.16B[80,8] * Rm_VPR128.16B[80,8]; + TMPQ1[88,8] = Rn_VPR128.16B[88,8] * Rm_VPR128.16B[88,8]; + TMPQ1[96,8] = Rn_VPR128.16B[96,8] * Rm_VPR128.16B[96,8]; + TMPQ1[104,8] = Rn_VPR128.16B[104,8] * Rm_VPR128.16B[104,8]; + TMPQ1[112,8] = Rn_VPR128.16B[112,8] * Rm_VPR128.16B[112,8]; + TMPQ1[120,8] = Rn_VPR128.16B[120,8] * Rm_VPR128.16B[120,8]; # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.16B, 0, 1, 16); - simd_address_at(tmp6, TMPQ1, 0, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 1, 1, 16); - simd_address_at(tmp6, TMPQ1, 1, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 2, 1, 16); - simd_address_at(tmp6, TMPQ1, 2, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 3, 1, 16); - simd_address_at(tmp6, TMPQ1, 3, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 4, 1, 16); - simd_address_at(tmp6, TMPQ1, 4, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 5, 1, 16); - simd_address_at(tmp6, TMPQ1, 5, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 6, 1, 16); - simd_address_at(tmp6, TMPQ1, 6, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 7, 1, 16); - simd_address_at(tmp6, TMPQ1, 7, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 8, 1, 16); - simd_address_at(tmp6, TMPQ1, 8, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - 
simd_address_at(tmp5, Rd_VPR128.16B, 9, 1, 16); - simd_address_at(tmp6, TMPQ1, 9, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 10, 1, 16); - simd_address_at(tmp6, TMPQ1, 10, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 11, 1, 16); - simd_address_at(tmp6, TMPQ1, 11, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 12, 1, 16); - simd_address_at(tmp6, TMPQ1, 12, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 13, 1, 16); - simd_address_at(tmp6, TMPQ1, 13, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 14, 1, 16); - simd_address_at(tmp6, TMPQ1, 14, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 15, 1, 16); - simd_address_at(tmp6, TMPQ1, 15, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_MULT(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.16B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_mla(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.196 MLA (vector) page C7-1848 line 103681 MATCH x0e209400/mask=xbf20fc00 @@ -17848,39 +10709,13 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :mla Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x12 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.2S * Rm_VPR64.2S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) * (* [register]:4 tmp3); - 
simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) * (* [register]:4 tmp3); + TMPD1[0,32] = Rn_VPR64.2S[0,32] * Rm_VPR64.2S[0,32]; + TMPD1[32,32] = Rn_VPR64.2S[32,32] * Rm_VPR64.2S[32,32]; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPD1, 0, 4, 8); - simd_address_at(tmp7, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPD1, 1, 4, 8); - simd_address_at(tmp7, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_MULT(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_mla(Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.196 MLA (vector) page C7-1848 line 103681 MATCH x0e209400/mask=xbf20fc00 @@ -17892,55 +10727,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :mla Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x12 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.4H * Rm_VPR64.4H on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp4, TMPD1, 0, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); + TMPD1[0,16] = Rn_VPR64.4H[0,16] * Rm_VPR64.4H[0,16]; + TMPD1[16,16] = Rn_VPR64.4H[16,16] * Rm_VPR64.4H[16,16]; + TMPD1[32,16] = Rn_VPR64.4H[32,16] * Rm_VPR64.4H[32,16]; + TMPD1[48,16] = Rn_VPR64.4H[48,16] * Rm_VPR64.4H[48,16]; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPD1, 0, 2, 8); - simd_address_at(tmp7, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp5) + (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPD1, 1, 2, 8); - simd_address_at(tmp7, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp5) + (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPD1, 2, 2, 8); - simd_address_at(tmp7, Rd_VPR64.4H, 2, 2, 
8); - * [register]:2 tmp7 = (* [register]:2 tmp5) + (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPD1, 3, 2, 8); - simd_address_at(tmp7, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp5) + (* [register]:2 tmp6); + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_MULT(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.4H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_mla(Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.196 MLA (vector) page C7-1848 line 103681 MATCH x0e209400/mask=xbf20fc00 @@ -17952,55 +10749,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :mla Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S * Rm_VPR128.4S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) * (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) * (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) * (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) * (* [register]:4 tmp3); + TMPQ1[0,32] = Rn_VPR128.4S[0,32] * Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] * Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] * Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] * Rm_VPR128.4S[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp6, TMPQ1, 0, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp6, TMPQ1, 1, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp6, TMPQ1, 2, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp6, TMPQ1, 3, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + 
TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_MULT(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_mla(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.196 MLA (vector) page C7-1848 line 103681 MATCH x0e209400/mask=xbf20fc00 @@ -18012,87 +10771,25 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :mla Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x12 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.8B * Rm_VPR64.8B on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp4, TMPD1, 0, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); + TMPD1[0,8] = Rn_VPR64.8B[0,8] * Rm_VPR64.8B[0,8]; + TMPD1[8,8] = Rn_VPR64.8B[8,8] * Rm_VPR64.8B[8,8]; + TMPD1[16,8] = Rn_VPR64.8B[16,8] * Rm_VPR64.8B[16,8]; + TMPD1[24,8] = Rn_VPR64.8B[24,8] * Rm_VPR64.8B[24,8]; + TMPD1[32,8] = Rn_VPR64.8B[32,8] * Rm_VPR64.8B[32,8]; + TMPD1[40,8] = Rn_VPR64.8B[40,8] * Rm_VPR64.8B[40,8]; + TMPD1[48,8] = Rn_VPR64.8B[48,8] * Rm_VPR64.8B[48,8]; + TMPD1[56,8] = Rn_VPR64.8B[56,8] * Rm_VPR64.8B[56,8]; # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPD1, 0, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPD1, 1, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 1, 
1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPD1, 2, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPD1, 3, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPD1, 4, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPD1, 5, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPD1, 6, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPD1, 7, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) + (* [register]:1 tmp6); + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_MULT(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.8B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_mla(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.196 MLA (vector) page C7-1848 line 103681 MATCH x0e209400/mask=xbf20fc00 @@ -18104,87 +10801,25 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :mla Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H * Rm_VPR128.8H on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, 
Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 7, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); + TMPQ1[0,16] = Rn_VPR128.8H[0,16] * Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] * Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] * Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] * Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] * Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] * Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] * Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] * Rm_VPR128.8H[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp6, TMPQ1, 0, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) + (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp6, TMPQ1, 1, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) + (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp6, TMPQ1, 2, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) + (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp6, TMPQ1, 3, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) + (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp6, TMPQ1, 4, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) + (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp6, TMPQ1, 5, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) + (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp6, TMPQ1, 6, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) + (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp6, TMPQ1, 7, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) + (* [register]:2 tmp6); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; 
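# The V[b,n] bit-range syntax used throughout these rewritten bodies reads or writes the
# n-bit lane that begins at bit b of the varnode, so an N-lane operation unrolls into N
# slice assignments through a temporary. A minimal sketch of the 2-lane, 32-bit case
# (illustrative only; TMPD1 is the same scratch register the surrounding semantics use):
#   TMPD1[0,32]        = Rn_VPR64.2S[0,32]  * Rm_VPR64.2S[0,32];
#   TMPD1[32,32]       = Rn_VPR64.2S[32,32] * Rm_VPR64.2S[32,32];
#   Rd_VPR64.2S[0,32]  = Rd_VPR64.2S[0,32]  + TMPD1[0,32];
#   Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32];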
zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_MULT(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_mla(Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.197 MLS (by element) page C7-1850 line 103784 MATCH x2f004000/mask=xbf00f400 @@ -18196,42 +10831,15 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :mls Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x4 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix TMPD1 = Rn_VPR64.2S * tmp2 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp5, TMPD1, 0, 4, 8); - * [register]:4 tmp5 = (* [register]:4 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp5, TMPD1, 1, 4, 8); - * [register]:4 tmp5 = (* [register]:4 tmp4) * tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPD1 = Rn_VPR64.2S * tmp1 on lane size 4 + TMPD1[0,32] = Rn_VPR64.2S[0,32] * tmp1; + TMPD1[32,32] = Rn_VPR64.2S[32,32] * tmp1; # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD1 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp7, TMPD1, 0, 4, 8); - simd_address_at(tmp8, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp7, TMPD1, 1, 4, 8); - simd_address_at(tmp8, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmp2:8 = SIMD_INT_MULT(Rn_VPR64.2S, tmp1); - local tmpd:8 = SIMD_INT_SUB(Rd_VPR64.2S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR64.2S = NEON_mls(Rd_VPR64.2S, Rn_VPR64.2S, tmp1, 4:1); -@endif } # C7.2.197 MLS (by element) page C7-1850 line 103784 MATCH x2f004000/mask=xbf00f400 @@ -18243,56 +10851,19 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :mls Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x4 & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix TMPD1 = Rn_VPR64.4H * tmp2 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp5, TMPD1, 0, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp5, TMPD1, 1, 2, 8); - * [register]:2 
tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp5, TMPD1, 2, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp5, TMPD1, 3, 2, 8); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPD1 = Rn_VPR64.4H * tmp1 on lane size 2 + TMPD1[0,16] = Rn_VPR64.4H[0,16] * tmp1; + TMPD1[16,16] = Rn_VPR64.4H[16,16] * tmp1; + TMPD1[32,16] = Rn_VPR64.4H[32,16] * tmp1; + TMPD1[48,16] = Rn_VPR64.4H[48,16] * tmp1; # simd infix Rd_VPR64.4H = Rd_VPR64.4H - TMPD1 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp7, TMPD1, 0, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp7, TMPD1, 1, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp7, TMPD1, 2, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp7, TMPD1, 3, 2, 8); - simd_address_at(tmp8, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] - TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] - TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] - TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] - TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp2:8 = SIMD_INT_MULT(Rn_VPR64.4H, tmp1); - local tmpd:8 = SIMD_INT_SUB(Rd_VPR64.4H, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.4H = NEON_mls(Rd_VPR64.4H, Rn_VPR64.4H, tmp1, 2:1); -@endif } # C7.2.197 MLS (by element) page C7-1850 line 103784 MATCH x2f004000/mask=xbf00f400 @@ -18304,56 +10875,19 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :mls Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x4 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix TMPQ1 = Rn_VPR128.4S * tmp2 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) * tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix TMPQ1 = 
Rn_VPR128.4S * tmp1 on lane size 4 + TMPQ1[0,32] = Rn_VPR128.4S[0,32] * tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] * tmp1; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] * tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] * tmp1; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ1 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp7, TMPQ1, 0, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp7, TMPQ1, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp7, TMPQ1, 2, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp7, TMPQ1, 3, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmp2:16 = SIMD_INT_MULT(Rn_VPR128.4S, tmp1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.4S = NEON_mls(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1); -@endif } # C7.2.197 MLS (by element) page C7-1850 line 103784 MATCH x2f004000/mask=xbf00f400 @@ -18365,84 +10899,27 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :mls Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x4 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix TMPQ1 = Rn_VPR128.8H * tmp2 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - * 
[register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; - simd_address_at(tmp4, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) * tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix TMPQ1 = Rn_VPR128.8H * tmp1 on lane size 2 + TMPQ1[0,16] = Rn_VPR128.8H[0,16] * tmp1; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] * tmp1; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] * tmp1; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] * tmp1; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] * tmp1; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] * tmp1; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] * tmp1; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] * tmp1; # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ1 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp7, TMPQ1, 0, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp7, TMPQ1, 1, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp7, TMPQ1, 2, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp7, TMPQ1, 3, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp7, TMPQ1, 4, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp7, TMPQ1, 5, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp7, TMPQ1, 6, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp7, TMPQ1, 7, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmp2:16 = SIMD_INT_MULT(Rn_VPR128.8H, tmp1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.8H, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.8H = NEON_mls(Rd_VPR128.8H, Rn_VPR128.8H, tmp1, 2:1); -@endif } # C7.2.198 MLS (vector) page C7-1852 line 103916 MATCH x2e209400/mask=xbf20fc00 @@ -18454,151 +10931,41 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & 
advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :mls Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x12 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.16B * Rm_VPR128.16B on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp4, TMPQ1, 0, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp4, TMPQ1, 1, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp4, TMPQ1, 2, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp4, TMPQ1, 3, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp4, TMPQ1, 4, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp4, TMPQ1, 5, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp4, TMPQ1, 6, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp4, TMPQ1, 7, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp4, TMPQ1, 8, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp4, TMPQ1, 9, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp4, TMPQ1, 10, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp4, TMPQ1, 11, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp4, TMPQ1, 12, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 13, 1, 16); - simd_address_at(tmp4, TMPQ1, 13, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp4, TMPQ1, 14, 1, 16); - * [register]:1 
tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp4, TMPQ1, 15, 1, 16); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); + TMPQ1[0,8] = Rn_VPR128.16B[0,8] * Rm_VPR128.16B[0,8]; + TMPQ1[8,8] = Rn_VPR128.16B[8,8] * Rm_VPR128.16B[8,8]; + TMPQ1[16,8] = Rn_VPR128.16B[16,8] * Rm_VPR128.16B[16,8]; + TMPQ1[24,8] = Rn_VPR128.16B[24,8] * Rm_VPR128.16B[24,8]; + TMPQ1[32,8] = Rn_VPR128.16B[32,8] * Rm_VPR128.16B[32,8]; + TMPQ1[40,8] = Rn_VPR128.16B[40,8] * Rm_VPR128.16B[40,8]; + TMPQ1[48,8] = Rn_VPR128.16B[48,8] * Rm_VPR128.16B[48,8]; + TMPQ1[56,8] = Rn_VPR128.16B[56,8] * Rm_VPR128.16B[56,8]; + TMPQ1[64,8] = Rn_VPR128.16B[64,8] * Rm_VPR128.16B[64,8]; + TMPQ1[72,8] = Rn_VPR128.16B[72,8] * Rm_VPR128.16B[72,8]; + TMPQ1[80,8] = Rn_VPR128.16B[80,8] * Rm_VPR128.16B[80,8]; + TMPQ1[88,8] = Rn_VPR128.16B[88,8] * Rm_VPR128.16B[88,8]; + TMPQ1[96,8] = Rn_VPR128.16B[96,8] * Rm_VPR128.16B[96,8]; + TMPQ1[104,8] = Rn_VPR128.16B[104,8] * Rm_VPR128.16B[104,8]; + TMPQ1[112,8] = Rn_VPR128.16B[112,8] * Rm_VPR128.16B[112,8]; + TMPQ1[120,8] = Rn_VPR128.16B[120,8] * Rm_VPR128.16B[120,8]; # simd infix Rd_VPR128.16B = Rd_VPR128.16B - TMPQ1 on lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.16B, 0, 1, 16); - simd_address_at(tmp6, TMPQ1, 0, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 1, 1, 16); - simd_address_at(tmp6, TMPQ1, 1, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 2, 1, 16); - simd_address_at(tmp6, TMPQ1, 2, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 3, 1, 16); - simd_address_at(tmp6, TMPQ1, 3, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 4, 1, 16); - simd_address_at(tmp6, TMPQ1, 4, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 5, 1, 16); - simd_address_at(tmp6, TMPQ1, 5, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 6, 1, 16); - simd_address_at(tmp6, TMPQ1, 6, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 7, 1, 16); - simd_address_at(tmp6, TMPQ1, 7, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 8, 1, 16); - simd_address_at(tmp6, TMPQ1, 8, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 9, 1, 16); - simd_address_at(tmp6, TMPQ1, 9, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 10, 1, 16); - simd_address_at(tmp6, TMPQ1, 
10, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 11, 1, 16); - simd_address_at(tmp6, TMPQ1, 11, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 12, 1, 16); - simd_address_at(tmp6, TMPQ1, 12, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 13, 1, 16); - simd_address_at(tmp6, TMPQ1, 13, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 14, 1, 16); - simd_address_at(tmp6, TMPQ1, 14, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR128.16B, 15, 1, 16); - simd_address_at(tmp6, TMPQ1, 15, 1, 16); - simd_address_at(tmp7, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] - TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] - TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] - TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] - TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] - TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] - TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] - TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] - TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] - TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] - TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] - TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] - TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] - TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] - TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] - TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] - TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_MULT(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.16B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_mls(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.198 MLS (vector) page C7-1852 line 103916 MATCH x2e209400/mask=xbf20fc00 @@ -18610,39 +10977,13 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :mls Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x12 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.2S * Rm_VPR64.2S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) * (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) * (* [register]:4 tmp3); + TMPD1[0,32] = Rn_VPR64.2S[0,32] * Rm_VPR64.2S[0,32]; + TMPD1[32,32] = Rn_VPR64.2S[32,32] 
* Rm_VPR64.2S[32,32]; # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPD1, 0, 4, 8); - simd_address_at(tmp7, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) - (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPD1, 1, 4, 8); - simd_address_at(tmp7, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) - (* [register]:4 tmp6); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_MULT(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - local tmpd:8 = SIMD_INT_SUB(Rd_VPR64.2S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_mls(Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.198 MLS (vector) page C7-1852 line 103916 MATCH x2e209400/mask=xbf20fc00 @@ -18654,55 +10995,17 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :mls Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x12 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.4H * Rm_VPR64.4H on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp4, TMPD1, 0, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); + TMPD1[0,16] = Rn_VPR64.4H[0,16] * Rm_VPR64.4H[0,16]; + TMPD1[16,16] = Rn_VPR64.4H[16,16] * Rm_VPR64.4H[16,16]; + TMPD1[32,16] = Rn_VPR64.4H[32,16] * Rm_VPR64.4H[32,16]; + TMPD1[48,16] = Rn_VPR64.4H[48,16] * Rm_VPR64.4H[48,16]; # simd infix Rd_VPR64.4H = Rd_VPR64.4H - TMPD1 on lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPD1, 0, 2, 8); - simd_address_at(tmp7, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp5) - (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPD1, 1, 2, 8); - simd_address_at(tmp7, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp5) - (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPD1, 2, 2, 8); - simd_address_at(tmp7, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp5) - (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPD1, 3, 2, 8); - simd_address_at(tmp7, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp7 = (* [register]:2 tmp5) - (* [register]:2 tmp6); + 
Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] - TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] - TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] - TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] - TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_MULT(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); - local tmpd:8 = SIMD_INT_SUB(Rd_VPR64.4H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_mls(Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.198 MLS (vector) page C7-1852 line 103916 MATCH x2e209400/mask=xbf20fc00 @@ -18714,55 +11017,17 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :mls Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S * Rm_VPR128.4S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) * (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) * (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) * (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) * (* [register]:4 tmp3); + TMPQ1[0,32] = Rn_VPR128.4S[0,32] * Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] * Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] * Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] * Rm_VPR128.4S[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp6, TMPQ1, 0, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) - (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp6, TMPQ1, 1, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) - (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp6, TMPQ1, 2, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) - (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp6, TMPQ1, 3, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) - (* [register]:4 tmp6); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_MULT(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - local tmpd:16 = 
SIMD_INT_SUB(Rd_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_mls(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.198 MLS (vector) page C7-1852 line 103916 MATCH x2e209400/mask=xbf20fc00 @@ -18774,87 +11039,25 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :mls Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x12 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.8B * Rm_VPR64.8B on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp4, TMPD1, 0, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - * [register]:1 tmp4 = (* [register]:1 tmp2) * (* [register]:1 tmp3); + TMPD1[0,8] = Rn_VPR64.8B[0,8] * Rm_VPR64.8B[0,8]; + TMPD1[8,8] = Rn_VPR64.8B[8,8] * Rm_VPR64.8B[8,8]; + TMPD1[16,8] = Rn_VPR64.8B[16,8] * Rm_VPR64.8B[16,8]; + TMPD1[24,8] = Rn_VPR64.8B[24,8] * Rm_VPR64.8B[24,8]; + TMPD1[32,8] = Rn_VPR64.8B[32,8] * Rm_VPR64.8B[32,8]; + TMPD1[40,8] = Rn_VPR64.8B[40,8] * Rm_VPR64.8B[40,8]; + TMPD1[48,8] = Rn_VPR64.8B[48,8] * Rm_VPR64.8B[48,8]; + TMPD1[56,8] = Rn_VPR64.8B[56,8] * Rm_VPR64.8B[56,8]; # simd infix Rd_VPR64.8B = Rd_VPR64.8B - TMPD1 on lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPD1, 0, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPD1, 1, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPD1, 2, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - 
simd_address_at(tmp5, Rd_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPD1, 3, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPD1, 4, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPD1, 5, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPD1, 6, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); - simd_address_at(tmp5, Rd_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPD1, 7, 1, 8); - simd_address_at(tmp7, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp7 = (* [register]:1 tmp5) - (* [register]:1 tmp6); + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] - TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] - TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] - TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] - TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] - TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] - TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] - TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] - TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_MULT(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); - local tmpd:8 = SIMD_INT_SUB(Rd_VPR64.8B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_mls(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.198 MLS (vector) page C7-1852 line 103916 MATCH x2e209400/mask=xbf20fc00 @@ -18866,87 +11069,25 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :mls Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H * Rm_VPR128.8H on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - * [register]:2 
tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 7, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) * (* [register]:2 tmp3); + TMPQ1[0,16] = Rn_VPR128.8H[0,16] * Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] * Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] * Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] * Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] * Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] * Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] * Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] * Rm_VPR128.8H[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ1 on lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp6, TMPQ1, 0, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) - (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp6, TMPQ1, 1, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) - (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp6, TMPQ1, 2, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) - (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp6, TMPQ1, 3, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) - (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp6, TMPQ1, 4, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) - (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp6, TMPQ1, 5, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) - (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp6, TMPQ1, 6, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) - (* [register]:2 tmp6); - simd_address_at(tmp5, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp6, TMPQ1, 7, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp5) - (* [register]:2 tmp6); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_MULT(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = 
NEON_mls(Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.200 MOV (element) page C7-1856 line 104111 MATCH x6e000400/mask=xffe08400 @@ -18957,26 +11098,13 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H # AUNIT --inst x6e010400/mask=xffe18400 --status pass :mov Rd_VPR128.B.imm_neon_uimm4, Rn_VPR128.B.immN_neon_uimm4 -is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.B.imm_neon_uimm4 & imm_neon_uimm4 & b_1616=1 & b_1515=0 & Rn_VPR128.B.immN_neon_uimm4 & immN_neon_uimm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd +is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & Rn_VPR128.B.immN_neon_uimm4 & immN_neon_uimm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[immN_neon_uimm4] lane size 1 - simd_address_at(tmp1, Rn_VPR128, immN_neon_uimm4:4, 1, 16); - local tmp2:1 = * [register]:1 tmp1; - # simd copy Rd_VPR128 element imm_neon_uimm4:1 = tmp2 (lane size 1) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR128, imm_neon_uimm4:4, 1, 16); - * [register]:1 tmp3 = tmp2; + local tmp1:1 = Rn_VPR128.B.immN_neon_uimm4; + # simd copy Rd_VPR128 element imm_neon_uimm4:1 = tmp1 (lane size 1) + Rd_VPR128.B.imm_neon_uimm4 = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, immN_neon_uimm4:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128, tmp1, imm_neon_uimm4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128 -@elif defined(SEMANTIC_pseudo) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, immN_neon_uimm4:1); - Rd_VPR128 = NEON_mov(Rd_VPR128, tmp1, imm_neon_uimm4:1, 1:1); -@endif } # C7.2.200 MOV (element) page C7-1856 line 104111 MATCH x6e000400/mask=xffe08400 @@ -18987,26 +11115,13 @@ is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.B.imm_neon_uimm4 # AUNIT --inst x6e080400/mask=xffef8400 --status pass :mov Rd_VPR128.D.imm_neon_uimm1, Rn_VPR128.D.immN_neon_uimm1 -is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.D.imm_neon_uimm1 & imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & Rn_VPR128.D.immN_neon_uimm1 & immN_neon_uimm1 & Imm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd +is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & Rn_VPR128.D.immN_neon_uimm1 & immN_neon_uimm1 & Imm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[immN_neon_uimm1] lane size 8 - simd_address_at(tmp1, Rn_VPR128, immN_neon_uimm1:4, 8, 16); - local tmp2:8 = * [register]:8 tmp1; - # simd copy Rd_VPR128 element imm_neon_uimm1:1 = tmp2 (lane size 8) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR128, imm_neon_uimm1:4, 8, 16); - * [register]:8 tmp3 = tmp2; + local tmp1:8 = Rn_VPR128.D.immN_neon_uimm1; + # simd copy Rd_VPR128 element imm_neon_uimm1:1 = tmp1 (lane size 8) + Rd_VPR128.D.imm_neon_uimm1 = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, immN_neon_uimm1:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128, tmp1, imm_neon_uimm1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128 -@elif defined(SEMANTIC_pseudo) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, immN_neon_uimm1:1); - Rd_VPR128 = NEON_mov(Rd_VPR128, tmp1, imm_neon_uimm1:1, 8:1); -@endif } # C7.2.200 MOV (element) page C7-1856 line 104111 MATCH x6e000400/mask=xffe08400 @@ -19017,26 +11132,13 @@ is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & 
Rd_VPR128.D.imm_neon_uimm1 # AUNIT --inst x6e020400/mask=xffe38400 --status pass :mov Rd_VPR128.H.imm_neon_uimm3, Rn_VPR128.H.immN_neon_uimm3 -is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.H.imm_neon_uimm3 & imm_neon_uimm3 & b_1617=2 & b_1515=0 & Rn_VPR128.H.immN_neon_uimm3 & immN_neon_uimm3 & Imm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd +is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & Rn_VPR128.H.immN_neon_uimm3 & immN_neon_uimm3 & Imm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[immN_neon_uimm3] lane size 2 - simd_address_at(tmp1, Rn_VPR128, immN_neon_uimm3:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd copy Rd_VPR128 element imm_neon_uimm3:1 = tmp2 (lane size 2) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR128, imm_neon_uimm3:4, 2, 16); - * [register]:2 tmp3 = tmp2; + local tmp1:2 = Rn_VPR128.H.immN_neon_uimm3; + # simd copy Rd_VPR128 element imm_neon_uimm3:1 = tmp1 (lane size 2) + Rd_VPR128.H.imm_neon_uimm3 = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, immN_neon_uimm3:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128, tmp1, imm_neon_uimm3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, immN_neon_uimm3:1); - Rd_VPR128 = NEON_mov(Rd_VPR128, tmp1, imm_neon_uimm3:1, 2:1); -@endif } # C7.2.200 MOV (element) page C7-1856 line 104111 MATCH x6e000400/mask=xffe08400 @@ -19047,26 +11149,13 @@ is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.H.imm_neon_uimm3 # AUNIT --inst x6e040400/mask=xffe78400 --status pass :mov Rd_VPR128.S.imm_neon_uimm2, Rn_VPR128.S.immN_neon_uimm2 -is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.S.imm_neon_uimm2 & imm_neon_uimm2 & b_1618=4 & b_1515=0 & Rn_VPR128.S.immN_neon_uimm2 & immN_neon_uimm2 & Imm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd +is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & Rn_VPR128.S.immN_neon_uimm2 & immN_neon_uimm2 & Imm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[immN_neon_uimm2] lane size 4 - simd_address_at(tmp1, Rn_VPR128, immN_neon_uimm2:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd copy Rd_VPR128 element imm_neon_uimm2:1 = tmp2 (lane size 4) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR128, imm_neon_uimm2:4, 4, 16); - * [register]:4 tmp3 = tmp2; + local tmp1:4 = Rn_VPR128.S.immN_neon_uimm2; + # simd copy Rd_VPR128 element imm_neon_uimm2:1 = tmp1 (lane size 4) + Rd_VPR128.S.imm_neon_uimm2 = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR128, immN_neon_uimm2:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128, tmp1, imm_neon_uimm2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128 -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Rn_VPR128, immN_neon_uimm2:1); - Rd_VPR128 = NEON_mov(Rd_VPR128, tmp1, imm_neon_uimm2:1, 4:1); -@endif } # C7.2.201 MOV (from general) page C7-1858 line 104209 MATCH x4e001c00/mask=xffe0fc00 @@ -19077,24 +11166,12 @@ is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.S.imm_neon_uimm2 # AUNIT --inst x4e011c00/mask=xffe1fc00 --status pass :mov Rd_VPR128.B.imm_neon_uimm4, Rn_GPR32 -is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.B.imm_neon_uimm4 & imm_neon_uimm4 & 
b_1616=1 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR32 & Rd_VPR128 & Zd +is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR32 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_GPR32, 0, 1, 4); - local tmp2:1 = * [register]:1 tmp1; - # simd copy Rd_VPR128 element imm_neon_uimm4:1 = tmp2 (lane size 1) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR128, imm_neon_uimm4:4, 1, 16); - * [register]:1 tmp3 = tmp2; + local tmp1:1 = Rn_GPR32[0,8]; + # simd copy Rd_VPR128 element imm_neon_uimm4:1 = tmp1 (lane size 1) + Rd_VPR128.B.imm_neon_uimm4 = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:1 = SIMD_PIECE(Rn_GPR32, 0:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128, tmp1, imm_neon_uimm4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128 -@elif defined(SEMANTIC_pseudo) - Rd_VPR128 = NEON_mov(Rd_VPR128, Rn_GPR32, imm_neon_uimm4:1, 1:1); -@endif } # C7.2.201 MOV (from general) page C7-1858 line 104209 MATCH x4e001c00/mask=xffe0fc00 @@ -19105,20 +11182,11 @@ is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.B.imm_neon_uimm4 # AUNIT --inst x4e081c00/mask=xffeffc00 --status pass :mov Rd_VPR128.D.imm_neon_uimm1, Rn_GPR64 -is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.D.imm_neon_uimm1 & imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR64 & Rd_VPR128 & Zd +is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR64 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd copy Rd_VPR128 element imm_neon_uimm1:1 = Rn_GPR64 (lane size 8) - local tmp1:4 = 0; - simd_address_at(tmp1, Rd_VPR128, imm_neon_uimm1:4, 8, 16); - * [register]:8 tmp1 = Rn_GPR64; + Rd_VPR128.D.imm_neon_uimm1 = Rn_GPR64; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_COPY(Rd_VPR128, Rn_GPR64, imm_neon_uimm1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128 -@elif defined(SEMANTIC_pseudo) - Rd_VPR128 = NEON_mov(Rd_VPR128, Rn_GPR64, imm_neon_uimm1:1, 8:1); -@endif } # C7.2.201 MOV (from general) page C7-1858 line 104209 MATCH x4e001c00/mask=xffe0fc00 @@ -19129,24 +11197,12 @@ is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.D.imm_neon_uimm1 # AUNIT --inst x4e021c00/mask=xffe3fc00 --status pass :mov Rd_VPR128.H.imm_neon_uimm3, Rn_GPR32 -is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.H.imm_neon_uimm3 & imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR32 & Rd_VPR128 & Zd +is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR32 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_GPR32, 0, 2, 4); - local tmp2:2 = * [register]:2 tmp1; - # simd copy Rd_VPR128 element imm_neon_uimm3:1 = tmp2 (lane size 2) - local tmp3:4 = 0; - simd_address_at(tmp3, Rd_VPR128, imm_neon_uimm3:4, 2, 16); - * [register]:2 tmp3 = tmp2; + local tmp1:2 = Rn_GPR32[0,16]; + # simd copy Rd_VPR128 element imm_neon_uimm3:1 = tmp1 (lane size 2) + Rd_VPR128.H.imm_neon_uimm3 = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Rn_GPR32, 0:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128, tmp1, imm_neon_uimm3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128 -@elif defined(SEMANTIC_pseudo) - Rd_VPR128 = NEON_mov(Rd_VPR128, Rn_GPR32, 
imm_neon_uimm3:1, 2:1); -@endif } # C7.2.201 MOV (from general) page C7-1858 line 104209 MATCH x4e001c00/mask=xffe0fc00 @@ -19157,20 +11213,11 @@ is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.H.imm_neon_uimm3 # AUNIT --inst x4e041c00/mask=xffe7fc00 --status pass :mov Rd_VPR128.S.imm_neon_uimm2, Rn_GPR32 -is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.S.imm_neon_uimm2 & imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR32 & Rd_VPR128 & Zd +is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR32 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) || defined(SEMANTIC_force) # simd copy Rd_VPR128 element imm_neon_uimm2:1 = Rn_GPR32 (lane size 4) - local tmp1:4 = 0; - simd_address_at(tmp1, Rd_VPR128, imm_neon_uimm2:4, 4, 16); - * [register]:4 tmp1 = Rn_GPR32; + Rd_VPR128.S.imm_neon_uimm2 = Rn_GPR32; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_COPY(Rd_VPR128, Rn_GPR32, imm_neon_uimm2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128 -@elif defined(SEMANTIC_pseudo) - Rd_VPR128 = NEON_mov(Rd_VPR128, Rn_GPR32, imm_neon_uimm2:1, 2:1); -@endif } # C7.2.202 MOV (vector) page C7-1860 line 104306 MATCH x0ea01c00/mask=xbfe0fc00 @@ -19183,15 +11230,8 @@ is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.S.imm_neon_uimm2 :mov Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Rn=Rm & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR128.16B = Rn_VPR128.16B; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = Rn_VPR128.16B; - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_mov(Rn_VPR128.16B, 1:1); -@endif } # C7.2.202 MOV (vector) page C7-1860 line 104306 MATCH x0ea01c00/mask=xbfe0fc00 @@ -19204,15 +11244,8 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.16 :mov Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Rn=Rm & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR64.8B = Rn_VPR64.8B; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = Rn_VPR64.8B; - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_mov(Rn_VPR64.8B, 1:1); -@endif } # C7.2.203 MOV (to general) page C7-1861 line 104373 MATCH x0e003c00/mask=xbfe3fc00 @@ -19223,23 +11256,12 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.8B # AUNIT --inst x0e043c00/mask=xffe7fc00 --status pass :mov Rd_GPR32, Rn_VPR128.S.imm_neon_uimm2 -is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 & Rd_VPR128 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 & Rd_VPR128 { -@if defined(SEMANTIC_primitive) || defined(SEMANTIC_force) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm2:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - Rd_GPR32 = tmp2; + local tmp1:4 = Rn_VPR128.S.imm_neon_uimm2; + Rd_GPR32 = tmp1; zext_rs(Rd_GPR64); # zero 
upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm2:1); - local tmpd:4 = tmp1; - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm2:1); - Rd_GPR32 = NEON_mov(tmp1, 4:1); -@endif } # C7.2.203 MOV (to general) page C7-1861 line 104373 MATCH x0e003c00/mask=xbfe3fc00 @@ -19250,21 +11272,11 @@ is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 # AUNIT --inst x4e083c00/mask=xffeffc00 --status pass :mov Rd_GPR64, Rn_VPR128.D.imm_neon_uimm1 -is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 & imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR64 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm1] lane size 8 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm1:4, 8, 16); - local tmp2:8 = * [register]:8 tmp1; - Rd_GPR64 = tmp2; -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm1:1); + local tmp1:8 = Rn_VPR128.D.imm_neon_uimm1; Rd_GPR64 = tmp1; -@elif defined(SEMANTIC_pseudo) - local tmp1:8 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm1:1); - Rd_GPR64 = NEON_mov(tmp1, 8:1); -@endif } # C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 @@ -19279,15 +11291,8 @@ is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 :movi Rd_FPR64, Imm_neon_uimm8Shift is b_31=0 & b_30=0 & b_29=1 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = Imm_neon_uimm8Shift:8; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = Imm_neon_uimm8Shift:8; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_movi(Imm_neon_uimm8Shift); -@endif } # C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 @@ -19301,48 +11306,24 @@ is b_31=0 & b_30=0 & b_29=1 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 :movi Rd_VPR128.16B, Imm_neon_uimm8Shift is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd duplicate Rd_VPR128.16B = all elements Imm_neon_uimm8Shift:1 (lane size 1) - local tmp1:4 = 0; - simd_address_at(tmp1, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, 
Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; - simd_address_at(tmp1, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp1 = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[0,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[8,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[16,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[24,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[32,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[40,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[48,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[56,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[64,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[72,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[80,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[88,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[96,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[104,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[112,8] = Imm_neon_uimm8Shift:1; + Rd_VPR128.16B[120,8] = Imm_neon_uimm8Shift:1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_COPY(Rd_VPR128.16B, Imm_neon_uimm8Shift:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_movi(Imm_neon_uimm8Shift:1, 1:1); -@endif } # C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 @@ -19357,22 +11338,11 @@ is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 :movi Rd_VPR128.2D, Imm_neon_uimm8Shift is b_31=0 & b_30=1 & b_29=1 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) || defined(SEMANTIC_force) local tmp1:8 = Imm_neon_uimm8Shift; # simd duplicate Rd_VPR128.2D = all elements tmp1 (lane size 8) - local tmp2:4 = 0; - simd_address_at(tmp2, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp2 = tmp1; + Rd_VPR128.2D[0,64] = tmp1; + Rd_VPR128.2D[64,64] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Imm_neon_uimm8Shift; - local tmpd:16 = SIMD_COPY(Rd_VPR128.2D, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_movi(Imm_neon_uimm8Shift, 8:1); -@endif } # C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 @@ -19386,15 +11356,8 @@ is b_31=0 & b_30=1 & b_29=1 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 :movi Rd_VPR64.8B, Imm_neon_uimm8Shift is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR64.8B = Imm_neon_uimm8Shift:8; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = Imm_neon_uimm8Shift:8; - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_movi(Imm_neon_uimm8Shift, 1:1); -@endif } # C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 @@ -19409,15 +11372,8 @@ is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 :movi Rd_VPR64.2S, Imm_neon_uimm8Shift is b_31=0 & b_30=0 & b_29=0 & 
b_1928=0b0111100000 & b_15=0 & b_12=0 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR64.2S = Imm_neon_uimm8Shift:8; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = Imm_neon_uimm8Shift:8; - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_movi(Imm_neon_uimm8Shift, 4:1); -@endif } # C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 @@ -19433,15 +11389,8 @@ is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_15=0 & b_12=0 & b_1011=0b0 :movi Rd_VPR64.4H, Imm_neon_uimm8Shift is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_1415=0b10 & b_12=0 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR64.4H = Imm_neon_uimm8Shift:8; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = Imm_neon_uimm8Shift:8; - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_movi(Imm_neon_uimm8Shift, 2:1); -@endif } # C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 @@ -19456,24 +11405,12 @@ is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_1415=0b10 & b_12=0 & b_101 :movi Rd_VPR128.4S, Imm_neon_uimm8Shift is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_15=0 & b_12=0 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) || defined(SEMANTIC_force) # simd duplicate Rd_VPR128.4S = all elements Imm_neon_uimm8Shift:4 (lane size 4) - local tmp1:4 = 0; - simd_address_at(tmp1, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp1 = Imm_neon_uimm8Shift:4; - simd_address_at(tmp1, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp1 = Imm_neon_uimm8Shift:4; - simd_address_at(tmp1, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp1 = Imm_neon_uimm8Shift:4; - simd_address_at(tmp1, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp1 = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[0,32] = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[32,32] = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[64,32] = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[96,32] = Imm_neon_uimm8Shift:4; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_COPY(Rd_VPR128.4S, Imm_neon_uimm8Shift:4); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_movi(Imm_neon_uimm8Shift, 4:1); -@endif } # C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 @@ -19489,32 +11426,16 @@ is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_15=0 & b_12=0 & b_1011=0b0 :movi Rd_VPR128.8H, Imm_neon_uimm8Shift is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_1415=0b10 & b_12=0 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd duplicate Rd_VPR128.8H = all elements Imm_neon_uimm8Shift:2 (lane size 2) - local tmp1:4 = 0; - simd_address_at(tmp1, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp1 = Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp1 = Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp1 = Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp1 = Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp1 = Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp1 = Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 6, 2, 16); 
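# Note on the lane-write idiom used throughout the refactored semantics: SLEIGH's
# bit-range syntax REG[bitoffset,bitwidth] selects a field of a register directly,
# so each per-element store replaces a simd_address_at()/pointer-dereference pair
# from the old SEMANTIC_primitive form.  A minimal sketch for a two-lane .2S
# destination (4-byte lanes, so elements sit at bit offsets 0 and 32):
#   local tmp1:4 = Imm_neon_uimm8Shift:4;
#   Rd_VPR64.2S[0,32]  = tmp1;   # element 0 occupies bits  0..31
#   Rd_VPR64.2S[32,32] = tmp1;   # element 1 occupies bits 32..63
#   zext_zd(Zd);                 # zero the upper bytes of the full Z register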
- * [register]:2 tmp1 = Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp1 = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[0,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[16,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[32,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[48,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[64,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[80,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[96,16] = Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[112,16] = Imm_neon_uimm8Shift:2; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_COPY(Rd_VPR128.8H, Imm_neon_uimm8Shift:2); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_movi(Imm_neon_uimm8Shift, 2:1); -@endif } # C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 @@ -19528,15 +11449,8 @@ is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_1415=0b10 & b_12=0 & b_101 :movi Rd_VPR64.2S, Imm_neon_uimm8Shift is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_1315=0b110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR64.2S = Imm_neon_uimm8Shift:8; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = Imm_neon_uimm8Shift:8; - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_movi(Imm_neon_uimm8Shift, 4:1); -@endif } # C7.2.204 MOVI page C7-1863 line 104465 MATCH x0f000400/mask=x9ff80c00 @@ -19550,24 +11464,12 @@ is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_1315=0b110 & b_1011=0b01 & :movi Rd_VPR128.4S, Imm_neon_uimm8Shift is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_1315=0b110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd duplicate Rd_VPR128.4S = all elements Imm_neon_uimm8Shift:4 (lane size 4) - local tmp1:4 = 0; - simd_address_at(tmp1, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp1 = Imm_neon_uimm8Shift:4; - simd_address_at(tmp1, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp1 = Imm_neon_uimm8Shift:4; - simd_address_at(tmp1, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp1 = Imm_neon_uimm8Shift:4; - simd_address_at(tmp1, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp1 = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[0,32] = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[32,32] = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[64,32] = Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[96,32] = Imm_neon_uimm8Shift:4; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_COPY(Rd_VPR128.4S, Imm_neon_uimm8Shift:4); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_movi(Imm_neon_uimm8Shift:4, 4:1); -@endif } # C7.2.205 MUL (by element) page C7-1866 line 104646 MATCH x0f008000/mask=xbf00f400 @@ -19579,29 +11481,12 @@ is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_1315=0b110 & b_1011=0b01 & :mul Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x8 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix Rd_VPR64.2S = Rn_VPR64.2S * tmp2 on lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR64.2S, 0, 4, 8); - 
simd_address_at(tmp4, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp3) * tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix Rd_VPR64.2S = Rn_VPR64.2S * tmp1 on lane size 4 + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] * tmp1; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] * tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmpd:8 = SIMD_INT_MULT(Rn_VPR64.2S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR64.2S = NEON_mul(Rn_VPR64.2S, tmp1, 4:1); -@endif } # C7.2.205 MUL (by element) page C7-1866 line 104646 MATCH x0f008000/mask=xbf00f400 @@ -19613,35 +11498,14 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :mul Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x8 & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix Rd_VPR64.4H = Rn_VPR64.4H * tmp2 on lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp4 = (* [register]:2 tmp3) * tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix Rd_VPR64.4H = Rn_VPR64.4H * tmp1 on lane size 2 + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] * tmp1; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] * tmp1; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] * tmp1; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] * tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmpd:8 = SIMD_INT_MULT(Rn_VPR64.4H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.4H = NEON_mul(Rn_VPR64.4H, tmp1, 2:1); -@endif } # C7.2.205 MUL (by element) page C7-1866 line 104646 MATCH x0f008000/mask=xbf00f400 @@ -19653,35 +11517,14 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :mul Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x8 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) || defined(SEMANTIC_force) - local tmp1:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp1, Re_VPR128.S, vIndex:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - # simd infix Rd_VPR128.4S = Rn_VPR128.4S * tmp2 on lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, 
Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) * tmp2; + local tmp1:4 = Re_VPR128.S.vIndex; + # simd infix Rd_VPR128.4S = Rn_VPR128.4S * tmp1 on lane size 4 + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] * tmp1; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] * tmp1; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] * tmp1; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] * tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - local tmpd:16 = SIMD_INT_MULT(Rn_VPR128.4S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.4S = NEON_mul(Rn_VPR128.4S, tmp1, 4:1); -@endif } # C7.2.205 MUL (by element) page C7-1866 line 104646 MATCH x0f008000/mask=xbf00f400 @@ -19693,47 +11536,18 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :mul Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x8 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - # simd infix Rd_VPR128.8H = Rn_VPR128.8H * tmp2 on lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) * tmp2; - simd_address_at(tmp3, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp3) * tmp2; + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; + # simd infix Rd_VPR128.8H = Rn_VPR128.8H * tmp1 on lane size 2 + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] * tmp1; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] * tmp1; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] * tmp1; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] * tmp1; + Rd_VPR128.8H[64,16] = 
Rn_VPR128.8H[64,16] * tmp1; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] * tmp1; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] * tmp1; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] * tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - local tmpd:16 = SIMD_INT_MULT(Rn_VPR128.8H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.8H = NEON_mul(Rn_VPR128.8H, tmp1, 2:1); -@endif } # C7.2.206 MUL (vector) page C7-1868 line 104774 MATCH x0e209c00/mask=xbf20fc00 @@ -19745,82 +11559,24 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :mul Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x13 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.16B = Rn_VPR128.16B * Rm_VPR128.16B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - 
simd_address_at(tmp1, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] * Rm_VPR128.16B[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] * Rm_VPR128.16B[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] * Rm_VPR128.16B[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] * Rm_VPR128.16B[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] * Rm_VPR128.16B[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] * Rm_VPR128.16B[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] * Rm_VPR128.16B[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] * Rm_VPR128.16B[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] * Rm_VPR128.16B[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] * Rm_VPR128.16B[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] * Rm_VPR128.16B[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] * Rm_VPR128.16B[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] * Rm_VPR128.16B[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] * Rm_VPR128.16B[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] * Rm_VPR128.16B[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] * Rm_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_MULT(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_mul(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.206 MUL (vector) page C7-1868 line 104774 MATCH x0e209c00/mask=xbf20fc00 @@ -19832,26 +11588,10 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :mul Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x13 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.2S = Rn_VPR64.2S * Rm_VPR64.2S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) * (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) * (* [register]:4 tmp2); + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] * Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] * 
Rm_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_MULT(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_mul(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.206 MUL (vector) page C7-1868 line 104774 MATCH x0e209c00/mask=xbf20fc00 @@ -19863,34 +11603,12 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :mul Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x13 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.4H = Rn_VPR64.4H * Rm_VPR64.4H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) * (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) * (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) * (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) * (* [register]:2 tmp2); + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] * Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] * Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] * Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] * Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_MULT(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_mul(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.206 MUL (vector) page C7-1868 line 104774 MATCH x0e209c00/mask=xbf20fc00 @@ -19902,34 +11620,12 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :mul Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.4S = Rn_VPR128.4S * Rm_VPR128.4S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) * (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) * (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) * (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * 
[register]:4 tmp3 = (* [register]:4 tmp1) * (* [register]:4 tmp2); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] * Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] * Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] * Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] * Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_MULT(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_mul(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.206 MUL (vector) page C7-1868 line 104774 MATCH x0e209c00/mask=xbf20fc00 @@ -19941,50 +11637,16 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :mul Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x13 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.8B = Rn_VPR64.8B * Rm_VPR64.8B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) * (* [register]:1 tmp2); + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] * Rm_VPR64.8B[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] * Rm_VPR64.8B[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] * Rm_VPR64.8B[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] * Rm_VPR64.8B[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] * Rm_VPR64.8B[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] * Rm_VPR64.8B[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] * Rm_VPR64.8B[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] * Rm_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_MULT(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B 
= NEON_mul(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.206 MUL (vector) page C7-1868 line 104774 MATCH x0e209c00/mask=xbf20fc00 @@ -19996,50 +11658,16 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :mul Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.8H = Rn_VPR128.8H * Rm_VPR128.8H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) * (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) * (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) * (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) * (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) * (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) * (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) * (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) * (* [register]:2 tmp2); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] * Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] * Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] * Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] * Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] * Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] * Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] * Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] * Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_MULT(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_mul(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.207 MVN page C7-1870 line 104876 MATCH x2e205800/mask=xbffffc00 @@ -20052,65 +11680,24 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :mvn Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=16 & b_1216=5 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd unary 
Rd_VPR128.16B = ~(Rn_VPR128.16B) on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); + Rd_VPR128.16B[0,8] = ~(Rn_VPR128.16B[0,8]); + Rd_VPR128.16B[8,8] = ~(Rn_VPR128.16B[8,8]); + Rd_VPR128.16B[16,8] = ~(Rn_VPR128.16B[16,8]); + Rd_VPR128.16B[24,8] = ~(Rn_VPR128.16B[24,8]); + Rd_VPR128.16B[32,8] = ~(Rn_VPR128.16B[32,8]); + Rd_VPR128.16B[40,8] = ~(Rn_VPR128.16B[40,8]); + Rd_VPR128.16B[48,8] = ~(Rn_VPR128.16B[48,8]); + Rd_VPR128.16B[56,8] = ~(Rn_VPR128.16B[56,8]); + Rd_VPR128.16B[64,8] = ~(Rn_VPR128.16B[64,8]); + Rd_VPR128.16B[72,8] = ~(Rn_VPR128.16B[72,8]); + Rd_VPR128.16B[80,8] = ~(Rn_VPR128.16B[80,8]); + Rd_VPR128.16B[88,8] = ~(Rn_VPR128.16B[88,8]); + Rd_VPR128.16B[96,8] = ~(Rn_VPR128.16B[96,8]); + Rd_VPR128.16B[104,8] = ~(Rn_VPR128.16B[104,8]); + Rd_VPR128.16B[112,8] = ~(Rn_VPR128.16B[112,8]); + Rd_VPR128.16B[120,8] = ~(Rn_VPR128.16B[120,8]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_NEGATE(Rn_VPR128.16B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_mvn(Rn_VPR128.16B, 1:1); -@endif } # C7.2.207 MVN page C7-1870 line 104876 MATCH 
x2e205800/mask=xbffffc00 @@ -20123,41 +11710,16 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=16 & b_1216=5 & :mvn Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=16 & b_1216=5 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.8B = ~(Rn_VPR64.8B) on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp2 = ~(* [register]:1 tmp1); + Rd_VPR64.8B[0,8] = ~(Rn_VPR64.8B[0,8]); + Rd_VPR64.8B[8,8] = ~(Rn_VPR64.8B[8,8]); + Rd_VPR64.8B[16,8] = ~(Rn_VPR64.8B[16,8]); + Rd_VPR64.8B[24,8] = ~(Rn_VPR64.8B[24,8]); + Rd_VPR64.8B[32,8] = ~(Rn_VPR64.8B[32,8]); + Rd_VPR64.8B[40,8] = ~(Rn_VPR64.8B[40,8]); + Rd_VPR64.8B[48,8] = ~(Rn_VPR64.8B[48,8]); + Rd_VPR64.8B[56,8] = ~(Rn_VPR64.8B[56,8]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_NEGATE(Rn_VPR64.8B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_mvn(Rn_VPR64.8B, 1:1); -@endif } # C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 @@ -20174,22 +11736,11 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=16 & b_1216=5 & :mvni Rd_VPR64.2S, Imm_neon_uimm8Shift is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & b_1515=0 & Imm_neon_uimm8Shift & b_1012=1 & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = ~ Imm_neon_uimm8Shift:4; # simd duplicate Rd_VPR64.2S = all elements tmp1 (lane size 4) - local tmp2:4 = 0; - simd_address_at(tmp2, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp2 = tmp1; + Rd_VPR64.2S[0,32] = tmp1; + Rd_VPR64.2S[32,32] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = ~ Imm_neon_uimm8Shift:4; - local tmpd:8 = SIMD_COPY(Rd_VPR64.2S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_mvni(Imm_neon_uimm8Shift:4, 4:1); -@endif } # C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 @@ -20205,26 +11756,13 @@ is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & b_1515=0 & Imm_neon_uimm8 :mvni Rd_VPR64.4H, Imm_neon_uimm8Shift is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1415=2 & b_1012=1 & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = ~ 
Imm_neon_uimm8Shift:2; # simd duplicate Rd_VPR64.4H = all elements tmp1 (lane size 2) - local tmp2:4 = 0; - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = tmp1; + Rd_VPR64.4H[0,16] = tmp1; + Rd_VPR64.4H[16,16] = tmp1; + Rd_VPR64.4H[32,16] = tmp1; + Rd_VPR64.4H[48,16] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = ~ Imm_neon_uimm8Shift:2; - local tmpd:8 = SIMD_COPY(Rd_VPR64.4H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_mvni(Imm_neon_uimm8Shift:2, 2:1); -@endif } # C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 @@ -20241,26 +11779,13 @@ is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1 :mvni Rd_VPR128.4S, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1515=0 & b_1012=1 & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = ~ Imm_neon_uimm8Shift:4; # simd duplicate Rd_VPR128.4S = all elements tmp1 (lane size 4) - local tmp2:4 = 0; - simd_address_at(tmp2, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp2 = tmp1; + Rd_VPR128.4S[0,32] = tmp1; + Rd_VPR128.4S[32,32] = tmp1; + Rd_VPR128.4S[64,32] = tmp1; + Rd_VPR128.4S[96,32] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = ~ Imm_neon_uimm8Shift:4; - local tmpd:16 = SIMD_COPY(Rd_VPR128.4S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_mvni(Imm_neon_uimm8Shift:4, 4:1); -@endif } # C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 @@ -20276,34 +11801,17 @@ is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1 :mvni Rd_VPR128.8H, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1415=2 & b_1012=1 & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = ~ Imm_neon_uimm8Shift:2; # simd duplicate Rd_VPR128.8H = all elements tmp1 (lane size 2) - local tmp2:4 = 0; - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = tmp1; + Rd_VPR128.8H[0,16] = tmp1; + Rd_VPR128.8H[16,16] = tmp1; + Rd_VPR128.8H[32,16] = tmp1; + Rd_VPR128.8H[48,16] = tmp1; + Rd_VPR128.8H[64,16] = tmp1; + Rd_VPR128.8H[80,16] = tmp1; + Rd_VPR128.8H[96,16] = tmp1; + Rd_VPR128.8H[112,16] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = ~ Imm_neon_uimm8Shift:2; - 
local tmpd:16 = SIMD_COPY(Rd_VPR128.8H, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_mvni(Imm_neon_uimm8Shift:2, 2:1); -@endif } # C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 @@ -20317,22 +11825,11 @@ is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1 :mvni Rd_VPR64.2S, Imm_neon_uimm8Shift is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1315=6 & b_1011=1 & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = ~ Imm_neon_uimm8Shift:4; # simd duplicate Rd_VPR64.2S = all elements tmp1 (lane size 4) - local tmp2:4 = 0; - simd_address_at(tmp2, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp2 = tmp1; + Rd_VPR64.2S[0,32] = tmp1; + Rd_VPR64.2S[32,32] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = ~ Imm_neon_uimm8Shift:4; - local tmpd:8 = SIMD_COPY(Rd_VPR64.2S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_mvni(Imm_neon_uimm8Shift:4, 4:1); -@endif } # C7.2.208 MVNI page C7-1871 line 104944 MATCH x2f000400/mask=xbff80c00 @@ -20346,26 +11843,13 @@ is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1 :mvni Rd_VPR128.4S, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1315=6 & b_1011=1 & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = ~ Imm_neon_uimm8Shift:4; # simd duplicate Rd_VPR128.4S = all elements tmp1 (lane size 4) - local tmp2:4 = 0; - simd_address_at(tmp2, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp2 = tmp1; - simd_address_at(tmp2, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp2 = tmp1; + Rd_VPR128.4S[0,32] = tmp1; + Rd_VPR128.4S[32,32] = tmp1; + Rd_VPR128.4S[64,32] = tmp1; + Rd_VPR128.4S[96,32] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = ~ Imm_neon_uimm8Shift:4; - local tmpd:16 = SIMD_COPY(Rd_VPR128.4S, tmp1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_mvni(Imm_neon_uimm8Shift:4, 4:1); -@endif } # C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x7e20b800/mask=xff3ffc00 @@ -20377,15 +11861,8 @@ is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1 :neg Rd_VPR64, Rn_VPR64 is b_3131=0 & q=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR64 & Rd_VPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR64 = - Rn_VPR64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = - Rn_VPR64; - Zd = zext(tmpd); # assigning to Rd_VPR64 -@elif defined(SEMANTIC_pseudo) - Rd_VPR64 = NEON_neg(Rn_VPR64); -@endif } # C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 @@ -20396,9 +11873,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_1721=0x10 & b_1216=0 :neg Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_neg(Rn_VPR64.8B, 1:1); -@endif } # C7.2.209 NEG (vector) 
page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 @@ -20409,9 +11884,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :neg Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_neg(Rn_VPR128.16B, 1:1); -@endif } # C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 @@ -20422,9 +11895,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :neg Rd_VPR64.4H, Rn_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_neg(Rn_VPR64.4H, 2:1); -@endif } # C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 @@ -20435,9 +11906,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :neg Rd_VPR128.8H, Rn_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_neg(Rn_VPR128.8H, 2:1); -@endif } # C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 @@ -20448,9 +11917,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :neg Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_neg(Rn_VPR64.2S, 4:1); -@endif } # C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 @@ -20461,9 +11928,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :neg Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_neg(Rn_VPR128.4S, 4:1); -@endif } # C7.2.209 NEG (vector) page C7-1874 line 105094 MATCH x2e20b800/mask=xbf3ffc00 @@ -20474,9 +11939,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :neg Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_neg(Rn_VPR128.2D, 8:1); -@endif } # C7.2.211 ORN (vector) page C7-1878 line 105307 MATCH x0ee01c00/mask=xbfe0fc00 @@ -20488,134 +11951,41 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x :orn Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd unary TMPQ1 = ~(Rm_VPR128.16B) on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, TMPQ1, 0, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - 
simd_address_at(tmp2, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, TMPQ1, 1, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, TMPQ1, 2, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, TMPQ1, 3, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, TMPQ1, 4, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, TMPQ1, 5, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, TMPQ1, 6, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, TMPQ1, 7, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, TMPQ1, 8, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, TMPQ1, 9, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, TMPQ1, 10, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, TMPQ1, 11, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, TMPQ1, 12, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, TMPQ1, 13, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, TMPQ1, 14, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, TMPQ1, 15, 1, 16); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); + TMPQ1[0,8] = ~(Rm_VPR128.16B[0,8]); + TMPQ1[8,8] = ~(Rm_VPR128.16B[8,8]); + TMPQ1[16,8] = ~(Rm_VPR128.16B[16,8]); + TMPQ1[24,8] = ~(Rm_VPR128.16B[24,8]); + TMPQ1[32,8] = ~(Rm_VPR128.16B[32,8]); + TMPQ1[40,8] = ~(Rm_VPR128.16B[40,8]); + TMPQ1[48,8] = ~(Rm_VPR128.16B[48,8]); + TMPQ1[56,8] = ~(Rm_VPR128.16B[56,8]); + TMPQ1[64,8] = ~(Rm_VPR128.16B[64,8]); + TMPQ1[72,8] = ~(Rm_VPR128.16B[72,8]); + TMPQ1[80,8] = ~(Rm_VPR128.16B[80,8]); + TMPQ1[88,8] = ~(Rm_VPR128.16B[88,8]); + TMPQ1[96,8] = ~(Rm_VPR128.16B[96,8]); + TMPQ1[104,8] = ~(Rm_VPR128.16B[104,8]); + TMPQ1[112,8] = ~(Rm_VPR128.16B[112,8]); + TMPQ1[120,8] = ~(Rm_VPR128.16B[120,8]); # simd infix Rd_VPR128.16B = Rn_VPR128.16B | TMPQ1 on lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp5, TMPQ1, 0, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp5, TMPQ1, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp5, TMPQ1, 2, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, 
Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp5, TMPQ1, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp5, TMPQ1, 4, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp5, TMPQ1, 5, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp5, TMPQ1, 6, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp5, TMPQ1, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp5, TMPQ1, 8, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp5, TMPQ1, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp5, TMPQ1, 10, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp5, TMPQ1, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp5, TMPQ1, 12, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp5, TMPQ1, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp5, TMPQ1, 14, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp5, TMPQ1, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] | TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] | TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] | TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] | TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] | TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] | TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] | TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] | TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] | TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] | TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] | TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] | TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] | TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = 
Rn_VPR128.16B[104,8] | TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] | TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] | TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_NEGATE(Rm_VPR128.16B, 1:1); - local tmpd:16 = SIMD_INT_OR(Rn_VPR128.16B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_orn(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.211 ORN (vector) page C7-1878 line 105307 MATCH x0ee01c00/mask=xbfe0fc00 @@ -20627,78 +11997,25 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.16 :orn Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd unary TMPD1 = ~(Rm_VPR64.8B) on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPD1, 0, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPD1, 1, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPD1, 2, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPD1, 3, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPD1, 4, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPD1, 5, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPD1, 6, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPD1, 7, 1, 8); - * [register]:1 tmp3 = ~(* [register]:1 tmp2); + TMPD1[0,8] = ~(Rm_VPR64.8B[0,8]); + TMPD1[8,8] = ~(Rm_VPR64.8B[8,8]); + TMPD1[16,8] = ~(Rm_VPR64.8B[16,8]); + TMPD1[24,8] = ~(Rm_VPR64.8B[24,8]); + TMPD1[32,8] = ~(Rm_VPR64.8B[32,8]); + TMPD1[40,8] = ~(Rm_VPR64.8B[40,8]); + TMPD1[48,8] = ~(Rm_VPR64.8B[48,8]); + TMPD1[56,8] = ~(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR64.8B = Rn_VPR64.8B | TMPD1 on lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp5, TMPD1, 0, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp5, TMPD1, 1, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp5, TMPD1, 2, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp5, TMPD1, 3, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp5, TMPD1, 4, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, 
Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp5, TMPD1, 5, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp5, TMPD1, 6, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); - simd_address_at(tmp4, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp5, TMPD1, 7, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) | (* [register]:1 tmp5); + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] | TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] | TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] | TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] | TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] | TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] | TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] | TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] | TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_NEGATE(Rm_VPR64.8B, 1:1); - local tmpd:8 = SIMD_INT_OR(Rn_VPR64.8B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_orn(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.212 ORR (vector, immediate) page C7-1880 line 105389 MATCH x0f001400/mask=xbff81c00 @@ -20715,23 +12032,10 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR64.8B :orr Rd_VPR64.2S, Imm_neon_uimm8Shift is b_3131=0 & q=0 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1515=0 & b_1012=5 & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.2S = Rd_VPR64.2S | Imm_neon_uimm8Shift:4 on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp2 = (* [register]:4 tmp1) | Imm_neon_uimm8Shift:4; - simd_address_at(tmp1, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp2 = (* [register]:4 tmp1) | Imm_neon_uimm8Shift:4; + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] | Imm_neon_uimm8Shift:4; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] | Imm_neon_uimm8Shift:4; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_OR(Rd_VPR64.2S, Imm_neon_uimm8Shift:4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_orn(Rd_VPR64.2S, Imm_neon_uimm8Shift:4, 4:1); -@endif } # C7.2.212 ORR (vector, immediate) page C7-1880 line 105389 MATCH x0f001400/mask=xbff81c00 @@ -20745,29 +12049,12 @@ is b_3131=0 & q=0 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1 :orr Rd_VPR64.4H, Imm_neon_uimm8Shift is b_3131=0 & q=0 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1415=2 & b_1012=5 & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.4H = Rd_VPR64.4H | Imm_neon_uimm8Shift:2 on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = (* [register]:2 tmp1) | Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = (* [register]:2 tmp1) | Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * 
[register]:2 tmp2 = (* [register]:2 tmp1) | Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = (* [register]:2 tmp1) | Imm_neon_uimm8Shift:2; + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] | Imm_neon_uimm8Shift:2; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] | Imm_neon_uimm8Shift:2; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] | Imm_neon_uimm8Shift:2; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] | Imm_neon_uimm8Shift:2; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_OR(Rd_VPR64.4H, Imm_neon_uimm8Shift:2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_orn(Rd_VPR64.4H, Imm_neon_uimm8Shift:2, 2:1); -@endif } # C7.2.212 ORR (vector, immediate) page C7-1880 line 105389 MATCH x0f001400/mask=xbff81c00 @@ -20784,29 +12071,12 @@ is b_3131=0 & q=0 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1 :orr Rd_VPR128.4S, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1515=0 & b_1012=5 & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.4S = Rd_VPR128.4S | Imm_neon_uimm8Shift:4 on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp2 = (* [register]:4 tmp1) | Imm_neon_uimm8Shift:4; - simd_address_at(tmp1, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp2 = (* [register]:4 tmp1) | Imm_neon_uimm8Shift:4; - simd_address_at(tmp1, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp2 = (* [register]:4 tmp1) | Imm_neon_uimm8Shift:4; - simd_address_at(tmp1, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp2 = (* [register]:4 tmp1) | Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] | Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] | Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] | Imm_neon_uimm8Shift:4; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] | Imm_neon_uimm8Shift:4; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_OR(Rd_VPR128.4S, Imm_neon_uimm8Shift:4); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_orn(Rd_VPR128.4S, Imm_neon_uimm8Shift:4, 4:1); -@endif } # C7.2.212 ORR (vector, immediate) page C7-1880 line 105389 MATCH x0f001400/mask=xbff81c00 @@ -20820,41 +12090,16 @@ is b_3131=0 & q=1 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1 :orr Rd_VPR128.8H, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1415=2 & b_1012=5 & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.8H = Rd_VPR128.8H | Imm_neon_uimm8Shift:2 on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) | Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) | Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) | 
Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) | Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) | Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) | Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) | Imm_neon_uimm8Shift:2; - simd_address_at(tmp1, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) | Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] | Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] | Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] | Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] | Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] | Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] | Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] | Imm_neon_uimm8Shift:2; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] | Imm_neon_uimm8Shift:2; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_OR(Rd_VPR128.8H, Imm_neon_uimm8Shift:2); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_orr(Rd_VPR128.8H, Imm_neon_uimm8Shift:2, 2:1); -@endif } # C7.2.213 ORR (vector, register) page C7-1882 line 105515 MATCH x0ea01c00/mask=xbfe0fc00 @@ -20867,82 +12112,24 @@ is b_3131=0 & q=1 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1 :orr Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.16B = Rn_VPR128.16B | Rm_VPR128.16B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp3 = 
(* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] | Rm_VPR128.16B[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] | Rm_VPR128.16B[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] | Rm_VPR128.16B[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] | Rm_VPR128.16B[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] | Rm_VPR128.16B[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] | Rm_VPR128.16B[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] | Rm_VPR128.16B[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] | Rm_VPR128.16B[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] | Rm_VPR128.16B[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] | Rm_VPR128.16B[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] | Rm_VPR128.16B[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] | Rm_VPR128.16B[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] | Rm_VPR128.16B[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] | Rm_VPR128.16B[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] | Rm_VPR128.16B[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] | Rm_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_OR(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif 
defined(SEMANTIC_pseudo) - Rn_VPR128.16B = NEON_orr(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.213 ORR (vector, register) page C7-1882 line 105515 MATCH x0ea01c00/mask=xbfe0fc00 @@ -20955,50 +12142,16 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.16 :orr Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.8B = Rn_VPR64.8B | Rm_VPR64.8B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) | (* [register]:1 tmp2); + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] | Rm_VPR64.8B[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] | Rm_VPR64.8B[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] | Rm_VPR64.8B[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] | Rm_VPR64.8B[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] | Rm_VPR64.8B[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] | Rm_VPR64.8B[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] | Rm_VPR64.8B[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] | Rm_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_OR(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rn_VPR64.8B = NEON_orr(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.214 PMUL page C7-1884 line 105605 MATCH x2e209c00/mask=xbf20fc00 @@ -21009,9 +12162,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.8B :pmul Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x13 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) 
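# PMUL is kept as the NEON_pmul pcodeop below rather than expanded into slice
# assignments: each byte lane is a polynomial (carry-less) multiply over GF(2),
# truncated to 8 bits, and p-code has no single operator for that. A one-lane
# sketch, assuming example inputs 0x53 and 0x0a: the carry-less product is
# (0x53 << 1) ^ (0x53 << 3) = 0xa6 ^ 0x298 = 0x23e, and PMUL keeps only the low
# byte, 0x3e.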
Rd_VPR128.16B = NEON_pmul(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.214 PMUL page C7-1884 line 105605 MATCH x2e209c00/mask=xbf20fc00 @@ -21022,9 +12173,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :pmul Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x13 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_pmul(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.215 PMULL, PMULL2 page C7-1886 line 105707 MATCH x0e20e000/mask=xbf20fc00 @@ -21035,9 +12184,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :pmull Rd_VPR128.1Q, Rn_VPR64.1D, Rm_VPR64.1D is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR64.1D & b_1215=0xe & b_1011=0 & Rn_VPR64.1D & Rd_VPR128.1Q & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.1Q = NEON_pmull(Rn_VPR64.1D, Rm_VPR64.1D, 8:1); -@endif } # C7.2.215 PMULL, PMULL2 page C7-1886 line 105707 MATCH x0e20e000/mask=xbf20fc00 @@ -21048,9 +12195,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR64.1D :pmull Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xe & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_pmull(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.215 PMULL, PMULL2 page C7-1886 line 105707 MATCH x0e20e000/mask=xbf20fc00 @@ -21061,9 +12206,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :pmull2 Rd_VPR128.1Q, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1215=0xe & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.1Q & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.1Q = NEON_pmull2(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.215 PMULL, PMULL2 page C7-1886 line 105707 MATCH x0e20e000/mask=xbf20fc00 @@ -21074,9 +12217,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :pmull2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xe & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_pmull2(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.216 RADDHN, RADDHN2 page C7-1888 line 105826 MATCH x2e204000/mask=xbf20fc00 @@ -21088,115 +12229,34 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :raddhn2 Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x4 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H + Rm_VPR128.8H on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, 
Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 7, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) + (* [register]:2 tmp3); + TMPQ1[0,16] = Rn_VPR128.8H[0,16] + Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] + Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] + Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] + Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] + Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] + Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] + Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] + Rm_VPR128.8H[112,16]; # simd infix TMPQ1 = TMPQ1 + 0x80:2 on lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, TMPQ1, 0, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) + 0x80:2; - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, TMPQ1, 1, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) + 0x80:2; - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, TMPQ1, 2, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) + 0x80:2; - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, TMPQ1, 3, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) + 0x80:2; - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - simd_address_at(tmp6, TMPQ1, 4, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) + 0x80:2; - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, TMPQ1, 5, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) + 0x80:2; - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, TMPQ1, 6, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) + 0x80:2; - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, TMPQ1, 7, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) + 0x80:2; + TMPQ1[0,16] = TMPQ1[0,16] + 0x80:2; + TMPQ1[16,16] = TMPQ1[16,16] + 0x80:2; + TMPQ1[32,16] = TMPQ1[32,16] + 0x80:2; + TMPQ1[48,16] = TMPQ1[48,16] + 0x80:2; + TMPQ1[64,16] = TMPQ1[64,16] + 0x80:2; + TMPQ1[80,16] = TMPQ1[80,16] + 0x80:2; + TMPQ1[96,16] = TMPQ1[96,16] + 0x80:2; + TMPQ1[112,16] = TMPQ1[112,16] + 0x80:2; # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15) lane size 1 - local tmp7:4 = 0; - local tmp8:4 = 0; - 
simd_address_at(tmp7, TMPQ1, 1, 1, 16); - simd_address_at(tmp8, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp8 = * [register]:1 tmp7; - simd_address_at(tmp7, TMPQ1, 3, 1, 16); - simd_address_at(tmp8, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp8 = * [register]:1 tmp7; - simd_address_at(tmp7, TMPQ1, 5, 1, 16); - simd_address_at(tmp8, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp8 = * [register]:1 tmp7; - simd_address_at(tmp7, TMPQ1, 7, 1, 16); - simd_address_at(tmp8, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp8 = * [register]:1 tmp7; - simd_address_at(tmp7, TMPQ1, 9, 1, 16); - simd_address_at(tmp8, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp8 = * [register]:1 tmp7; - simd_address_at(tmp7, TMPQ1, 11, 1, 16); - simd_address_at(tmp8, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp8 = * [register]:1 tmp7; - simd_address_at(tmp7, TMPQ1, 13, 1, 16); - simd_address_at(tmp8, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp8 = * [register]:1 tmp7; - simd_address_at(tmp7, TMPQ1, 15, 1, 16); - simd_address_at(tmp8, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp8 = * [register]:1 tmp7; + Rd_VPR128.16B[64,8] = TMPQ1[8,8]; + Rd_VPR128.16B[72,8] = TMPQ1[24,8]; + Rd_VPR128.16B[80,8] = TMPQ1[40,8]; + Rd_VPR128.16B[88,8] = TMPQ1[56,8]; + Rd_VPR128.16B[96,8] = TMPQ1[72,8]; + Rd_VPR128.16B[104,8] = TMPQ1[88,8]; + Rd_VPR128.16B[112,8] = TMPQ1[104,8]; + Rd_VPR128.16B[120,8] = TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ADD(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - tmp1 = SIMD_INT_ADD(tmp1, 0x80:2, 2:1); - local tmp2:1 = 0; - local tmpd:16 = Rd_VPR128.16B; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 8:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 9:1); - tmp2 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp2, 10:1); - tmp2 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp2, 11:1); - tmp2 = SIMD_PIECE(tmp1, 9:1); tmpd = SIMD_COPY(tmpd, tmp2, 12:1); - tmp2 = SIMD_PIECE(tmp1, 11:1); tmpd = SIMD_COPY(tmpd, tmp2, 13:1); - tmp2 = SIMD_PIECE(tmp1, 13:1); tmpd = SIMD_COPY(tmpd, tmp2, 14:1); - tmp2 = SIMD_PIECE(tmp1, 15:1); tmpd = SIMD_COPY(tmpd, tmp2, 15:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_raddhn2(Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.216 RADDHN, RADDHN2 page C7-1888 line 105826 MATCH x2e204000/mask=xbf20fc00 @@ -21208,49 +12268,16 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H :raddhn2 Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x4 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.2D + Rm_VPR128.2D on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) + (* [register]:8 tmp3); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) + (* [register]:8 tmp3); + TMPQ1[0,64] = Rn_VPR128.2D[0,64] + Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] + Rm_VPR128.2D[64,64]; # simd infix TMPQ1 = TMPQ1 + 0x80000000:8 on lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, 
TMPQ1, 0, 8, 16); - simd_address_at(tmp6, TMPQ1, 0, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp5) + 0x80000000:8; - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - simd_address_at(tmp6, TMPQ1, 1, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp5) + 0x80000000:8; + TMPQ1[0,64] = TMPQ1[0,64] + 0x80000000:8; + TMPQ1[64,64] = TMPQ1[64,64] + 0x80000000:8; # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-2@3-3) lane size 4 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ1, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = * [register]:4 tmp7; - simd_address_at(tmp7, TMPQ1, 3, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = * [register]:4 tmp7; + Rd_VPR128.4S[64,32] = TMPQ1[32,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ADD(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - tmp1 = SIMD_INT_ADD(tmp1, 0x80000000:8, 8:1); - local tmp2:4 = 0; - local tmpd:16 = Rd_VPR128.4S; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 2:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_raddhn2(Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.216 RADDHN, RADDHN2 page C7-1888 line 105826 MATCH x2e204000/mask=xbf20fc00 @@ -21262,71 +12289,22 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D :raddhn2 Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x4 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S + Rm_VPR128.4S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) + (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) + (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) + (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) + (* [register]:4 tmp3); + TMPQ1[0,32] = Rn_VPR128.4S[0,32] + Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] + Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] + Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] + Rm_VPR128.4S[96,32]; # simd infix TMPQ1 = TMPQ1 + 0x8000:4 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - simd_address_at(tmp6, TMPQ1, 0, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp5) + 0x8000:4; - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, TMPQ1, 1, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp5) + 0x8000:4; - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - simd_address_at(tmp6, TMPQ1, 2, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp5) + 0x8000:4; - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, TMPQ1, 
3, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp5) + 0x8000:4; + TMPQ1[0,32] = TMPQ1[0,32] + 0x8000:4; + TMPQ1[32,32] = TMPQ1[32,32] + 0x8000:4; + TMPQ1[64,32] = TMPQ1[64,32] + 0x8000:4; + TMPQ1[96,32] = TMPQ1[96,32] + 0x8000:4; # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-4@3-5@5-6@7-7) lane size 2 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ1, 1, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp8 = * [register]:2 tmp7; - simd_address_at(tmp7, TMPQ1, 3, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp8 = * [register]:2 tmp7; - simd_address_at(tmp7, TMPQ1, 5, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp8 = * [register]:2 tmp7; - simd_address_at(tmp7, TMPQ1, 7, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp8 = * [register]:2 tmp7; + Rd_VPR128.8H[64,16] = TMPQ1[16,16]; + Rd_VPR128.8H[80,16] = TMPQ1[48,16]; + Rd_VPR128.8H[96,16] = TMPQ1[80,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ADD(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - tmp1 = SIMD_INT_ADD(tmp1, 0x8000:4, 4:1); - local tmp2:2 = 0; - local tmpd:16 = Rd_VPR128.8H; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 4:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 5:1); - tmp2 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp2, 6:1); - tmp2 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp2, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_raddhn2(Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.216 RADDHN, RADDHN2 page C7-1888 line 105826 MATCH x2e204000/mask=xbf20fc00 @@ -21337,9 +12315,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S :raddhn Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x4 & b_1011=0 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_raddhn(Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.216 RADDHN, RADDHN2 page C7-1888 line 105826 MATCH x2e204000/mask=xbf20fc00 @@ -21350,9 +12326,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D :raddhn Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x4 & b_1011=0 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_raddhn(Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.216 RADDHN, RADDHN2 page C7-1888 line 105826 MATCH x2e204000/mask=xbf20fc00 @@ -21363,9 +12337,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S :raddhn Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x4 & b_1011=0 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_raddhn(Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.217 RAX1 page C7-1890 line 105949 MATCH xce608c00/mask=xffe0fc00 @@ -21377,36 +12349,13 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H :rax1 Rd_VPR128.2D, 
Rn_VPR128.2D, Rm_VPR128.2D is b_2131=0b11001110011 & b_1015=0b100011 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rm_VPR128.2D << 1:8 on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp2) << 1:8; - simd_address_at(tmp2, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp2) << 1:8; + TMPQ1[0,64] = Rm_VPR128.2D[0,64] << 1:8; + TMPQ1[64,64] = Rm_VPR128.2D[64,64] << 1:8; # simd infix Rd_VPR128.2D = Rn_VPR128.2D | TMPQ1 on lane size 8 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp5, TMPQ1, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp4) | (* [register]:8 tmp5); - simd_address_at(tmp4, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp4) | (* [register]:8 tmp5); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] | TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] | TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_LEFT(Rm_VPR128.2D, 1:8, 8:1); - local tmpd:16 = SIMD_INT_OR(Rn_VPR128.2D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_rax1(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.218 RBIT (vector) page C7-1891 line 106016 MATCH x2e605800/mask=xbffffc00 @@ -21417,9 +12366,7 @@ is b_2131=0b11001110011 & b_1015=0b100011 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR :rbit Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_1029=0b10111001100000010110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_rbit(Rn_VPR64.8B, 1:1); -@endif } # C7.2.218 RBIT (vector) page C7-1891 line 106016 MATCH x2e605800/mask=xbffffc00 @@ -21430,9 +12377,7 @@ is b_31=0 & b_30=0 & b_1029=0b10111001100000010110 & Rd_VPR64.8B & Rn_VPR64.8B & :rbit Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_1029=0b10111001100000010110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_rbit(Rn_VPR128.16B, 1:1); -@endif } # C7.2.219 REV16 (vector) page C7-1893 line 106101 MATCH x0e201800/mask=xbf3ffc00 @@ -21443,9 +12388,7 @@ is b_31=0 & b_30=1 & b_1029=0b10111001100000010110 & Rd_VPR128.16B & Rn_VPR128.1 :rev16 Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x1 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_rev16(Rn_VPR128.16B, 1:1); -@endif } # C7.2.219 REV16 (vector) page C7-1893 line 106101 MATCH x0e201800/mask=xbf3ffc00 @@ -21456,9 +12399,7 @@ is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :rev16 Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x1 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_rev16(Rn_VPR64.8B, 1:1); -@endif } # C7.2.220 REV32 (vector) page C7-1895 
line 106218 MATCH x2e200800/mask=xbf3ffc00 @@ -21469,9 +12410,7 @@ is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :rev32 Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & Q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_rev32(Rn_VPR128.16B, 1:1); -@endif } # C7.2.220 REV32 (vector) page C7-1895 line 106218 MATCH x2e200800/mask=xbf3ffc00 @@ -21482,9 +12421,7 @@ is b_3131=0 & Q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :rev32 Rd_VPR64.4H, Rn_VPR64.4H is b_3131=0 & Q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_rev32(Rn_VPR64.4H, 2:1); -@endif } # C7.2.220 REV32 (vector) page C7-1895 line 106218 MATCH x2e200800/mask=xbf3ffc00 @@ -21495,9 +12432,7 @@ is b_3131=0 & Q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :rev32 Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & Q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_rev32(Rn_VPR64.8B, 1:1); -@endif } # C7.2.220 REV32 (vector) page C7-1895 line 106218 MATCH x2e200800/mask=xbf3ffc00 @@ -21508,9 +12443,7 @@ is b_3131=0 & Q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :rev32 Rd_VPR128.8H, Rn_VPR128.8H is b_3131=0 & Q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_rev32(Rn_VPR128.8H, 2:1); -@endif } # C7.2.221 REV64 page C7-1897 line 106333 MATCH x0e200800/mask=xbf3ffc00 @@ -21521,9 +12454,7 @@ is b_3131=0 & Q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :rev64 Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_rev64(Rn_VPR128.16B, 1:1); -@endif } # C7.2.221 REV64 page C7-1897 line 106333 MATCH x0e200800/mask=xbf3ffc00 @@ -21534,9 +12465,7 @@ is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :rev64 Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_rev64(Rn_VPR64.2S, 4:1); -@endif } # C7.2.221 REV64 page C7-1897 line 106333 MATCH x0e200800/mask=xbf3ffc00 @@ -21547,9 +12476,7 @@ is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :rev64 Rd_VPR64.4H, Rn_VPR64.4H is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_rev64(Rn_VPR64.4H, 2:1); -@endif } # C7.2.221 REV64 page C7-1897 line 106333 MATCH x0e200800/mask=xbf3ffc00 @@ -21560,9 +12487,7 @@ is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & 
b_1216=0x :rev64 Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_rev64(Rn_VPR128.4S, 4:1); -@endif } # C7.2.221 REV64 page C7-1897 line 106333 MATCH x0e200800/mask=xbf3ffc00 @@ -21573,9 +12498,7 @@ is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :rev64 Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_rev64(Rn_VPR64.8B, 1:1); -@endif } # C7.2.221 REV64 page C7-1897 line 106333 MATCH x0e200800/mask=xbf3ffc00 @@ -21586,9 +12509,7 @@ is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :rev64 Rd_VPR128.8H, Rn_VPR128.8H is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_rev64(Rn_VPR128.8H, 2:1); -@endif } # C7.2.222 RSHRN, RSHRN2 page C7-1899 line 106450 MATCH x0f008c00/mask=xbf80fc00 @@ -21599,9 +12520,7 @@ is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :rshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_rshrn2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.222 RSHRN, RSHRN2 page C7-1899 line 106450 MATCH x0f008c00/mask=xbf80fc00 @@ -21612,9 +12531,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & :rshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_rshrn(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.222 RSHRN, RSHRN2 page C7-1899 line 106450 MATCH x0f008c00/mask=xbf80fc00 @@ -21625,9 +12542,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & :rshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_rshrn(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.222 RSHRN, RSHRN2 page C7-1899 line 106450 MATCH x0f008c00/mask=xbf80fc00 @@ -21638,9 +12553,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 :rshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_rshrn2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.222 RSHRN, RSHRN2 page C7-1899 line 106450 MATCH x0f008c00/mask=xbf80fc00 @@ -21651,9 +12564,7 @@ is b_3131=0 & q=1 & 
u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & :rshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_rshrn(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.222 RSHRN, RSHRN2 page C7-1899 line 106450 MATCH x0f008c00/mask=xbf80fc00 @@ -21664,9 +12575,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & :rshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_rshrn2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.223 RSUBHN, RSUBHN2 page C7-1901 line 106573 MATCH x2e206000/mask=xbf20fc00 @@ -21677,9 +12586,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 :rsubhn2 Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x6 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_rsubhn2(Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.223 RSUBHN, RSUBHN2 page C7-1901 line 106573 MATCH x2e206000/mask=xbf20fc00 @@ -21690,9 +12597,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H :rsubhn2 Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x6 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_rsubhn2(Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.223 RSUBHN, RSUBHN2 page C7-1901 line 106573 MATCH x2e206000/mask=xbf20fc00 @@ -21703,9 +12608,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D :rsubhn2 Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x6 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_rsubhn2(Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.223 RSUBHN, RSUBHN2 page C7-1901 line 106573 MATCH x2e206000/mask=xbf20fc00 @@ -21716,9 +12619,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S :rsubhn Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x6 & b_1011=0 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_rsubhn(Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.223 RSUBHN, RSUBHN2 page C7-1901 line 106573 MATCH x2e206000/mask=xbf20fc00 @@ -21729,9 +12630,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D :rsubhn Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x6 & b_1011=0 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if 
defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_rsubhn(Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.223 RSUBHN, RSUBHN2 page C7-1901 line 106573 MATCH x2e206000/mask=xbf20fc00 @@ -21742,9 +12641,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S :rsubhn Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x6 & b_1011=0 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_rsubhn(Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.224 SABA page C7-1903 line 106699 MATCH x0e207c00/mask=xbf20fc00 @@ -21755,9 +12652,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H :saba Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xf & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_saba(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.224 SABA page C7-1903 line 106699 MATCH x0e207c00/mask=xbf20fc00 @@ -21768,9 +12663,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :saba Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xf & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_saba(Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.224 SABA page C7-1903 line 106699 MATCH x0e207c00/mask=xbf20fc00 @@ -21781,9 +12674,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :saba Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xf & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_saba(Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.224 SABA page C7-1903 line 106699 MATCH x0e207c00/mask=xbf20fc00 @@ -21794,9 +12685,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :saba Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xf & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_saba(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.224 SABA page C7-1903 line 106699 MATCH x0e207c00/mask=xbf20fc00 @@ -21807,9 +12696,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :saba Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xf & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_saba(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.224 SABA page C7-1903 line 106699 MATCH x0e207c00/mask=xbf20fc00 @@ -21820,9 +12707,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B 
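# NEON_saba is the per-lane signed absolute-difference-and-accumulate pcodeop:
# for each lane i, Rd[i] = Rd[i] + abs(Rn[i] - Rm[i]), with the difference taken
# as a signed value of the lane width given by the final argument. A one-lane
# sketch, assuming halfword inputs Rn[i] = -3 and Rm[i] = 4: the signed
# difference is -7, so 7 is added into the corresponding accumulator lane of Rd.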
:saba Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xf & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_saba(Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.225 SABAL, SABAL2 page C7-1905 line 106799 MATCH x0e205000/mask=xbf20fc00 @@ -21834,69 +12719,22 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sabal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x5 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - simd_address_at(tmp10, TMPQ3, 0, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) - (* [register]:8 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - simd_address_at(tmp10, TMPQ3, 1, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) - (* [register]:8 tmp9); + TMPQ3[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - * [register]:8 tmp13 = MP_INT_ABS(* [register]:8 tmp12); - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - * [register]:8 tmp13 = MP_INT_ABS(* [register]:8 tmp12); + TMPQ4[0,64] = MP_INT_ABS(TMPQ3[0,64]); + TMPQ4[64,64] = MP_INT_ABS(TMPQ3[64,64]); # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8 - local tmp14:4 = 0; - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp14, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp15, TMPQ4, 0, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp16 = (* [register]:8 tmp14) + (* [register]:8 tmp15); - simd_address_at(tmp14, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp15, TMPQ4, 1, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp16 = (* [register]:8 tmp14) + (* [register]:8 tmp15); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 
SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.2S, 4:1); - local tmp3:16 = SIMD_INT_SUB(tmp1, tmp2, 8:1); - local tmp4:16 = SIMD_INT_ABS(tmp3, 8:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sabal(Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.225 SABAL, SABAL2 page C7-1905 line 106799 MATCH x0e205000/mask=xbf20fc00 @@ -21908,103 +12746,32 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sabal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x5 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); + TMPQ3[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; # 
simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - * [register]:4 tmp13 = MP_INT_ABS(* [register]:4 tmp12); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - * [register]:4 tmp13 = MP_INT_ABS(* [register]:4 tmp12); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - * [register]:4 tmp13 = MP_INT_ABS(* [register]:4 tmp12); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - * [register]:4 tmp13 = MP_INT_ABS(* [register]:4 tmp12); + TMPQ4[0,32] = MP_INT_ABS(TMPQ3[0,32]); + TMPQ4[32,32] = MP_INT_ABS(TMPQ3[32,32]); + TMPQ4[64,32] = MP_INT_ABS(TMPQ3[64,32]); + TMPQ4[96,32] = MP_INT_ABS(TMPQ3[96,32]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 - local tmp14:4 = 0; - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp15, TMPQ4, 0, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) + (* [register]:4 tmp15); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp15, TMPQ4, 1, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) + (* [register]:4 tmp15); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp15, TMPQ4, 2, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) + (* [register]:4 tmp15); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp15, TMPQ4, 3, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) + (* [register]:4 tmp15); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.4H, 2:1); - local tmp3:16 = SIMD_INT_SUB(tmp1, tmp2, 4:1); - local tmp4:16 = SIMD_INT_ABS(tmp3, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sabal(Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.225 SABAL, SABAL2 page C7-1905 line 106799 MATCH x0e205000/mask=xbf20fc00 @@ -22016,171 +12783,52 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sabal Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x5 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - 
simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 2 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 2, 16); - simd_address_at(tmp9, TMPQ2, 0, 2, 16); - simd_address_at(tmp10, TMPQ3, 0, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 2, 16); - simd_address_at(tmp9, TMPQ2, 1, 2, 16); - simd_address_at(tmp10, TMPQ3, 1, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 2, 16); - simd_address_at(tmp9, TMPQ2, 2, 2, 16); - simd_address_at(tmp10, TMPQ3, 2, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 2, 16); - simd_address_at(tmp9, TMPQ2, 3, 2, 16); - simd_address_at(tmp10, TMPQ3, 3, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 4, 2, 16); - simd_address_at(tmp9, TMPQ2, 4, 2, 16); - simd_address_at(tmp10, 
TMPQ3, 4, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 5, 2, 16); - simd_address_at(tmp9, TMPQ2, 5, 2, 16); - simd_address_at(tmp10, TMPQ3, 5, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 6, 2, 16); - simd_address_at(tmp9, TMPQ2, 6, 2, 16); - simd_address_at(tmp10, TMPQ3, 6, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 7, 2, 16); - simd_address_at(tmp9, TMPQ2, 7, 2, 16); - simd_address_at(tmp10, TMPQ3, 7, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); + TMPQ3[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 2 - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp12, TMPQ3, 0, 2, 16); - simd_address_at(tmp13, TMPQ4, 0, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 1, 2, 16); - simd_address_at(tmp13, TMPQ4, 1, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 2, 2, 16); - simd_address_at(tmp13, TMPQ4, 2, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 3, 2, 16); - simd_address_at(tmp13, TMPQ4, 3, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 4, 2, 16); - simd_address_at(tmp13, TMPQ4, 4, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 5, 2, 16); - simd_address_at(tmp13, TMPQ4, 5, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 6, 2, 16); - simd_address_at(tmp13, TMPQ4, 6, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 7, 2, 16); - simd_address_at(tmp13, TMPQ4, 7, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); + TMPQ4[0,16] = MP_INT_ABS(TMPQ3[0,16]); + TMPQ4[16,16] = MP_INT_ABS(TMPQ3[16,16]); + TMPQ4[32,16] = MP_INT_ABS(TMPQ3[32,16]); + TMPQ4[48,16] = MP_INT_ABS(TMPQ3[48,16]); + TMPQ4[64,16] = MP_INT_ABS(TMPQ3[64,16]); + TMPQ4[80,16] = MP_INT_ABS(TMPQ3[80,16]); + TMPQ4[96,16] = MP_INT_ABS(TMPQ3[96,16]); + TMPQ4[112,16] = MP_INT_ABS(TMPQ3[112,16]); # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ4 on lane size 2 - local tmp14:4 = 0; - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp14, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp15, TMPQ4, 0, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp15, TMPQ4, 1, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp15, TMPQ4, 2, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp15, TMPQ4, 3, 2, 16); - simd_address_at(tmp16, 
Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp15, TMPQ4, 4, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp15, TMPQ4, 5, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp15, TMPQ4, 6, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp15, TMPQ4, 7, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.8B, 1:1); - local tmp3:16 = SIMD_INT_SUB(tmp1, tmp2, 2:1); - local tmp4:16 = SIMD_INT_ABS(tmp3, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp4, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sabal(Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.225 SABAL, SABAL2 page C7-1905 line 106799 MATCH x0e205000/mask=xbf20fc00 @@ -22192,77 +12840,24 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :sabal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x5 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 8 - local tmp12:4 = 0; - 
local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - simd_address_at(tmp14, TMPQ5, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) - (* [register]:8 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - simd_address_at(tmp14, TMPQ5, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) - (* [register]:8 tmp13); + TMPQ5[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 8 - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp16, TMPQ5, 0, 8, 16); - simd_address_at(tmp17, TMPQ6, 0, 8, 16); - * [register]:8 tmp17 = MP_INT_ABS(* [register]:8 tmp16); - simd_address_at(tmp16, TMPQ5, 1, 8, 16); - simd_address_at(tmp17, TMPQ6, 1, 8, 16); - * [register]:8 tmp17 = MP_INT_ABS(* [register]:8 tmp16); + TMPQ6[0,64] = MP_INT_ABS(TMPQ5[0,64]); + TMPQ6[64,64] = MP_INT_ABS(TMPQ5[64,64]); # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ6 on lane size 8 - local tmp18:4 = 0; - local tmp19:4 = 0; - local tmp20:4 = 0; - simd_address_at(tmp18, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp19, TMPQ6, 0, 8, 16); - simd_address_at(tmp20, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp20 = (* [register]:8 tmp18) + (* [register]:8 tmp19); - simd_address_at(tmp18, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp19, TMPQ6, 1, 8, 16); - simd_address_at(tmp20, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp20 = (* [register]:8 tmp18) + (* [register]:8 tmp19); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ6[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ6[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 4:1); - local tmp5:16 = SIMD_INT_SUB(tmp2, tmp4, 8:1); - local tmp6:16 = SIMD_INT_ABS(tmp5, 8:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp6, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sabal2(Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.225 SABAL, SABAL2 page C7-1905 line 106799 MATCH x0e205000/mask=xbf20fc00 @@ -22274,111 +12869,34 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sabal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x5 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = 
sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, TMPQ5, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, TMPQ5, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, TMPQ5, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, TMPQ5, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); + TMPQ5[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 4 - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp16, TMPQ5, 0, 4, 16); - simd_address_at(tmp17, TMPQ6, 0, 4, 16); - * [register]:4 tmp17 = MP_INT_ABS(* [register]:4 tmp16); - simd_address_at(tmp16, TMPQ5, 1, 4, 16); - simd_address_at(tmp17, TMPQ6, 1, 4, 16); - * [register]:4 tmp17 = MP_INT_ABS(* [register]:4 tmp16); - simd_address_at(tmp16, TMPQ5, 2, 4, 16); - simd_address_at(tmp17, TMPQ6, 2, 4, 16); - * [register]:4 tmp17 = MP_INT_ABS(* [register]:4 tmp16); - simd_address_at(tmp16, TMPQ5, 3, 4, 16); - simd_address_at(tmp17, TMPQ6, 3, 4, 16); - * [register]:4 tmp17 = MP_INT_ABS(* [register]:4 tmp16); + TMPQ6[0,32] = MP_INT_ABS(TMPQ5[0,32]); + TMPQ6[32,32] = MP_INT_ABS(TMPQ5[32,32]); + TMPQ6[64,32] = MP_INT_ABS(TMPQ5[64,32]); + TMPQ6[96,32] = MP_INT_ABS(TMPQ5[96,32]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ6 on lane size 4 - local tmp18:4 = 0; - local tmp19:4 = 0; - local tmp20:4 = 0; - simd_address_at(tmp18, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp19, TMPQ6, 0, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); - simd_address_at(tmp18, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp19, TMPQ6, 1, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); - simd_address_at(tmp18, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp19, TMPQ6, 2, 4, 16); - 
simd_address_at(tmp20, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); - simd_address_at(tmp18, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp19, TMPQ6, 3, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ6[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ6[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ6[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ6[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 2:1); - local tmp5:16 = SIMD_INT_SUB(tmp2, tmp4, 4:1); - local tmp6:16 = SIMD_INT_ABS(tmp5, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp6, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sabal2(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.225 SABAL, SABAL2 page C7-1905 line 106799 MATCH x0e205000/mask=xbf20fc00 @@ -22390,179 +12908,54 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sabal2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x5 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - 
simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 2 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 2, 16); - simd_address_at(tmp13, TMPQ4, 0, 2, 16); - simd_address_at(tmp14, TMPQ5, 0, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 2, 16); - simd_address_at(tmp13, TMPQ4, 1, 2, 16); - simd_address_at(tmp14, TMPQ5, 1, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 2, 16); - simd_address_at(tmp13, TMPQ4, 2, 2, 16); - simd_address_at(tmp14, TMPQ5, 2, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 2, 16); - simd_address_at(tmp13, TMPQ4, 3, 2, 16); - simd_address_at(tmp14, TMPQ5, 3, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 4, 2, 16); - simd_address_at(tmp13, TMPQ4, 4, 2, 16); - simd_address_at(tmp14, TMPQ5, 4, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 5, 2, 16); - simd_address_at(tmp13, TMPQ4, 5, 2, 16); - simd_address_at(tmp14, TMPQ5, 5, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 6, 2, 16); - simd_address_at(tmp13, TMPQ4, 6, 2, 16); - simd_address_at(tmp14, TMPQ5, 6, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 7, 2, 16); - simd_address_at(tmp13, TMPQ4, 7, 2, 16); - simd_address_at(tmp14, TMPQ5, 7, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); + TMPQ5[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 2 - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp16, TMPQ5, 0, 2, 16); - simd_address_at(tmp17, TMPQ6, 0, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - 
simd_address_at(tmp16, TMPQ5, 1, 2, 16); - simd_address_at(tmp17, TMPQ6, 1, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 2, 2, 16); - simd_address_at(tmp17, TMPQ6, 2, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 3, 2, 16); - simd_address_at(tmp17, TMPQ6, 3, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 4, 2, 16); - simd_address_at(tmp17, TMPQ6, 4, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 5, 2, 16); - simd_address_at(tmp17, TMPQ6, 5, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 6, 2, 16); - simd_address_at(tmp17, TMPQ6, 6, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 7, 2, 16); - simd_address_at(tmp17, TMPQ6, 7, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); + TMPQ6[0,16] = MP_INT_ABS(TMPQ5[0,16]); + TMPQ6[16,16] = MP_INT_ABS(TMPQ5[16,16]); + TMPQ6[32,16] = MP_INT_ABS(TMPQ5[32,16]); + TMPQ6[48,16] = MP_INT_ABS(TMPQ5[48,16]); + TMPQ6[64,16] = MP_INT_ABS(TMPQ5[64,16]); + TMPQ6[80,16] = MP_INT_ABS(TMPQ5[80,16]); + TMPQ6[96,16] = MP_INT_ABS(TMPQ5[96,16]); + TMPQ6[112,16] = MP_INT_ABS(TMPQ5[112,16]); # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ6 on lane size 2 - local tmp18:4 = 0; - local tmp19:4 = 0; - local tmp20:4 = 0; - simd_address_at(tmp18, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp19, TMPQ6, 0, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp19, TMPQ6, 1, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp19, TMPQ6, 2, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp19, TMPQ6, 3, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp19, TMPQ6, 4, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp19, TMPQ6, 5, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp19, TMPQ6, 6, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp19, TMPQ6, 7, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ6[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ6[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ6[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ6[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ6[64,16]; + Rd_VPR128.8H[80,16] = 
Rd_VPR128.8H[80,16] + TMPQ6[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ6[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ6[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 1:1); - local tmp5:16 = SIMD_INT_SUB(tmp2, tmp4, 2:1); - local tmp6:16 = SIMD_INT_ABS(tmp5, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp6, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sabal2(Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.226 SABD page C7-1907 line 106916 MATCH x0e207400/mask=xbf20fc00 @@ -22573,9 +12966,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :sabd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xe & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sabd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.226 SABD page C7-1907 line 106916 MATCH x0e207400/mask=xbf20fc00 @@ -22589,88 +12980,25 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :sabd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xe & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.2S - Rm_VPR64.2S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) - (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) - (* [register]:4 tmp3); + TMPD1[0,32] = Rn_VPR64.2S[0,32] - Rm_VPR64.2S[0,32]; + TMPD1[32,32] = Rn_VPR64.2S[32,32] - Rm_VPR64.2S[32,32]; # simd infix TMPD2 = Rm_VPR64.2S - Rn_VPR64.2S on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp7, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp8, TMPD2, 0, 4, 8); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); - simd_address_at(tmp6, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp7, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp8, TMPD2, 1, 4, 8); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); + TMPD2[0,32] = Rm_VPR64.2S[0,32] - Rn_VPR64.2S[0,32]; + TMPD2[32,32] = Rm_VPR64.2S[32,32] - Rn_VPR64.2S[32,32]; # simd infix TMPD2 = TMPD2 * 2:4 on lane size 4 - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD2, 0, 4, 8); - simd_address_at(tmp10, TMPD2, 0, 4, 8); - * [register]:4 tmp10 = (* [register]:4 tmp9) * 2:4; - simd_address_at(tmp9, TMPD2, 1, 4, 8); - simd_address_at(tmp10, TMPD2, 1, 4, 8); - * [register]:4 tmp10 = (* [register]:4 tmp9) * 2:4; + TMPD2[0,32] = TMPD2[0,32] * 2:4; + TMPD2[32,32] = TMPD2[32,32] * 2:4; # simd infix TMPD3 = Rn_VPR64.2S s< Rm_VPR64.2S on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rn_VPR64.2S, 
0, 4, 8); - simd_address_at(tmp13, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp14, TMPD3, 0, 4, 8); - * [register]:4 tmp14 = zext((* [register]:4 tmp12) s< (* [register]:4 tmp13)); - simd_address_at(tmp12, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp13, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp14, TMPD3, 1, 4, 8); - * [register]:4 tmp14 = zext((* [register]:4 tmp12) s< (* [register]:4 tmp13)); + TMPD3[0,32] = zext(Rn_VPR64.2S[0,32] s< Rm_VPR64.2S[0,32]); + TMPD3[32,32] = zext(Rn_VPR64.2S[32,32] s< Rm_VPR64.2S[32,32]); # simd infix TMPD2 = TMPD2 * TMPD3 on lane size 4 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, TMPD2, 0, 4, 8); - simd_address_at(tmp16, TMPD3, 0, 4, 8); - simd_address_at(tmp17, TMPD2, 0, 4, 8); - * [register]:4 tmp17 = (* [register]:4 tmp15) * (* [register]:4 tmp16); - simd_address_at(tmp15, TMPD2, 1, 4, 8); - simd_address_at(tmp16, TMPD3, 1, 4, 8); - simd_address_at(tmp17, TMPD2, 1, 4, 8); - * [register]:4 tmp17 = (* [register]:4 tmp15) * (* [register]:4 tmp16); + TMPD2[0,32] = TMPD2[0,32] * TMPD3[0,32]; + TMPD2[32,32] = TMPD2[32,32] * TMPD3[32,32]; # simd infix Rd_VPR64.2S = TMPD1 + TMPD2 on lane size 4 - local tmp18:4 = 0; - local tmp19:4 = 0; - local tmp20:4 = 0; - simd_address_at(tmp18, TMPD1, 0, 4, 8); - simd_address_at(tmp19, TMPD2, 0, 4, 8); - simd_address_at(tmp20, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); - simd_address_at(tmp18, TMPD1, 1, 4, 8); - simd_address_at(tmp19, TMPD2, 1, 4, 8); - simd_address_at(tmp20, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); + Rd_VPR64.2S[0,32] = TMPD1[0,32] + TMPD2[0,32]; + Rd_VPR64.2S[32,32] = TMPD1[32,32] + TMPD2[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_SUB(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - local tmp2:8 = SIMD_INT_SUB(Rm_VPR64.2S, Rn_VPR64.2S, 4:1); - tmp2 = SIMD_INT_MULT(tmp2, 2:4); - local tmp3:8 = SIMD_INT_SLESS(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - tmp2 = SIMD_INT_MULT(tmp2, tmp3, 4:1); - local tmpd:8 = SIMD_INT_ADD(tmp1, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_sabd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.226 SABD page C7-1907 line 106916 MATCH x0e207400/mask=xbf20fc00 @@ -22681,9 +13009,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sabd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xe & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sabd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.226 SABD page C7-1907 line 106916 MATCH x0e207400/mask=xbf20fc00 @@ -22694,9 +13020,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sabd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xe & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sabd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.226 SABD page C7-1907 line 106916 MATCH x0e207400/mask=xbf20fc00 @@ -22707,9 +13031,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sabd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is 
b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xe & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sabd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.226 SABD page C7-1907 line 106916 MATCH x0e207400/mask=xbf20fc00 @@ -22720,9 +13042,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :sabd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xe & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sabd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.227 SABDL, SABDL2 page C7-1909 line 107016 MATCH x0e207000/mask=xbf20fc00 @@ -22734,56 +13054,19 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sabdl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x7 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - simd_address_at(tmp10, TMPQ3, 0, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) - (* [register]:8 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - simd_address_at(tmp10, TMPQ3, 1, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) - (* [register]:8 tmp9); + TMPQ3[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; # simd unary Rd_VPR128.2D = MP_INT_ABS(TMPQ3) on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ3, 0, 8, 16); - simd_address_at(tmp12, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp12 = MP_INT_ABS(* [register]:8 tmp11); - simd_address_at(tmp11, TMPQ3, 1, 8, 16); - simd_address_at(tmp12, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp12 = MP_INT_ABS(* [register]:8 tmp11); + Rd_VPR128.2D[0,64] = MP_INT_ABS(TMPQ3[0,64]); + Rd_VPR128.2D[64,64] = MP_INT_ABS(TMPQ3[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.2S, 4:1); - local tmp3:16 = SIMD_INT_SUB(tmp1, tmp2, 8:1); - local tmpd:16 = SIMD_INT_ABS(tmp3, 8:1); - Zd = zext(tmpd); # assigning to 
Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sabdl(Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.227 SABDL, SABDL2 page C7-1909 line 107016 MATCH x0e207000/mask=xbf20fc00 @@ -22795,82 +13078,27 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sabdl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x7 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); + TMPQ3[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; # simd unary Rd_VPR128.4S = MP_INT_ABS(TMPQ3) on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ3, 0, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp12 = MP_INT_ABS(* [register]:4 tmp11); - 
simd_address_at(tmp11, TMPQ3, 1, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp12 = MP_INT_ABS(* [register]:4 tmp11); - simd_address_at(tmp11, TMPQ3, 2, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp12 = MP_INT_ABS(* [register]:4 tmp11); - simd_address_at(tmp11, TMPQ3, 3, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp12 = MP_INT_ABS(* [register]:4 tmp11); + Rd_VPR128.4S[0,32] = MP_INT_ABS(TMPQ3[0,32]); + Rd_VPR128.4S[32,32] = MP_INT_ABS(TMPQ3[32,32]); + Rd_VPR128.4S[64,32] = MP_INT_ABS(TMPQ3[64,32]); + Rd_VPR128.4S[96,32] = MP_INT_ABS(TMPQ3[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.4H, 2:1); - local tmp3:16 = SIMD_INT_SUB(tmp1, tmp2, 4:1); - local tmpd:16 = SIMD_INT_ABS(tmp3, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sabdl(Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.227 SABDL, SABDL2 page C7-1909 line 107016 MATCH x0e207000/mask=xbf20fc00 @@ -22882,134 +13110,43 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sabdl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x7 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 
tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 2 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 2, 16); - simd_address_at(tmp9, TMPQ2, 0, 2, 16); - simd_address_at(tmp10, TMPQ3, 0, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 2, 16); - simd_address_at(tmp9, TMPQ2, 1, 2, 16); - simd_address_at(tmp10, TMPQ3, 1, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 2, 16); - simd_address_at(tmp9, TMPQ2, 2, 2, 16); - simd_address_at(tmp10, TMPQ3, 2, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 2, 16); - simd_address_at(tmp9, TMPQ2, 3, 2, 16); - simd_address_at(tmp10, TMPQ3, 3, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 4, 2, 16); - simd_address_at(tmp9, TMPQ2, 4, 2, 16); - simd_address_at(tmp10, TMPQ3, 4, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 5, 2, 16); - simd_address_at(tmp9, TMPQ2, 5, 2, 16); - simd_address_at(tmp10, TMPQ3, 5, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 6, 2, 16); - simd_address_at(tmp9, TMPQ2, 6, 2, 16); - simd_address_at(tmp10, TMPQ3, 6, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 7, 2, 16); - simd_address_at(tmp9, TMPQ2, 7, 2, 16); - simd_address_at(tmp10, TMPQ3, 7, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); + TMPQ3[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; # simd unary Rd_VPR128.8H = MP_INT_ABS(TMPQ3) on lane size 2 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ3, 0, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 1, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 2, 
2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 3, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 4, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 5, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 6, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 7, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); + Rd_VPR128.8H[0,16] = MP_INT_ABS(TMPQ3[0,16]); + Rd_VPR128.8H[16,16] = MP_INT_ABS(TMPQ3[16,16]); + Rd_VPR128.8H[32,16] = MP_INT_ABS(TMPQ3[32,16]); + Rd_VPR128.8H[48,16] = MP_INT_ABS(TMPQ3[48,16]); + Rd_VPR128.8H[64,16] = MP_INT_ABS(TMPQ3[64,16]); + Rd_VPR128.8H[80,16] = MP_INT_ABS(TMPQ3[80,16]); + Rd_VPR128.8H[96,16] = MP_INT_ABS(TMPQ3[96,16]); + Rd_VPR128.8H[112,16] = MP_INT_ABS(TMPQ3[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.8B, 1:1); - local tmp3:16 = SIMD_INT_SUB(tmp1, tmp2, 2:1); - local tmpd:16 = SIMD_INT_ABS(tmp3, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sabdl(Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.227 SABDL, SABDL2 page C7-1909 line 107016 MATCH x0e207000/mask=xbf20fc00 @@ -23021,64 +13158,21 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :sabdl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x7 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - simd_address_at(tmp14, TMPQ5, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) - (* 
[register]:8 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - simd_address_at(tmp14, TMPQ5, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) - (* [register]:8 tmp13); + TMPQ5[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; # simd unary Rd_VPR128.2D = MP_INT_ABS(TMPQ5) on lane size 8 - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp15, TMPQ5, 0, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp16 = MP_INT_ABS(* [register]:8 tmp15); - simd_address_at(tmp15, TMPQ5, 1, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp16 = MP_INT_ABS(* [register]:8 tmp15); + Rd_VPR128.2D[0,64] = MP_INT_ABS(TMPQ5[0,64]); + Rd_VPR128.2D[64,64] = MP_INT_ABS(TMPQ5[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 4:1); - local tmp5:16 = SIMD_INT_SUB(tmp2, tmp4, 8:1); - local tmpd:16 = SIMD_INT_ABS(tmp5, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sabdl2(Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.227 SABDL, SABDL2 page C7-1909 line 107016 MATCH x0e207000/mask=xbf20fc00 @@ -23090,90 +13184,29 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sabdl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x7 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = 
sext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, TMPQ5, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, TMPQ5, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, TMPQ5, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, TMPQ5, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); + TMPQ5[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; # simd unary Rd_VPR128.4S = MP_INT_ABS(TMPQ5) on lane size 4 - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp15, TMPQ5, 0, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp16 = MP_INT_ABS(* [register]:4 tmp15); - simd_address_at(tmp15, TMPQ5, 1, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp16 = MP_INT_ABS(* [register]:4 tmp15); - simd_address_at(tmp15, TMPQ5, 2, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp16 = MP_INT_ABS(* [register]:4 tmp15); - simd_address_at(tmp15, TMPQ5, 3, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp16 = MP_INT_ABS(* [register]:4 tmp15); + Rd_VPR128.4S[0,32] = MP_INT_ABS(TMPQ5[0,32]); + Rd_VPR128.4S[32,32] = MP_INT_ABS(TMPQ5[32,32]); + Rd_VPR128.4S[64,32] = MP_INT_ABS(TMPQ5[64,32]); + Rd_VPR128.4S[96,32] = MP_INT_ABS(TMPQ5[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 2:1); - local tmp5:16 = SIMD_INT_SUB(tmp2, tmp4, 4:1); - local tmpd:16 = SIMD_INT_ABS(tmp5, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sabdl2(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.227 SABDL, SABDL2 page C7-1909 line 107016 MATCH x0e207000/mask=xbf20fc00 @@ -23185,142 +13218,45 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sabdl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x7 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * 
[register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 2 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 2, 16); - simd_address_at(tmp13, TMPQ4, 0, 2, 16); - simd_address_at(tmp14, TMPQ5, 0, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 2, 16); - simd_address_at(tmp13, TMPQ4, 1, 2, 16); - simd_address_at(tmp14, TMPQ5, 1, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 2, 16); - simd_address_at(tmp13, TMPQ4, 2, 2, 16); - simd_address_at(tmp14, TMPQ5, 2, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 2, 16); - simd_address_at(tmp13, TMPQ4, 3, 2, 16); - simd_address_at(tmp14, TMPQ5, 3, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, 
TMPQ2, 4, 2, 16); - simd_address_at(tmp13, TMPQ4, 4, 2, 16); - simd_address_at(tmp14, TMPQ5, 4, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 5, 2, 16); - simd_address_at(tmp13, TMPQ4, 5, 2, 16); - simd_address_at(tmp14, TMPQ5, 5, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 6, 2, 16); - simd_address_at(tmp13, TMPQ4, 6, 2, 16); - simd_address_at(tmp14, TMPQ5, 6, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 7, 2, 16); - simd_address_at(tmp13, TMPQ4, 7, 2, 16); - simd_address_at(tmp14, TMPQ5, 7, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); + TMPQ5[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; # simd unary Rd_VPR128.8H = MP_INT_ABS(TMPQ5) on lane size 2 - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp15, TMPQ5, 0, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 1, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 2, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 3, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 4, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 5, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 6, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 7, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); + Rd_VPR128.8H[0,16] = MP_INT_ABS(TMPQ5[0,16]); + Rd_VPR128.8H[16,16] = MP_INT_ABS(TMPQ5[16,16]); + Rd_VPR128.8H[32,16] = MP_INT_ABS(TMPQ5[32,16]); + Rd_VPR128.8H[48,16] = MP_INT_ABS(TMPQ5[48,16]); + Rd_VPR128.8H[64,16] = MP_INT_ABS(TMPQ5[64,16]); + Rd_VPR128.8H[80,16] = MP_INT_ABS(TMPQ5[80,16]); + Rd_VPR128.8H[96,16] = MP_INT_ABS(TMPQ5[96,16]); + Rd_VPR128.8H[112,16] = MP_INT_ABS(TMPQ5[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 1:1); - local tmp5:16 = SIMD_INT_SUB(tmp2, tmp4, 2:1); - local tmpd:16 = SIMD_INT_ABS(tmp5, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sabdl2(Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.228 SADALP page C7-1911 line 107134 MATCH x0e206800/mask=xbf3ffc00 @@ -23333,77 +13269,34 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & 
b_2121=1 & Rm_VPR128.16 :sadalp Rd_VPR64.4H, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011010 & Rd_VPR64.4H & Rn_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.8B) on pairs lane size (1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:1 = 0; - local tmp6:1 = 0; - local tmp7:2 = 0; - local tmp8:2 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp4, TMPD1, 0, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; + tmp2 = Rn_VPR64.8B[0,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR64.8B[8,8]; + tmp5 = sext(tmp3); + TMPD1[0,16] = tmp4 + tmp5; + tmp2 = Rn_VPR64.8B[16,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR64.8B[24,8]; + tmp5 = sext(tmp3); + TMPD1[16,16] = tmp4 + tmp5; + tmp2 = Rn_VPR64.8B[32,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR64.8B[40,8]; + tmp5 = sext(tmp3); + TMPD1[32,16] = tmp4 + tmp5; + tmp2 = Rn_VPR64.8B[48,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR64.8B[56,8]; + tmp5 = sext(tmp3); + TMPD1[48,16] = tmp4 + tmp5; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 - local tmp9:4 = 0; - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp9, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp10, TMPD1, 0, 2, 8); - simd_address_at(tmp11, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp10, TMPD1, 1, 2, 8); - simd_address_at(tmp11, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp10, TMPD1, 2, 2, 8); - simd_address_at(tmp11, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp10, TMPD1, 3, 2, 8); - simd_address_at(tmp11, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR64.8B); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.4H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_sadalp(Rd_VPR64.4H, Rn_VPR64.8B, 1:1); -@endif } # C7.2.228 
SADALP page C7-1911 line 107134 MATCH x0e206800/mask=xbf3ffc00 @@ -23416,125 +13309,58 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011010 & Rd_ :sadalp Rd_VPR128.8H, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011010 & Rd_VPR128.8H & Rn_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.16B) on pairs lane size (1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:1 = 0; - local tmp6:1 = 0; - local tmp7:2 = 0; - local tmp8:2 = 0; - simd_address_at(tmp2, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp4, TMPQ1, 7, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; + tmp2 = Rn_VPR128.16B[0,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[8,8]; + tmp5 = sext(tmp3); + TMPQ1[0,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[16,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[24,8]; + tmp5 = sext(tmp3); + TMPQ1[16,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[32,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[40,8]; + tmp5 = sext(tmp3); + TMPQ1[32,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[48,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[56,8]; + tmp5 = sext(tmp3); + TMPQ1[48,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[64,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[72,8]; + tmp5 = sext(tmp3); + TMPQ1[64,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[80,8]; + tmp4 = sext(tmp2); + tmp3 = 
Rn_VPR128.16B[88,8]; + tmp5 = sext(tmp3); + TMPQ1[80,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[96,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[104,8]; + tmp5 = sext(tmp3); + TMPQ1[96,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[112,8]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.16B[120,8]; + tmp5 = sext(tmp3); + TMPQ1[112,16] = tmp4 + tmp5; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 - local tmp9:4 = 0; - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp9, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp10, TMPQ1, 0, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp10, TMPQ1, 1, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp10, TMPQ1, 2, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp10, TMPQ1, 3, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp10, TMPQ1, 4, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp10, TMPQ1, 5, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp10, TMPQ1, 6, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp10, TMPQ1, 7, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR128.16B); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sadalp(Rd_VPR128.8H, Rn_VPR128.16B, 1:1); -@endif } # C7.2.228 SADALP page C7-1911 line 107134 MATCH x0e206800/mask=xbf3ffc00 @@ -23547,53 +13373,22 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011010 & Rd_ :sadalp Rd_VPR64.2S, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011010 & Rd_VPR64.2S & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.4H) on pairs lane size (2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:2 = 0; - local tmp6:2 = 0; - local tmp7:4 
= 0; - local tmp8:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - tmp5 = * [register]:2 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = sext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - tmp5 = * [register]:2 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = sext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; + tmp2 = Rn_VPR64.4H[0,16]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR64.4H[16,16]; + tmp5 = sext(tmp3); + TMPD1[0,32] = tmp4 + tmp5; + tmp2 = Rn_VPR64.4H[32,16]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR64.4H[48,16]; + tmp5 = sext(tmp3); + TMPD1[32,32] = tmp4 + tmp5; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 - local tmp9:4 = 0; - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp9, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp10, TMPD1, 0, 4, 8); - simd_address_at(tmp11, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp11 = (* [register]:4 tmp9) + (* [register]:4 tmp10); - simd_address_at(tmp9, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp10, TMPD1, 1, 4, 8); - simd_address_at(tmp11, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp11 = (* [register]:4 tmp9) + (* [register]:4 tmp10); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR64.4H); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_sadalp(Rd_VPR64.2S, Rn_VPR64.4H, 2:1); -@endif } # C7.2.228 SADALP page C7-1911 line 107134 MATCH x0e206800/mask=xbf3ffc00 @@ -23606,77 +13401,34 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011010 & Rd_ :sadalp Rd_VPR128.4S, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011010 & Rd_VPR128.4S & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.8H) on pairs lane size (2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:2 = 0; - local tmp6:2 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = sext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = sext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = sext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = sext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; + tmp2 = Rn_VPR128.8H[0,16]; + tmp4 = 
sext(tmp2); + tmp3 = Rn_VPR128.8H[16,16]; + tmp5 = sext(tmp3); + TMPQ1[0,32] = tmp4 + tmp5; + tmp2 = Rn_VPR128.8H[32,16]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.8H[48,16]; + tmp5 = sext(tmp3); + TMPQ1[32,32] = tmp4 + tmp5; + tmp2 = Rn_VPR128.8H[64,16]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.8H[80,16]; + tmp5 = sext(tmp3); + TMPQ1[64,32] = tmp4 + tmp5; + tmp2 = Rn_VPR128.8H[96,16]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.8H[112,16]; + tmp5 = sext(tmp3); + TMPQ1[96,32] = tmp4 + tmp5; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 - local tmp9:4 = 0; - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp9, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp10, TMPQ1, 0, 4, 16); - simd_address_at(tmp11, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp9) + (* [register]:4 tmp10); - simd_address_at(tmp9, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp10, TMPQ1, 1, 4, 16); - simd_address_at(tmp11, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp9) + (* [register]:4 tmp10); - simd_address_at(tmp9, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp10, TMPQ1, 2, 4, 16); - simd_address_at(tmp11, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp9) + (* [register]:4 tmp10); - simd_address_at(tmp9, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp10, TMPQ1, 3, 4, 16); - simd_address_at(tmp11, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp9) + (* [register]:4 tmp10); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR128.8H); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sadalp(Rd_VPR128.4S, Rn_VPR128.8H, 2:1); -@endif } # C7.2.228 SADALP page C7-1911 line 107134 MATCH x0e206800/mask=xbf3ffc00 @@ -23689,41 +13441,15 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011010 & Rd_ :sadalp Rd_VPR64.1D, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011010 & Rd_VPR64.1D & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) - TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.2S) on pairs lane size (4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:8 = 0; - local tmp8:8 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 8, 8); - tmp5 = * [register]:4 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:4 tmp3; - tmp8 = sext(tmp6); - * [register]:8 tmp4 = tmp7 + tmp8; + tmp2 = Rn_VPR64.2S[0,32]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR64.2S[32,32]; + tmp5 = sext(tmp3); + tmpd1 = tmp4 + tmp5; # simd infix Rd_VPR64.1D = Rd_VPR64.1D + TMPD1 on lane size 8 - local tmp9:4 = 0; - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp9, Rd_VPR64.1D, 0, 8, 8); - simd_address_at(tmp10, TMPD1, 0, 8, 8); - simd_address_at(tmp11, Rd_VPR64.1D, 0, 8, 8); - * [register]:8 tmp11 = (* [register]:8 tmp9) + (* [register]:8 tmp10); + Rd_VPR64.1D[0,64] = Rd_VPR64.1D[0,64] + tmpd1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR64.2S); - local tmpd:8 = 
SIMD_INT_ADD(Rd_VPR64.1D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.1D -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.1D = NEON_sadalp(Rd_VPR64.1D, Rn_VPR64.2S, 4:1); -@endif } # C7.2.228 SADALP page C7-1911 line 107134 MATCH x0e206800/mask=xbf3ffc00 @@ -23736,53 +13462,22 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011010 & Rd_ :sadalp Rd_VPR128.2D, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011010 & Rd_VPR128.2D & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.4S) on pairs lane size (4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:8 = 0; - local tmp8:8 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - tmp5 = * [register]:4 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:4 tmp3; - tmp8 = sext(tmp6); - * [register]:8 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - tmp5 = * [register]:4 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:4 tmp3; - tmp8 = sext(tmp6); - * [register]:8 tmp4 = tmp7 + tmp8; + tmp2 = Rn_VPR128.4S[0,32]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.4S[32,32]; + tmp5 = sext(tmp3); + TMPQ1[0,64] = tmp4 + tmp5; + tmp2 = Rn_VPR128.4S[64,32]; + tmp4 = sext(tmp2); + tmp3 = Rn_VPR128.4S[96,32]; + tmp5 = sext(tmp3); + TMPQ1[64,64] = tmp4 + tmp5; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 - local tmp9:4 = 0; - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp9, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp10, TMPQ1, 0, 8, 16); - simd_address_at(tmp11, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp9) + (* [register]:8 tmp10); - simd_address_at(tmp9, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp10, TMPQ1, 1, 8, 16); - simd_address_at(tmp11, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp9) + (* [register]:8 tmp10); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR128.4S); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sadalp(Rd_VPR128.2D, Rn_VPR128.4S, 4:1); -@endif } # C7.2.229 SADDL, SADDL2 page C7-1913 line 107243 MATCH x0e200000/mask=xbf20fc00 @@ -23794,46 +13489,16 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011010 & Rd_ :saddl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x0 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local 
tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = TMPQ1 + TMPQ2 on lane size 8 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 8, 16); - simd_address_at(tmp8, TMPQ2, 0, 8, 16); - simd_address_at(tmp9, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp7) + (* [register]:8 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 8, 16); - simd_address_at(tmp8, TMPQ2, 1, 8, 16); - simd_address_at(tmp9, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp7) + (* [register]:8 tmp8); + Rd_VPR128.2D[0,64] = TMPQ1[0,64] + TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] + TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.2S, 4:1); - local tmpd:16 = SIMD_INT_ADD(tmp1, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rn_VPR64.2S = NEON_saddl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.229 SADDL, SADDL2 page C7-1913 line 107243 MATCH x0e200000/mask=xbf20fc00 @@ -23845,66 +13510,22 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :saddl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x0 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = TMPQ1 + TMPQ2 on lane size 4 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 
0; - simd_address_at(tmp7, TMPQ1, 0, 4, 16); - simd_address_at(tmp8, TMPQ2, 0, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) + (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 4, 16); - simd_address_at(tmp8, TMPQ2, 1, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) + (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 2, 4, 16); - simd_address_at(tmp8, TMPQ2, 2, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) + (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 3, 4, 16); - simd_address_at(tmp8, TMPQ2, 3, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) + (* [register]:4 tmp8); + Rd_VPR128.4S[0,32] = TMPQ1[0,32] + TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] + TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] + TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] + TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.4H, 2:1); - local tmpd:16 = SIMD_INT_ADD(tmp1, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_saddl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.229 SADDL, SADDL2 page C7-1913 line 107243 MATCH x0e200000/mask=xbf20fc00 @@ -23916,106 +13537,34 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :saddl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x0 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * 
[register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = TMPQ1 + TMPQ2 on lane size 2 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 2, 16); - simd_address_at(tmp8, TMPQ2, 0, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 2, 16); - simd_address_at(tmp8, TMPQ2, 1, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 2, 2, 16); - simd_address_at(tmp8, TMPQ2, 2, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 3, 2, 16); - simd_address_at(tmp8, TMPQ2, 3, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 4, 2, 16); - simd_address_at(tmp8, TMPQ2, 4, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 5, 2, 16); - simd_address_at(tmp8, TMPQ2, 5, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 6, 2, 16); - simd_address_at(tmp8, TMPQ2, 6, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 7, 2, 16); - simd_address_at(tmp8, TMPQ2, 7, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); + Rd_VPR128.8H[0,16] = TMPQ1[0,16] + TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] + TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] + TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] + TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] + TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] + TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] + TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] + TMPQ2[112,16]; zext_zq(Zd); # zero 
upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.8B, 1:1); - local tmpd:16 = SIMD_INT_ADD(tmp1, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_saddl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.229 SADDL, SADDL2 page C7-1913 line 107243 MATCH x0e200000/mask=xbf20fc00 @@ -24027,54 +13576,18 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :saddl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x0 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix Rd_VPR128.2D = TMPQ2 + TMPQ4 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, TMPQ4, 0, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) + (* [register]:8 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, TMPQ4, 1, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) + (* [register]:8 tmp12); + Rd_VPR128.2D[0,64] = TMPQ2[0,64] + TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] + TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 4:1); - local tmpd:16 = SIMD_INT_ADD(tmp2, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_saddl2(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.229 SADDL, SADDL2 page C7-1913 line 107243 MATCH x0e200000/mask=xbf20fc00 @@ -24086,74 +13599,24 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :saddl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x0 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize 
TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 + TMPQ4 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, TMPQ4, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, TMPQ4, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, TMPQ4, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, TMPQ4, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = TMPQ2[0,32] + TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] + TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] + TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] + TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_ADD(tmp2, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_saddl2(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.229 SADDL, SADDL2 page C7-1913 line 107243 MATCH x0e200000/mask=xbf20fc00 @@ -24165,114 +13628,36 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :saddl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 
& q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x0 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 + TMPQ4 on lane size 2 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 2, 16); - simd_address_at(tmp12, TMPQ4, 0, 2, 
16); - simd_address_at(tmp13, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 2, 16); - simd_address_at(tmp12, TMPQ4, 1, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 2, 2, 16); - simd_address_at(tmp12, TMPQ4, 2, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 3, 2, 16); - simd_address_at(tmp12, TMPQ4, 3, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 4, 2, 16); - simd_address_at(tmp12, TMPQ4, 4, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 5, 2, 16); - simd_address_at(tmp12, TMPQ4, 5, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 6, 2, 16); - simd_address_at(tmp12, TMPQ4, 6, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 7, 2, 16); - simd_address_at(tmp12, TMPQ4, 7, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); + Rd_VPR128.8H[0,16] = TMPQ2[0,16] + TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] + TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] + TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] + TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] + TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] + TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] + TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] + TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 1:1); - local tmpd:16 = SIMD_INT_ADD(tmp2, tmp4, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_saddl2(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.230 SADDLP page C7-1915 line 107363 MATCH x0e202800/mask=xbf3ffc00 @@ -24284,32 +13669,14 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :saddlp Rd_VPR64.1D, Rn_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.1D & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.2S; # sipd infix Rd_VPR64.1D = +(TMPD1) on pairs lane size (4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:8 = 0; - local tmp8:8 = 0; - simd_address_at(tmp2, TMPD1, 0, 4, 8); - simd_address_at(tmp3, TMPD1, 1, 4, 8); - simd_address_at(tmp4, Rd_VPR64.1D, 0, 8, 8); - tmp5 = * [register]:4 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:4 tmp3; - tmp8 = sext(tmp6); - * [register]:8 tmp4 = tmp7 + tmp8; + tmp2 = TMPD1[0,32]; + tmp4 = sext(tmp2); + tmp3 = TMPD1[32,32]; + tmp5 = sext(tmp3); + Rd_VPR64.1D[0,64] = tmp4 + tmp5; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local 
tmp1:8 = Rn_VPR64.2S; - local tmpd:8 = SIMD_INT_ADD(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.1D -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.1D = NEON_saddlp(Rn_VPR64.2S, 4:1); -@endif } # C7.2.230 SADDLP page C7-1915 line 107363 MATCH x0e202800/mask=xbf3ffc00 @@ -24321,40 +13688,19 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=2 :saddlp Rd_VPR64.2S, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.4H; # sipd infix Rd_VPR64.2S = +(TMPD1) on pairs lane size (2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:2 = 0; - local tmp6:2 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp2, TMPD1, 0, 2, 8); - simd_address_at(tmp3, TMPD1, 1, 2, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 0, 4, 8); - tmp5 = * [register]:2 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = sext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPD1, 2, 2, 8); - simd_address_at(tmp3, TMPD1, 3, 2, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 1, 4, 8); - tmp5 = * [register]:2 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = sext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; + tmp2 = TMPD1[0,16]; + tmp4 = sext(tmp2); + tmp3 = TMPD1[16,16]; + tmp5 = sext(tmp3); + Rd_VPR64.2S[0,32] = tmp4 + tmp5; + tmp2 = TMPD1[32,16]; + tmp4 = sext(tmp2); + tmp3 = TMPD1[48,16]; + tmp5 = sext(tmp3); + Rd_VPR64.2S[32,32] = tmp4 + tmp5; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.4H; - local tmpd:8 = SIMD_INT_ADD(tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_saddlp(Rn_VPR64.4H, 2:1); -@endif } # C7.2.230 SADDLP page C7-1915 line 107363 MATCH x0e202800/mask=xbf3ffc00 @@ -24366,56 +13712,29 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=2 :saddlp Rd_VPR64.4H, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.8B; # sipd infix Rd_VPR64.4H = +(TMPD1) on pairs lane size (1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:1 = 0; - local tmp6:1 = 0; - local tmp7:2 = 0; - local tmp8:2 = 0; - simd_address_at(tmp2, TMPD1, 0, 1, 8); - simd_address_at(tmp3, TMPD1, 1, 1, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPD1, 2, 1, 8); - simd_address_at(tmp3, TMPD1, 3, 1, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 1, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPD1, 4, 1, 8); - simd_address_at(tmp3, TMPD1, 5, 1, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 2, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPD1, 6, 1, 8); - simd_address_at(tmp3, TMPD1, 7, 1, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 3, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; + tmp2 = TMPD1[0,8]; + tmp4 
= sext(tmp2); + tmp3 = TMPD1[8,8]; + tmp5 = sext(tmp3); + Rd_VPR64.4H[0,16] = tmp4 + tmp5; + tmp2 = TMPD1[16,8]; + tmp4 = sext(tmp2); + tmp3 = TMPD1[24,8]; + tmp5 = sext(tmp3); + Rd_VPR64.4H[16,16] = tmp4 + tmp5; + tmp2 = TMPD1[32,8]; + tmp4 = sext(tmp2); + tmp3 = TMPD1[40,8]; + tmp5 = sext(tmp3); + Rd_VPR64.4H[32,16] = tmp4 + tmp5; + tmp2 = TMPD1[48,8]; + tmp4 = sext(tmp2); + tmp3 = TMPD1[56,8]; + tmp5 = sext(tmp3); + Rd_VPR64.4H[48,16] = tmp4 + tmp5; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.8B; - local tmpd:8 = SIMD_INT_ADD(tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_saddlp(Rn_VPR64.8B, 1:1); -@endif } # C7.2.230 SADDLP page C7-1915 line 107363 MATCH x0e202800/mask=xbf3ffc00 @@ -24427,40 +13746,19 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=2 :saddlp Rd_VPR128.2D, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = Rn_VPR128.4S; # sipd infix Rd_VPR128.2D = +(TMPQ1) on pairs lane size (4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:8 = 0; - local tmp8:8 = 0; - simd_address_at(tmp2, TMPQ1, 0, 4, 16); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - simd_address_at(tmp4, Rd_VPR128.2D, 0, 8, 16); - tmp5 = * [register]:4 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:4 tmp3; - tmp8 = sext(tmp6); - * [register]:8 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 2, 4, 16); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - simd_address_at(tmp4, Rd_VPR128.2D, 1, 8, 16); - tmp5 = * [register]:4 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:4 tmp3; - tmp8 = sext(tmp6); - * [register]:8 tmp4 = tmp7 + tmp8; + tmp2 = TMPQ1[0,32]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[32,32]; + tmp5 = sext(tmp3); + Rd_VPR128.2D[0,64] = tmp4 + tmp5; + tmp2 = TMPQ1[64,32]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[96,32]; + tmp5 = sext(tmp3); + Rd_VPR128.2D[64,64] = tmp4 + tmp5; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rn_VPR128.4S; - local tmpd:16 = SIMD_INT_ADD(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_saddlp(Rn_VPR128.4S, 4:1); -@endif } # C7.2.230 SADDLP page C7-1915 line 107363 MATCH x0e202800/mask=xbf3ffc00 @@ -24472,56 +13770,29 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=2 :saddlp Rd_VPR128.4S, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = Rn_VPR128.8H; # sipd infix Rd_VPR128.4S = +(TMPQ1) on pairs lane size (2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:2 = 0; - local tmp6:2 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp2, TMPQ1, 0, 2, 16); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = sext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 2, 2, 16); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 1, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = sext(tmp6); - * [register]:4 tmp4 = tmp7 
+ tmp8; - simd_address_at(tmp2, TMPQ1, 4, 2, 16); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 2, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = sext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 6, 2, 16); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 3, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = sext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; + tmp2 = TMPQ1[0,16]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[16,16]; + tmp5 = sext(tmp3); + Rd_VPR128.4S[0,32] = tmp4 + tmp5; + tmp2 = TMPQ1[32,16]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[48,16]; + tmp5 = sext(tmp3); + Rd_VPR128.4S[32,32] = tmp4 + tmp5; + tmp2 = TMPQ1[64,16]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[80,16]; + tmp5 = sext(tmp3); + Rd_VPR128.4S[64,32] = tmp4 + tmp5; + tmp2 = TMPQ1[96,16]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[112,16]; + tmp5 = sext(tmp3); + Rd_VPR128.4S[96,32] = tmp4 + tmp5; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rn_VPR128.8H; - local tmpd:16 = SIMD_INT_ADD(tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_saddlp(Rn_VPR128.8H, 2:1); -@endif } # C7.2.230 SADDLP page C7-1915 line 107363 MATCH x0e202800/mask=xbf3ffc00 @@ -24533,88 +13804,49 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=2 :saddlp Rd_VPR128.8H, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = Rn_VPR128.16B; # sipd infix Rd_VPR128.8H = +(TMPQ1) on pairs lane size (1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:1 = 0; - local tmp6:1 = 0; - local tmp7:2 = 0; - local tmp8:2 = 0; - simd_address_at(tmp2, TMPQ1, 0, 1, 16); - simd_address_at(tmp3, TMPQ1, 1, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 2, 1, 16); - simd_address_at(tmp3, TMPQ1, 3, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 4, 1, 16); - simd_address_at(tmp3, TMPQ1, 5, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 6, 1, 16); - simd_address_at(tmp3, TMPQ1, 7, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 8, 1, 16); - simd_address_at(tmp3, TMPQ1, 9, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 10, 1, 16); - simd_address_at(tmp3, TMPQ1, 11, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 5, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * 
[register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 12, 1, 16); - simd_address_at(tmp3, TMPQ1, 13, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 14, 1, 16); - simd_address_at(tmp3, TMPQ1, 15, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 7, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = sext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = sext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; + tmp2 = TMPQ1[0,8]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[8,8]; + tmp5 = sext(tmp3); + Rd_VPR128.8H[0,16] = tmp4 + tmp5; + tmp2 = TMPQ1[16,8]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[24,8]; + tmp5 = sext(tmp3); + Rd_VPR128.8H[16,16] = tmp4 + tmp5; + tmp2 = TMPQ1[32,8]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[40,8]; + tmp5 = sext(tmp3); + Rd_VPR128.8H[32,16] = tmp4 + tmp5; + tmp2 = TMPQ1[48,8]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[56,8]; + tmp5 = sext(tmp3); + Rd_VPR128.8H[48,16] = tmp4 + tmp5; + tmp2 = TMPQ1[64,8]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[72,8]; + tmp5 = sext(tmp3); + Rd_VPR128.8H[64,16] = tmp4 + tmp5; + tmp2 = TMPQ1[80,8]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[88,8]; + tmp5 = sext(tmp3); + Rd_VPR128.8H[80,16] = tmp4 + tmp5; + tmp2 = TMPQ1[96,8]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[104,8]; + tmp5 = sext(tmp3); + Rd_VPR128.8H[96,16] = tmp4 + tmp5; + tmp2 = TMPQ1[112,8]; + tmp4 = sext(tmp2); + tmp3 = TMPQ1[120,8]; + tmp5 = sext(tmp3); + Rd_VPR128.8H[112,16] = tmp4 + tmp5; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rn_VPR128.16B; - local tmpd:16 = SIMD_INT_ADD(tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_saddlp(Rn_VPR128.16B, 1:1); -@endif } # C7.2.231 SADDLV page C7-1917 line 107472 MATCH x0e303800/mask=xbf3ffc00 @@ -24625,9 +13857,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=2 :saddlv Rd_FPR64, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.4S & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_saddlv(Rn_VPR128.4S, 4:1); -@endif } # C7.2.231 SADDLV page C7-1917 line 107472 MATCH x0e303800/mask=xbf3ffc00 @@ -24638,9 +13868,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x :saddlv Rd_FPR16, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.16B & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_saddlv(Rn_VPR128.16B, 1:1); -@endif } # C7.2.231 SADDLV page C7-1917 line 107472 MATCH x0e303800/mask=xbf3ffc00 @@ -24651,9 +13879,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :saddlv Rd_FPR16, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR64.8B & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_saddlv(Rn_VPR64.8B, 1:1); -@endif } # C7.2.231 SADDLV page C7-1917 line 107472 MATCH x0e303800/mask=xbf3ffc00 @@ -24664,9 +13890,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :saddlv Rd_FPR32, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & 
b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR64.4H & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_saddlv(Rn_VPR64.4H, 2:1); -@endif } # C7.2.231 SADDLV page C7-1917 line 107472 MATCH x0e303800/mask=xbf3ffc00 @@ -24677,9 +13901,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :saddlv Rd_FPR32, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.8H & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_saddlv(Rn_VPR128.8H, 2:1); -@endif } # C7.2.232 SADDW, SADDW2 page C7-1919 line 107570 MATCH x0e201000/mask=xbf20fc00 @@ -24691,36 +13913,13 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :saddw Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x1 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); + TMPQ1[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D + TMPQ1 on lane size 8 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp5, TMPQ1, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp4) + (* [register]:8 tmp5); - simd_address_at(tmp4, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp4) + (* [register]:8 tmp5); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rm_VPR64.2S, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.2D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_saddw(Rn_VPR128.2D, Rm_VPR64.2S, 4:1); -@endif } # C7.2.232 SADDW, SADDW2 page C7-1919 line 107570 MATCH x0e201000/mask=xbf20fc00 @@ -24732,50 +13931,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :saddw Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x1 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 
3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + TMPQ1[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S + TMPQ1 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); - simd_address_at(tmp4, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); - simd_address_at(tmp4, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); - simd_address_at(tmp4, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rm_VPR64.4H, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_saddw(Rn_VPR128.4S, Rm_VPR64.4H, 2:1); -@endif } # C7.2.232 SADDW, SADDW2 page C7-1919 line 107570 MATCH x0e201000/mask=xbf20fc00 @@ -24787,78 +13953,25 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :saddw Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x1 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); + TMPQ1[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ1[48,16] = 
sext(Rm_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H + TMPQ1 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rm_VPR64.8B, 1:1); - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_saddw(Rn_VPR128.8H, Rm_VPR64.8B, 1:1); -@endif } # C7.2.232 SADDW, SADDW2 page C7-1919 line 107570 MATCH x0e201000/mask=xbf20fc00 @@ -24870,40 +13983,14 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :saddw2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x1 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rm_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rm_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 
tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D + TMPQ2 on lane size 8 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp7, TMPQ2, 0, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp6) + (* [register]:8 tmp7); - simd_address_at(tmp6, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp7, TMPQ2, 1, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp6) + (* [register]:8 tmp7); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.2D, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_saddw2(Rn_VPR128.2D, Rm_VPR128.4S, 4:1); -@endif } # C7.2.232 SADDW, SADDW2 page C7-1919 line 107570 MATCH x0e201000/mask=xbf20fc00 @@ -24915,54 +14002,18 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :saddw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x1 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rm_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rm_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S + TMPQ2 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp7, TMPQ2, 0, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); - simd_address_at(tmp6, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp7, TMPQ2, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); - simd_address_at(tmp6, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp7, TMPQ2, 2, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); - simd_address_at(tmp6, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp7, TMPQ2, 3, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 
tmp7); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.4S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_saddw2(Rn_VPR128.4S, Rm_VPR128.8H, 2:1); -@endif } # C7.2.232 SADDW, SADDW2 page C7-1919 line 107570 MATCH x0e201000/mask=xbf20fc00 @@ -24974,82 +14025,26 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :saddw2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x1 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rm_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rm_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H + TMPQ2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp7, TMPQ2, 0, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp7, TMPQ2, 1, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp7, TMPQ2, 2, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp7, TMPQ2, 3, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp8 = (* [register]:2 
tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp7, TMPQ2, 4, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp7, TMPQ2, 5, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp7, TMPQ2, 6, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp7, TMPQ2, 7, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 1:1); - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.8H, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_saddw2(Rn_VPR128.8H, Rm_VPR128.16B, 1:1); -@endif } # C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x5f00e400/mask=xff80fc00 @@ -25060,9 +14055,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :scvtf Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_3031=1 & u=0 & b_2428=0x1f & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_scvtf(Rn_FPR64, Imm_shr_imm64:4); -@endif } # C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x5f00e400/mask=xff80fc00 @@ -25073,9 +14066,7 @@ is b_3031=1 & u=0 & b_2428=0x1f & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_ :scvtf Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_3031=1 & u=0 & b_2428=0x1f & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_scvtf(Rn_FPR32, Imm_shr_imm32:4); -@endif } # C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x5f00e400/mask=xff80fc00 @@ -25086,9 +14077,7 @@ is b_3031=1 & u=0 & b_2428=0x1f & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_101 :scvtf Rd_FPR16, Rn_FPR16, Imm_shr_imm16 is b_3031=1 & u=0 & b_2428=0x1f & b_2023=1 & Imm_shr_imm16 & b_1115=0x1c & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_scvtf(Rn_FPR16, Imm_shr_imm16:4); -@endif } # C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x0f00e400/mask=xbf80fc00 @@ -25099,9 +14088,7 @@ is b_3031=1 & u=0 & b_2428=0x1f & b_2023=1 & Imm_shr_imm16 & b_1115=0x1c & b_101 :scvtf Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 
& b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_scvtf(Rn_VPR128.2D, Imm_shr_imm64:4, 8:1); -@endif } # C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x0f00e400/mask=xbf80fc00 @@ -25112,9 +14099,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c :scvtf Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_scvtf(Rn_VPR64.2S, Imm_shr_imm32:4, 4:1); -@endif } # C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x0f00e400/mask=xbf80fc00 @@ -25125,9 +14110,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & :scvtf Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_scvtf(Rn_VPR128.4S, Imm_shr_imm32:4, 4:1); -@endif } # C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x0f00e400/mask=xbf80fc00 @@ -25138,9 +14121,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & :scvtf Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_scvtf(Rn_VPR64.4H, Imm_shr_imm32:4, 2:1); -@endif } # C7.2.233 SCVTF (vector, fixed-point) page C7-1921 line 107690 MATCH x0f00e400/mask=xbf80fc00 @@ -25151,9 +14132,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & :scvtf Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_scvtf(Rn_VPR128.8H, Imm_shr_imm32:4, 2:1); -@endif } # C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x5e21d800/mask=xffbffc00 @@ -25165,15 +14144,8 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & :scvtf Rd_FPR32, Rn_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = int2float(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = int2float(Rn_FPR32); - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_scvtf(Rn_FPR32); -@endif } # C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x5e21d800/mask=xffbffc00 @@ -25185,15 +14157,8 @@ is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_10 :scvtf Rd_FPR64, Rn_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = int2float(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = int2float(Rn_FPR64); - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif 
defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_scvtf(Rn_FPR64); -@endif } # C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x0e21d800/mask=xbfbffc00 @@ -25204,9 +14169,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_1722=0x30 & b_1216=0x1d & b_10 :scvtf Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=0 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_scvtf(Rn_VPR128.2D, 8:1); -@endif } # C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x0e21d800/mask=xbfbffc00 @@ -25217,9 +14180,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=0 & b_1722=0x30 & b_1216=0x1d & :scvtf Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_scvtf(Rn_VPR64.2S, 4:1); -@endif } # C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x0e21d800/mask=xbfbffc00 @@ -25230,9 +14191,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & size_high=0 & b_1722=0x10 & b_1216=0x1d & :scvtf Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_scvtf(Rn_VPR128.4S, 4:1); -@endif } # C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x5e79d800/mask=xfffffc00 @@ -25245,15 +14204,8 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=0 & b_1722=0x10 & b_1216=0x1d & :scvtf Rd_FPR16, Rn_FPR16 is b_1031=0b0101111001111001110110 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = int2float(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = int2float(Rn_FPR16); - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_scvtf(Rn_FPR16); -@endif } # C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x0e79d800/mask=xbffffc00 @@ -25265,9 +14217,7 @@ is b_1031=0b0101111001111001110110 & Rd_FPR16 & Rn_FPR16 & Zd :scvtf Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111001111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_scvtf(Rn_VPR64.4H, 2:1); -@endif } # C7.2.234 SCVTF (vector, integer) page C7-1924 line 107840 MATCH x0e79d800/mask=xbffffc00 @@ -25279,9 +14229,7 @@ is b_31=0 & b_30=0 & b_1029=0b00111001111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & :scvtf Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111001111001110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_scvtf(Rn_VPR128.8H, 2:1); -@endif } # C7.2.235 SCVTF (scalar, fixed-point) page C7-1927 line 108018 MATCH x1e020000/mask=x7f3f0000 @@ -25294,17 +14242,9 @@ is b_31=0 & b_30=1 & b_1029=0b00111001111001110110 & Rd_VPR128.8H & Rn_VPR128.8H :scvtf Rd_FPR16, Rn_GPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode=2 & b_15=1 & FBitsOp & FBits16 & Rn_GPR32 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = int2float(Rn_GPR32); Rd_FPR16 = tmp1 f/ FBits16; zext_zh(Zd); # zero upper 30 bytes of 
Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = int2float(Rn_GPR32); - Rd_FPR16 = tmp1 f/ FBits16; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_scvtf(Rn_GPR32, FBits16); -@endif } # C7.2.235 SCVTF (scalar, fixed-point) page C7-1927 line 108018 MATCH x1e020000/mask=x7f3f0000 @@ -25317,17 +14257,9 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode= :scvtf Rd_FPR16, Rn_GPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode=2 & FBitsOp & FBits16 & Rn_GPR64 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = int2float(Rn_GPR64); Rd_FPR16 = tmp1 f/ FBits16; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = int2float(Rn_GPR64); - Rd_FPR16 = tmp1 f/ FBits16; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_scvtf(Rn_GPR64, FBits16); -@endif } # C7.2.235 SCVTF (scalar, fixed-point) page C7-1927 line 108018 MATCH x1e020000/mask=x7f3f0000 @@ -25339,17 +14271,9 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode= :scvtf Rd_FPR64, Rn_GPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode=2 & b_15=1 & FBitsOp & FBits64 & Rn_GPR32 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = int2float(Rn_GPR32); Rd_FPR64 = tmp1 f/ FBits64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = int2float(Rn_GPR32); - Rd_FPR64 = tmp1 f/ FBits64; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_scvtf(Rn_GPR32, FBits64); -@endif } # C7.2.235 SCVTF (scalar, fixed-point) page C7-1927 line 108018 MATCH x1e020000/mask=x7f3f0000 @@ -25361,17 +14285,9 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode= :scvtf Rd_FPR32, Rn_GPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode=2 & b_15=1 & FBitsOp & FBits32 & Rn_GPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = int2float(Rn_GPR32); Rd_FPR32 = tmp1 f/ FBits32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = int2float(Rn_GPR32); - Rd_FPR32 = tmp1 f/ FBits32; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_scvtf(Rn_GPR32, FBits32); -@endif } # C7.2.235 SCVTF (scalar, fixed-point) page C7-1927 line 108018 MATCH x1e020000/mask=x7f3f0000 @@ -25383,17 +14299,9 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode= :scvtf Rd_FPR64, Rn_GPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode=2 & FBitsOp & FBits64 & Rn_GPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = int2float(Rn_GPR64); Rd_FPR64 = tmp1 f/ FBits64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = int2float(Rn_GPR64); - Rd_FPR64 = tmp1 f/ FBits64; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_scvtf(Rn_GPR64, FBits64); -@endif } # C7.2.235 SCVTF (scalar, fixed-point) page C7-1927 line 108018 MATCH x1e020000/mask=x7f3f0000 @@ -25405,17 +14313,9 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode= :scvtf Rd_FPR32, Rn_GPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode=2 & FBitsOp & FBits32 & Rn_GPR64 & Rd_FPR32 & Rd_FPR64 & 
Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = int2float(Rn_GPR64); Rd_FPR32 = tmp1 f/ FBits32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = int2float(Rn_GPR64); - Rd_FPR32 = tmp1 f/ FBits32; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_scvtf(Rn_GPR64, FBits32); -@endif } # C7.2.236 SCVTF (scalar, integer) page C7-1929 line 108148 MATCH x1e220000/mask=x7f3ffc00 @@ -25427,15 +14327,8 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode= :scvtf Rd_FPR16, Rn_GPR32 is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR32 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = int2float(Rn_GPR32); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = int2float(Rn_GPR32); - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_scvtf(Rn_GPR32); -@endif } # C7.2.236 SCVTF (scalar, integer) page C7-1929 line 108148 MATCH x1e220000/mask=x7f3ffc00 @@ -25447,15 +14340,8 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode :scvtf Rd_FPR16, Rn_GPR64 is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR64 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = int2float(Rn_GPR64); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = int2float(Rn_GPR64); - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_scvtf(Rn_GPR64); -@endif } # C7.2.236 SCVTF (scalar, integer) page C7-1929 line 108148 MATCH x1e220000/mask=x7f3ffc00 @@ -25467,15 +14353,8 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode :scvtf Rd_FPR64, Rn_GPR32 is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR32 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = int2float(Rn_GPR32); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = int2float(Rn_GPR32); - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_scvtf(Rn_GPR32); -@endif } # C7.2.236 SCVTF (scalar, integer) page C7-1929 line 108148 MATCH x1e220000/mask=x7f3ffc00 @@ -25487,15 +14366,8 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode :scvtf Rd_FPR64, Rn_GPR64 is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = int2float(Rn_GPR64); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR64 = int2float(Rn_GPR64); - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_scvtf(Rn_GPR64); -@endif } # C7.2.236 SCVTF (scalar, integer) page C7-1929 line 108148 MATCH x1e220000/mask=x7f3ffc00 @@ -25507,15 +14379,8 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode :scvtf Rd_FPR32, Rn_GPR32 is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = int2float(Rn_GPR32); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = int2float(Rn_GPR32); - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = 
NEON_scvtf(Rn_GPR32); -@endif } # C7.2.236 SCVTF (scalar, integer) page C7-1929 line 108148 MATCH x1e220000/mask=x7f3ffc00 @@ -25527,15 +14392,8 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode :scvtf Rd_FPR32, Rn_GPR64 is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR64 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = int2float(Rn_GPR64); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR32 = int2float(Rn_GPR64); - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_scvtf(Rn_GPR64); -@endif } # C7.2.237 SDOT (by element) page C7-1931 line 108271 MATCH x0f00e000/mask=xbf00f400 @@ -25547,10 +14405,8 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode :sdot Rd_VPR64.2S, Rn_VPR64.8B, Re_VPR128.B.vIndex is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd_VPR64.2S & Rn_VPR64.8B & Re_VPR128.B.vIndex & Re_VPR128.S & vIndex & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); Rd_VPR64.2S = NEON_sdot(Rn_VPR64.8B, tmp1, 1:1); -@endif } # C7.2.237 SDOT (by element) page C7-1931 line 108271 MATCH x0f00e000/mask=xbf00f400 @@ -25562,10 +14418,8 @@ is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd :sdot Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.B.vIndex is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd_VPR128.4S & Rn_VPR128.16B & Re_VPR128.B.vIndex & Re_VPR128.S & vIndex & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); Rd_VPR128.4S = NEON_sdot(Rn_VPR128.16B, tmp1, 1:1); -@endif } # C7.2.238 SDOT (vector) page C7-1933 line 108370 MATCH x0e009400/mask=xbf20fc00 @@ -25577,9 +14431,7 @@ is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd :sdot Rd_VPR64.2S, Rn_VPR64.8B, Rm_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & Rd_VPR64.2S & Rn_VPR64.8B & Rm_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sdot(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.238 SDOT (vector) page C7-1933 line 108370 MATCH x0e009400/mask=xbf20fc00 @@ -25591,9 +14443,7 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & :sdot Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & Rd_VPR128.4S & Rn_VPR128.16B & Rm_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sdot(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.239 SHA1C page C7-1935 line 108468 MATCH x5e000000/mask=xffe0fc00 @@ -25604,9 +14454,7 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & :sha1c Rd_VPR128, Rn_FPR32, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b000000 & Rn_FPR32 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128 = NEON_sha1c(Rd_VPR128, Rn_FPR32, Rm_VPR128.4S, 4:1); -@endif } # C7.2.240 SHA1H page C7-1936 line 108537 MATCH x5e280800/mask=xfffffc00 @@ -25618,15 +14466,8 @@ is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & 
b_1015=0b000000 & :sha1h Rd_FPR32, Rn_FPR32 is b_2431=0b01011110 & b_2223=0b00 & b_1721=0b10100 & b_1216=0b00000 & b_1011=0b10 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = Rn_FPR32 << 30:1; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:4 = Rn_FPR32 << 30:1; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_sha1h(Rn_FPR32); -@endif } # C7.2.241 SHA1M page C7-1937 line 108594 MATCH x5e002000/mask=xffe0fc00 @@ -25637,9 +14478,7 @@ is b_2431=0b01011110 & b_2223=0b00 & b_1721=0b10100 & b_1216=0b00000 & b_1011=0b :sha1m Rd_VPR128, Rn_FPR32, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b001000 & Rn_FPR32 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128 = NEON_sha1m(Rd_VPR128, Rn_FPR32, Rm_VPR128.4S, 4:1); -@endif } # C7.2.242 SHA1P page C7-1938 line 108663 MATCH x5e001000/mask=xffe0fc00 @@ -25650,9 +14489,7 @@ is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b001000 & :sha1p Rd_VPR128, Rn_FPR32, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b000100 & Rn_FPR32 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128 = NEON_sha1p(Rd_VPR128, Rn_FPR32, Rm_VPR128.4S, 4:1); -@endif } # C7.2.243 SHA1SU0 page C7-1939 line 108732 MATCH x5e003000/mask=xffe0fc00 @@ -25663,9 +14500,7 @@ is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b000100 & :sha1su0 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b001100 & Rn_VPR128.4S & Rd_VPR128.4S & Rd_VPR128 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sha1su0(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.244 SHA1SU1 page C7-1940 line 108798 MATCH x5e281800/mask=xfffffc00 @@ -25676,9 +14511,7 @@ is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b001100 & :sha1su1 Rd_VPR128.4S, Rn_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=1 & b_1620=0b01000 & b_1015=0b000110 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sha1su1(Rd_VPR128.4S, Rn_VPR128.4S, 4:1); -@endif } # C7.2.245 SHA256H2 page C7-1941 line 108862 MATCH x5e005000/mask=xffe0fc00 @@ -25689,9 +14522,7 @@ is b_2431=0b01011110 & b_2223=0b00 & b_2121=1 & b_1620=0b01000 & b_1015=0b000110 :sha256h2 Rd_VPR128, Rn_VPR128, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b010100 & Rn_VPR128 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128 = NEON_sha256h2(Rd_VPR128, Rn_VPR128, Rm_VPR128.4S, 4:1); -@endif } # C7.2.246 SHA256H page C7-1942 line 108922 MATCH x5e004000/mask=xffe0fc00 @@ -25702,9 +14533,7 @@ is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b010100 & :sha256h Rd_VPR128, Rn_VPR128, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b010000 & Rn_VPR128 & Rd_VPR128 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128 = NEON_sha256h(Rd_VPR128, Rn_VPR128, Rm_VPR128.4S, 4:1); -@endif } # C7.2.247 SHA256SU0 page C7-1943 line 108982 MATCH x5e282800/mask=xfffffc00 @@ -25715,9 +14544,7 @@ 
is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b010000 & :sha256su0 Rd_VPR128.4S, Rn_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=1 & b_1620=0b01000 & b_1015=0b001010 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sha256su0(Rd_VPR128.4S, Rn_VPR128.4S, 4:1); -@endif } # C7.2.248 SHA256SU1 page C7-1944 line 109048 MATCH x5e006000/mask=xffe0fc00 @@ -25728,9 +14555,7 @@ is b_2431=0b01011110 & b_2223=0b00 & b_2121=1 & b_1620=0b01000 & b_1015=0b001010 :sha256su1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b011000 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sha256su1(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.249 SHA512H page C7-1946 line 109138 MATCH xce608000/mask=xffe0fc00 @@ -25741,9 +14566,7 @@ is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b011000 & :sha512h Rd_VPR128, Rn_VPR128, Rm_VPR128.2D is b_2131=0b11001110011 & b_1015=0b100000 & Rd_VPR128 & Rn_VPR128 & Rm_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128 = NEON_sha512h(Rd_VPR128, Rn_VPR128, Rm_VPR128.2D, 8:1); -@endif } # C7.2.250 SHA512H2 page C7-1948 line 109227 MATCH xce608400/mask=xffe0fc00 @@ -25754,9 +14577,7 @@ is b_2131=0b11001110011 & b_1015=0b100000 & Rd_VPR128 & Rn_VPR128 & Rm_VPR128.2D :sha512h2 Rd_VPR128, Rn_VPR128, Rm_VPR128.2D is b_2131=0b11001110011 & b_1015=0b100001 & Rd_VPR128 & Rn_VPR128 & Rm_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128 = NEON_sha512h2(Rd_VPR128, Rn_VPR128, Rm_VPR128.2D, 8:1); -@endif } # C7.2.251 SHA512SU0 page C7-1950 line 109313 MATCH xcec08000/mask=xfffffc00 @@ -25767,9 +14588,7 @@ is b_2131=0b11001110011 & b_1015=0b100001 & Rd_VPR128 & Rn_VPR128 & Rm_VPR128.2D :sha512su0 Rd_VPR128.2D, Rn_VPR128.2D is b_1031=0b1100111011000000100000 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_sha512su0(Rd_VPR128.2D, Rn_VPR128.2D, 8:1); -@endif } # C7.2.252 SHA512SU1 page C7-1951 line 109383 MATCH xce608800/mask=xffe0fc00 @@ -25780,9 +14599,7 @@ is b_1031=0b1100111011000000100000 & Rd_VPR128.2D & Rn_VPR128.2D & Zd :sha512su1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_2131=0b11001110011 & b_1015=0b100010 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_sha512su1(Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.253 SHADD page C7-1953 line 109467 MATCH x0e200400/mask=xbf20fc00 @@ -25793,9 +14610,7 @@ is b_2131=0b11001110011 & b_1015=0b100010 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR :shadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x0 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_shadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.253 SHADD page C7-1953 line 109467 MATCH x0e200400/mask=xbf20fc00 @@ -25806,9 +14621,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :shadd 
Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x0 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_shadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.253 SHADD page C7-1953 line 109467 MATCH x0e200400/mask=xbf20fc00 @@ -25819,9 +14632,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :shadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x0 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_shadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.253 SHADD page C7-1953 line 109467 MATCH x0e200400/mask=xbf20fc00 @@ -25832,9 +14643,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :shadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x0 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_shadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.253 SHADD page C7-1953 line 109467 MATCH x0e200400/mask=xbf20fc00 @@ -25845,9 +14654,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :shadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x0 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_shadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.253 SHADD page C7-1953 line 109467 MATCH x0e200400/mask=xbf20fc00 @@ -25858,9 +14665,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :shadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x0 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_shadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.254 SHL page C7-1955 line 109567 MATCH x5f005400/mask=xff80fc00 @@ -25871,9 +14676,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :shl Rd_FPR64, Rn_FPR64, Imm_imm0_63 is b_3031=1 & u=0 & b_2428=0x1f & b_2223=0b01 & Imm_imm0_63 & b_1115=0xa & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_shl(Rn_FPR64, Imm_imm0_63:1); -@endif } # C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 @@ -25885,67 +14688,25 @@ is b_3031=1 & u=0 & b_2428=0x1f & b_2223=0b01 & Imm_imm0_63 & b_1115=0xa & b_101 :shl Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) local tmp1:1 = Imm_uimm3; # simd infix Rd_VPR128.16B = Rn_VPR128.16B << tmp1 on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - 
simd_address_at(tmp2, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] << tmp1; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] << tmp1; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] << tmp1; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] << tmp1; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] << tmp1; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] << tmp1; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] << tmp1; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] << tmp1; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] << tmp1; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] << tmp1; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] << tmp1; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] << tmp1; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] << tmp1; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] << tmp1; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] << tmp1; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] << tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:1 = Imm_uimm3; - local tmpd:16 = SIMD_INT_LEFT(Rn_VPR128.16B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_shl(Rn_VPR128.16B, Imm_uimm3:1, 1:1); -@endif } # C7.2.254 SHL page C7-1955 line 109567 MATCH 
x0f005400/mask=xbf80fc00 @@ -25957,25 +14718,11 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1 :shl Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xa & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = Imm_imm0_63; # simd infix Rd_VPR128.2D = Rn_VPR128.2D << tmp1 on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp2) << tmp1; + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] << tmp1; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] << tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Imm_imm0_63; - local tmpd:16 = SIMD_INT_LEFT(Rn_VPR128.2D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_shl(Rn_VPR128.2D, Imm_imm0_63:1, 8:1); -@endif } # C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 @@ -25987,25 +14734,11 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xa & :shl Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Imm_uimm5; # simd infix Rd_VPR64.2S = Rn_VPR64.2S << tmp1 on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp2) << tmp1; + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] << tmp1; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] << tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Imm_uimm5; - local tmpd:8 = SIMD_INT_LEFT(Rn_VPR64.2S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_shl(Rn_VPR64.2S, Imm_uimm5:1, 4:1); -@endif } # C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 @@ -26017,31 +14750,13 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_101 :shl Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = Imm_uimm4; # simd infix Rd_VPR64.4H = Rn_VPR64.4H << tmp1 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) << tmp1; + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] << tmp1; + Rd_VPR64.4H[16,16] = 
Rn_VPR64.4H[16,16] << tmp1; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] << tmp1; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] << tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = Imm_uimm4; - local tmpd:8 = SIMD_INT_LEFT(Rn_VPR64.4H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_shl(Rn_VPR64.4H, Imm_uimm4:1, 2:1); -@endif } # C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 @@ -26053,31 +14768,13 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1 :shl Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Imm_uimm5; # simd infix Rd_VPR128.4S = Rn_VPR128.4S << tmp1 on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) << tmp1; + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] << tmp1; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] << tmp1; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] << tmp1; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] << tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Imm_uimm5; - local tmpd:16 = SIMD_INT_LEFT(Rn_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_shl(Rn_VPR128.4S, Imm_uimm5:1, 4:1); -@endif } # C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 @@ -26089,43 +14786,17 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_101 :shl Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) local tmp1:1 = Imm_uimm3; # simd infix Rd_VPR64.8B = Rn_VPR64.8B << tmp1 on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 6, 1, 8); - 
* [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1; + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] << tmp1; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] << tmp1; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] << tmp1; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] << tmp1; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] << tmp1; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] << tmp1; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] << tmp1; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] << tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:1 = Imm_uimm3; - local tmpd:8 = SIMD_INT_LEFT(Rn_VPR64.8B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_shl(Rn_VPR64.8B, Imm_uimm3:1, 1:1); -@endif } # C7.2.254 SHL page C7-1955 line 109567 MATCH x0f005400/mask=xbf80fc00 @@ -26137,43 +14808,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1 :shl Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) local tmp1:2 = Imm_uimm4; # simd infix Rd_VPR128.8H = Rn_VPR128.8H << tmp1 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) << tmp1; - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) << tmp1; + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] << tmp1; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] << tmp1; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] << tmp1; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] << tmp1; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] << tmp1; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] << tmp1; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] << tmp1; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] << tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = Imm_uimm4; - local tmpd:16 = SIMD_INT_LEFT(Rn_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_shl(Rn_VPR128.8H, Imm_uimm4:1, 2:1); -@endif } # C7.2.255 SHLL, SHLL2 page C7-1957 line 109703 MATCH x2e213800/mask=xbf3ffc00 @@ -26185,35 +14830,14 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1 :shll 
Rd_VPR128.2D, Rn_VPR64.2S, Imm_uimm_exact32 is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & Imm_uimm_exact32 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - local tmp4:8 = zext(Imm_uimm_exact32); - # simd infix Rd_VPR128.2D = TMPQ1 << tmp4 on lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp5) << tmp4; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); local tmp2:8 = zext(Imm_uimm_exact32); - local tmpd:16 = SIMD_INT_LEFT(tmp1, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_shll(Rn_VPR64.2S, Imm_uimm_exact32, 4:1); -@endif + # simd infix Rd_VPR128.2D = TMPQ1 << tmp2 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] << tmp2; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.255 SHLL, SHLL2 page C7-1957 line 109703 MATCH x2e213800/mask=xbf3ffc00 @@ -26225,45 +14849,17 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & Imm_uimm_exact32 & b_17 :shll Rd_VPR128.4S, Rn_VPR64.4H, Imm_uimm_exact16 is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & Imm_uimm_exact16 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = TMPQ1 << Imm_uimm_exact16:4 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - simd_address_at(tmp5, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) << Imm_uimm_exact16:4; - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - simd_address_at(tmp5, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) << Imm_uimm_exact16:4; - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - simd_address_at(tmp5, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) << Imm_uimm_exact16:4; - simd_address_at(tmp4, TMPQ1, 3, 4, 
16); - simd_address_at(tmp5, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp5 = (* [register]:4 tmp4) << Imm_uimm_exact16:4; + Rd_VPR128.4S[0,32] = TMPQ1[0,32] << Imm_uimm_exact16:4; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] << Imm_uimm_exact16:4; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] << Imm_uimm_exact16:4; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] << Imm_uimm_exact16:4; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmpd:16 = SIMD_INT_LEFT(tmp1, Imm_uimm_exact16:4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_shll(Rn_VPR64.4H, Imm_uimm_exact16, 2:1); -@endif } # C7.2.255 SHLL, SHLL2 page C7-1957 line 109703 MATCH x2e213800/mask=xbf3ffc00 @@ -26275,69 +14871,25 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & Imm_uimm_exact16 & b_17 :shll Rd_VPR128.8H, Rn_VPR64.8B, Imm_uimm_exact8 is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & Imm_uimm_exact8 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = TMPQ1 << Imm_uimm_exact8:2 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - simd_address_at(tmp5, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) << Imm_uimm_exact8:2; - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - simd_address_at(tmp5, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) << Imm_uimm_exact8:2; - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - simd_address_at(tmp5, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) << Imm_uimm_exact8:2; - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - simd_address_at(tmp5, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) << Imm_uimm_exact8:2; - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - simd_address_at(tmp5, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) 
<< Imm_uimm_exact8:2; - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - simd_address_at(tmp5, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) << Imm_uimm_exact8:2; - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - simd_address_at(tmp5, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) << Imm_uimm_exact8:2; - simd_address_at(tmp4, TMPQ1, 7, 2, 16); - simd_address_at(tmp5, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp5 = (* [register]:2 tmp4) << Imm_uimm_exact8:2; + Rd_VPR128.8H[0,16] = TMPQ1[0,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] << Imm_uimm_exact8:2; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.8B, 1:1); - local tmpd:16 = SIMD_INT_LEFT(tmp1, Imm_uimm_exact8:2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_shll(Rn_VPR64.8B, Imm_uimm_exact8, 1:1); -@endif } # C7.2.255 SHLL, SHLL2 page C7-1957 line 109703 MATCH x2e213800/mask=xbf3ffc00 @@ -26349,39 +14901,15 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & Imm_uimm_exact8 & b_172 :shll2 Rd_VPR128.2D, Rn_VPR128.4S, Imm_uimm_exact32 is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & Imm_uimm_exact32 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:8 = zext(Imm_uimm_exact32); - # simd infix Rd_VPR128.2D = TMPQ2 << tmp6 on lane size 8 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ2, 0, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 1, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp7) << tmp6; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); local tmp3:8 = zext(Imm_uimm_exact32); - local tmpd:16 = SIMD_INT_LEFT(tmp2, tmp3, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_shll2(Rn_VPR128.4S, Imm_uimm_exact32, 4:1); -@endif + # simd infix Rd_VPR128.2D = TMPQ2 << tmp3 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] << tmp3; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.255 SHLL, SHLL2 page C7-1957 line 109703 MATCH x2e213800/mask=xbf3ffc00 @@ -26393,49 +14921,18 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & Imm_uimm_exact32 & b_17 :shll2 Rd_VPR128.4S, Rn_VPR128.8H, 
Imm_uimm_exact16 is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & Imm_uimm_exact16 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 << Imm_uimm_exact16:4 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp6) << Imm_uimm_exact16:4; - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp6) << Imm_uimm_exact16:4; - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp6) << Imm_uimm_exact16:4; - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp6) << Imm_uimm_exact16:4; + Rd_VPR128.4S[0,32] = TMPQ2[0,32] << Imm_uimm_exact16:4; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] << Imm_uimm_exact16:4; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] << Imm_uimm_exact16:4; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] << Imm_uimm_exact16:4; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmpd:16 = SIMD_INT_LEFT(tmp2, Imm_uimm_exact16:4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_shll2(Rn_VPR128.8H, Imm_uimm_exact16, 2:1); -@endif } # C7.2.255 SHLL, SHLL2 page C7-1957 line 109703 MATCH x2e213800/mask=xbf3ffc00 @@ -26447,73 +14944,26 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & Imm_uimm_exact16 & b_17 :shll2 Rd_VPR128.8H, Rn_VPR128.16B, Imm_uimm_exact8 is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & Imm_uimm_exact8 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, 
TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 << Imm_uimm_exact8:2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp6) << Imm_uimm_exact8:2; - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp6) << Imm_uimm_exact8:2; - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp6) << Imm_uimm_exact8:2; - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp6) << Imm_uimm_exact8:2; - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp6) << Imm_uimm_exact8:2; - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp6) << Imm_uimm_exact8:2; - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp6) << Imm_uimm_exact8:2; - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - simd_address_at(tmp7, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp7 = (* [register]:2 tmp6) << Imm_uimm_exact8:2; + Rd_VPR128.8H[0,16] = TMPQ2[0,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] << Imm_uimm_exact8:2; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] << Imm_uimm_exact8:2; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 1:1); - local tmpd:16 = SIMD_INT_LEFT(tmp2, Imm_uimm_exact8:2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_shll2(Rn_VPR128.16B, Imm_uimm_exact8, 1:1); -@endif } # C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 @@ -26525,35 +14975,14 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & Imm_uimm_exact8 & b_172 :shrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { -@if 
defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm32); # simd infix TMPQ1 = Rn_VPR128.2D >> tmp1 on lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) >> tmp1; - simd_address_at(tmp3, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) >> tmp1; + TMPQ1[0,64] = Rn_VPR128.2D[0,64] >> tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] >> tmp1; # simd resize Rd_VPR64.2S = zext(TMPQ1) (lane size 8 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp6 = (* [register]:8 tmp5); - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp6 = (* [register]:8 tmp5); + Rd_VPR64.2S[0,32] = TMPQ1[0,32]; + Rd_VPR64.2S[32,32] = TMPQ1[64,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm32); - local tmp2:16 = SIMD_INT_RIGHT(Rn_VPR128.2D, tmp1, 8:1); - local tmpd:8 = SIMD_INT_ZEXT(tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_shrn(Rn_VPR128.2D, Imm_shr_imm32, 8:1); -@endif } # C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 @@ -26565,45 +14994,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & :shrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S >> Imm_shr_imm16:4 on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> Imm_shr_imm16:4; - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> Imm_shr_imm16:4; - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> Imm_shr_imm16:4; - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> Imm_shr_imm16:4; + TMPQ1[0,32] = Rn_VPR128.4S[0,32] >> Imm_shr_imm16:4; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] >> Imm_shr_imm16:4; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] >> Imm_shr_imm16:4; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] >> Imm_shr_imm16:4; # simd resize Rd_VPR64.4H = zext(TMPQ1) (lane size 4 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - simd_address_at(tmp5, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp5 = (* [register]:4 tmp4); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - simd_address_at(tmp5, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp5 = (* [register]:4 tmp4); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - simd_address_at(tmp5, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp5 = (* [register]:4 tmp4); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - simd_address_at(tmp5, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp5 = (* [register]:4 tmp4); + Rd_VPR64.4H[0,16] = TMPQ1[0,16]; + Rd_VPR64.4H[16,16] = TMPQ1[32,16]; + Rd_VPR64.4H[32,16] = TMPQ1[64,16]; + Rd_VPR64.4H[48,16] = TMPQ1[96,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif 
defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_RIGHT(Rn_VPR128.4S, Imm_shr_imm16:4, 4:1); - local tmpd:8 = SIMD_INT_ZEXT(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_shrn(Rn_VPR128.4S, Imm_shr_imm16, 4:1); -@endif } # C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 @@ -26615,69 +15016,25 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 :shrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H >> Imm_shr_imm8:2 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; + TMPQ1[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm8:2; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm8:2; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm8:2; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm8:2; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm8:2; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm8:2; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm8:2; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm8:2; # simd resize Rd_VPR64.8B = zext(TMPQ1) (lane size 2 to 1) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - simd_address_at(tmp5, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp5 = (* [register]:2 tmp4); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - simd_address_at(tmp5, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp5 = (* [register]:2 tmp4); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - simd_address_at(tmp5, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp5 = (* [register]:2 tmp4); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - simd_address_at(tmp5, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp5 = (* [register]:2 tmp4); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - simd_address_at(tmp5, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp5 = (* [register]:2 tmp4); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - simd_address_at(tmp5, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp5 = (* [register]:2 tmp4); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - simd_address_at(tmp5, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp5 = (* [register]:2 tmp4); - simd_address_at(tmp4, 
TMPQ1, 7, 2, 16); - simd_address_at(tmp5, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp5 = (* [register]:2 tmp4); + Rd_VPR64.8B[0,8] = TMPQ1[0,8]; + Rd_VPR64.8B[8,8] = TMPQ1[16,8]; + Rd_VPR64.8B[16,8] = TMPQ1[32,8]; + Rd_VPR64.8B[24,8] = TMPQ1[48,8]; + Rd_VPR64.8B[32,8] = TMPQ1[64,8]; + Rd_VPR64.8B[40,8] = TMPQ1[80,8]; + Rd_VPR64.8B[48,8] = TMPQ1[96,8]; + Rd_VPR64.8B[56,8] = TMPQ1[112,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_RIGHT(Rn_VPR128.8H, Imm_shr_imm8:2, 2:1); - local tmpd:8 = SIMD_INT_ZEXT(tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_shrn(Rn_VPR128.8H, Imm_shr_imm8, 2:1); -@endif } # C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 @@ -26689,40 +15046,16 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & :shrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm32); # simd infix TMPQ1 = Rn_VPR128.2D >> tmp1 on lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) >> tmp1; - simd_address_at(tmp3, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) >> tmp1; + TMPQ1[0,64] = Rn_VPR128.2D[0,64] >> tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] >> tmp1; # simd resize TMPD2 = zext(TMPQ1) (lane size 8 to 4) - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp6, TMPQ1, 0, 8, 16); - simd_address_at(tmp7, TMPD2, 0, 4, 8); - * [register]:4 tmp7 = (* [register]:8 tmp6); - simd_address_at(tmp6, TMPQ1, 1, 8, 16); - simd_address_at(tmp7, TMPD2, 1, 4, 8); - * [register]:4 tmp7 = (* [register]:8 tmp6); + TMPD2[0,32] = TMPQ1[0,32]; + TMPD2[32,32] = TMPQ1[64,32]; # simd copy Rd_VPR128.4S element 1:1 = TMPD2 (lane size 8) - local tmp8:4 = 0; - simd_address_at(tmp8, Rd_VPR128.4S, 1, 8, 16); - * [register]:8 tmp8 = TMPD2; + Rd_VPR128.4S[64,64] = TMPD2; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm32); - local tmp2:16 = SIMD_INT_RIGHT(Rn_VPR128.2D, tmp1, 8:1); - local tmp3:8 = SIMD_INT_ZEXT(tmp2, 8:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128.4S, tmp3, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_shrn2(Rn_VPR128.2D, Imm_shr_imm32, 8:1); -@endif } # C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 @@ -26734,50 +15067,19 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & :shrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S >> Imm_shr_imm16:4 on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> Imm_shr_imm16:4; - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> Imm_shr_imm16:4; - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, TMPQ1, 2, 4, 
16); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> Imm_shr_imm16:4; - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> Imm_shr_imm16:4; + TMPQ1[0,32] = Rn_VPR128.4S[0,32] >> Imm_shr_imm16:4; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] >> Imm_shr_imm16:4; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] >> Imm_shr_imm16:4; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] >> Imm_shr_imm16:4; # simd resize TMPD2 = zext(TMPQ1) (lane size 4 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - simd_address_at(tmp6, TMPD2, 0, 2, 8); - * [register]:2 tmp6 = (* [register]:4 tmp5); - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, TMPD2, 1, 2, 8); - * [register]:2 tmp6 = (* [register]:4 tmp5); - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - simd_address_at(tmp6, TMPD2, 2, 2, 8); - * [register]:2 tmp6 = (* [register]:4 tmp5); - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, TMPD2, 3, 2, 8); - * [register]:2 tmp6 = (* [register]:4 tmp5); + TMPD2[0,16] = TMPQ1[0,16]; + TMPD2[16,16] = TMPQ1[32,16]; + TMPD2[32,16] = TMPQ1[64,16]; + TMPD2[48,16] = TMPQ1[96,16]; # simd copy Rd_VPR128.8H element 1:1 = TMPD2 (lane size 8) - local tmp7:4 = 0; - simd_address_at(tmp7, Rd_VPR128.8H, 1, 8, 16); - * [register]:8 tmp7 = TMPD2; + Rd_VPR128.8H[64,64] = TMPD2; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_RIGHT(Rn_VPR128.4S, Imm_shr_imm16:4, 4:1); - local tmp2:8 = SIMD_INT_ZEXT(tmp1, 4:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128.8H, tmp2, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_shrn2(Rn_VPR128.4S, Imm_shr_imm16, 4:1); -@endif } # C7.2.256 SHRN, SHRN2 page C7-1959 line 109821 MATCH x0f008400/mask=xbf80fc00 @@ -26789,74 +15091,27 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 :shrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H >> Imm_shr_imm8:2 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm8:2; + TMPQ1[0,16] = Rn_VPR128.8H[0,16] 
>> Imm_shr_imm8:2; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm8:2; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm8:2; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm8:2; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm8:2; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm8:2; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm8:2; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm8:2; # simd resize TMPD2 = zext(TMPQ1) (lane size 2 to 1) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, TMPD2, 0, 1, 8); - * [register]:1 tmp6 = (* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, TMPD2, 1, 1, 8); - * [register]:1 tmp6 = (* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, TMPD2, 2, 1, 8); - * [register]:1 tmp6 = (* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, TMPD2, 3, 1, 8); - * [register]:1 tmp6 = (* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - simd_address_at(tmp6, TMPD2, 4, 1, 8); - * [register]:1 tmp6 = (* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, TMPD2, 5, 1, 8); - * [register]:1 tmp6 = (* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, TMPD2, 6, 1, 8); - * [register]:1 tmp6 = (* [register]:2 tmp5); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, TMPD2, 7, 1, 8); - * [register]:1 tmp6 = (* [register]:2 tmp5); + TMPD2[0,8] = TMPQ1[0,8]; + TMPD2[8,8] = TMPQ1[16,8]; + TMPD2[16,8] = TMPQ1[32,8]; + TMPD2[24,8] = TMPQ1[48,8]; + TMPD2[32,8] = TMPQ1[64,8]; + TMPD2[40,8] = TMPQ1[80,8]; + TMPD2[48,8] = TMPQ1[96,8]; + TMPD2[56,8] = TMPQ1[112,8]; # simd copy Rd_VPR128.16B element 1:1 = TMPD2 (lane size 8) - local tmp7:4 = 0; - simd_address_at(tmp7, Rd_VPR128.16B, 1, 8, 16); - * [register]:8 tmp7 = TMPD2; + Rd_VPR128.16B[64,64] = TMPD2; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_RIGHT(Rn_VPR128.8H, Imm_shr_imm8:2, 2:1); - local tmp2:8 = SIMD_INT_ZEXT(tmp1, 2:1); - local tmpd:16 = SIMD_COPY(Rd_VPR128.16B, tmp2, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_shrn2(Rn_VPR128.8H, Imm_shr_imm8, 2:1); -@endif } # C7.2.257 SHSUB page C7-1961 line 109944 MATCH x0e202400/mask=xbf20fc00 @@ -26867,9 +15122,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & :shsub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x4 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_shsub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.257 SHSUB page C7-1961 line 109944 MATCH x0e202400/mask=xbf20fc00 @@ -26880,9 +15133,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :shsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x4 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_shsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.257 SHSUB page C7-1961 line 109944 MATCH x0e202400/mask=xbf20fc00 @@ -26893,9 +15144,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & 
advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :shsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x4 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_shsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.257 SHSUB page C7-1961 line 109944 MATCH x0e202400/mask=xbf20fc00 @@ -26906,9 +15155,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :shsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x4 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_shsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.257 SHSUB page C7-1961 line 109944 MATCH x0e202400/mask=xbf20fc00 @@ -26919,9 +15166,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :shsub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x4 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_shsub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.257 SHSUB page C7-1961 line 109944 MATCH x0e202400/mask=xbf20fc00 @@ -26932,9 +15177,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :shsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x4 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_shsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 @@ -26945,9 +15188,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sli Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sli(Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3:1, 1:1); -@endif } # C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 @@ -26958,9 +15199,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1 :sli Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xa & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_sli(Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63:1, 8:1); -@endif } # C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 @@ -26971,9 +15210,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xa & :sli Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sli(Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5:1, 4:1); -@endif } # C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 
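# Reviewer note (illustrative sketch, not generated by the refactor): the SHL/SHLL/SHRN
# hunks above all follow the same rewrite. Where the old SEMANTIC_primitive branch
# computed a lane address and dereferenced it, e.g.
#     simd_address_at(tmp2, Rn_VPR128.16B, k, 1, 16);
#     simd_address_at(tmp3, Rd_VPR128.16B, k, 1, 16);
#     * [register]:1 tmp3 = (* [register]:1 tmp2) << tmp1;
# the new single body assigns the lane's bit range directly,
#     Rd_VPR128.16B[8*k,8] = Rn_VPR128.16B[8*k,8] << tmp1;
# where k is only a stand-in here for the literal lane index that the unrolled
# form writes out (0..15 for a .16B operand; SLEIGH itself takes literal offsets).
# Constructors whose body is a single statement (SHADD, SHSUB, SLI, SM3*, SM4*,
# SMAX) keep their NEON_* pseudo-op call; the refactor only drops the
# @if SEMANTIC_pseudo/primitive/pcode guards so one unconditional body remains.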
@@ -26984,9 +15221,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_101 :sli Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sli(Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4:1, 2:1); -@endif } # C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 @@ -26997,9 +15232,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1 :sli Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sli(Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5:1, 4:1); -@endif } # C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 @@ -27010,9 +15243,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_101 :sli Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sli(Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3:1, 1:1); -@endif } # C7.2.258 SLI page C7-1963 line 110042 MATCH x2f005400/mask=xbf80fc00 @@ -27023,9 +15254,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1 :sli Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sli(Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4:1, 2:1); -@endif } # C7.2.258 SLI page C7-1963 line 110042 MATCH x7f005400/mask=xff80fc00 @@ -27036,9 +15265,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1 :sli Rd_VPR64, Rn_VPR64, Imm_uimm5 is b_2331=0b011111110 & b_22=1 & b_1015=0b010101 & Rd_VPR64 & Rn_VPR64 & Imm_uimm5 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64 = NEON_sli(Rd_VPR64, Rn_VPR64, Imm_uimm5:1); -@endif } # C7.2.259 SM3PARTW1 page C7-1966 line 110207 MATCH xce60c000/mask=xffe0fc00 @@ -27049,9 +15276,7 @@ is b_2331=0b011111110 & b_22=1 & b_1015=0b010101 & Rd_VPR64 & Rn_VPR64 & Imm_uim :sm3partw1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_2131=0b11001110011 & b_1015=0b110000 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sm3partw1(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.260 SM3PARTW2 page C7-1968 line 110294 MATCH xce60c400/mask=xffe0fc00 @@ -27062,9 +15287,7 @@ is b_2131=0b11001110011 & b_1015=0b110000 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR :sm3partw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_2131=0b11001110011 & b_1015=0b110001 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sm3partw2(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.261 SM3SS1 page C7-1970 line 110380 MATCH xce400000/mask=xffe08000 @@ -27075,9 +15298,7 @@ is b_2131=0b11001110011 & 
b_1015=0b110001 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR :sm3ss1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, Ra_VPR128.4S is b_2131=0b11001110010 & b_15=0 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Ra_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sm3ss1(Rn_VPR128.4S, Rm_VPR128.4S, Ra_VPR128.4S, 4:1); -@endif } # C7.2.247 SM3TT1A page C7-1529 line 88534 KEEPWITH @@ -27093,10 +15314,8 @@ Re_VPR128.S.sm3imm2: Re_VPR128.S^"["^sm3imm2^"]" is Re_VPR128.S & sm3imm2 { expo :sm3tt1a Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.sm3imm2 is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b00 & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.sm3imm2 & Re_VPR128.S & sm3imm2 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:4 = SIMD_PIECE(Re_VPR128.S, sm3imm2:1); Rd_VPR128.4S = NEON_sm3tt1a(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1); -@endif } # C7.2.263 SM3TT1B page C7-1974 line 110572 MATCH xce408400/mask=xffe0cc00 @@ -27107,10 +15326,8 @@ is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b00 & Rd_VPR128.4S & Rn_VPR128.4 :sm3tt1b Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.sm3imm2 is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b01 & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.sm3imm2 & Re_VPR128.S & sm3imm2 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:4 = SIMD_PIECE(Re_VPR128.S, sm3imm2:1); Rd_VPR128.4S = NEON_sm3tt1b(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1); -@endif } # C7.2.264 SM3TT2A page C7-1976 line 110678 MATCH xce408800/mask=xffe0cc00 @@ -27121,10 +15338,8 @@ is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b01 & Rd_VPR128.4S & Rn_VPR128.4 :sm3tt2a Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.sm3imm2 is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b10 & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.sm3imm2 & Re_VPR128.S & sm3imm2 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:4 = SIMD_PIECE(Re_VPR128.S, sm3imm2:1); Rd_VPR128.4S = NEON_sm3tt2a(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1); -@endif } # C7.2.265 SM3TT2B page C7-1978 line 110783 MATCH xce408c00/mask=xffe0cc00 @@ -27135,10 +15350,8 @@ is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b10 & Rd_VPR128.4S & Rn_VPR128.4 :sm3tt2b Rd_VPR128.S, Rn_VPR128.S, Re_VPR128.S.sm3imm2 is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b11 & Rd_VPR128.S & Rn_VPR128.S & Re_VPR128.S.sm3imm2 & Re_VPR128.S & sm3imm2 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:4 = SIMD_PIECE(Re_VPR128.S, sm3imm2:1); Rd_VPR128.S = NEON_sm3tt2b(Rd_VPR128.S, Rn_VPR128.S, tmp1, 4:1); -@endif } # C7.2.266 SM4E page C7-1980 line 110888 MATCH xcec08400/mask=xfffffc00 @@ -27149,9 +15362,7 @@ is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b11 & Rd_VPR128.S & Rn_VPR128.S :sm4e Rd_VPR128.4S, Rn_VPR128.4S is b_1031=0b1100111011000000100001 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sm4e(Rd_VPR128.4S, Rn_VPR128.4S, 4:1); -@endif } # C7.2.267 SM4EKEY page C7-1982 line 110982 MATCH xce60c800/mask=xffe0fc00 @@ -27162,9 +15373,7 @@ is b_1031=0b1100111011000000100001 & Rd_VPR128.4S & Rn_VPR128.4S & Zd :sm4ekey Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_2131=0b11001110011 & b_1015=0b110010 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || 
defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sm4ekey(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.268 SMAX page C7-1984 line 111078 MATCH x0e206400/mask=xbf20fc00 @@ -27175,9 +15384,7 @@ is b_2131=0b11001110011 & b_1015=0b110010 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR :smax Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xc & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_smax(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.268 SMAX page C7-1984 line 111078 MATCH x0e206400/mask=xbf20fc00 @@ -27188,9 +15395,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :smax Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xc & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_smax(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.268 SMAX page C7-1984 line 111078 MATCH x0e206400/mask=xbf20fc00 @@ -27201,9 +15406,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :smax Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xc & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_smax(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.268 SMAX page C7-1984 line 111078 MATCH x0e206400/mask=xbf20fc00 @@ -27214,9 +15417,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :smax Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xc & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_smax(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.268 SMAX page C7-1984 line 111078 MATCH x0e206400/mask=xbf20fc00 @@ -27227,9 +15428,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :smax Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xc & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_smax(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.268 SMAX page C7-1984 line 111078 MATCH x0e206400/mask=xbf20fc00 @@ -27240,9 +15439,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :smax Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xc & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_smax(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.269 SMAXP page C7-1986 line 111178 MATCH x0e20a400/mask=xbf20fc00 @@ -27253,9 +15450,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :smaxp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x14 & 
b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_smax(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.269 SMAXP page C7-1986 line 111178 MATCH x0e20a400/mask=xbf20fc00 @@ -27266,9 +15461,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :smaxp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_smaxp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.269 SMAXP page C7-1986 line 111178 MATCH x0e20a400/mask=xbf20fc00 @@ -27279,9 +15472,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :smaxp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_smaxp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.269 SMAXP page C7-1986 line 111178 MATCH x0e20a400/mask=xbf20fc00 @@ -27292,9 +15483,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :smaxp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_smaxp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.269 SMAXP page C7-1986 line 111178 MATCH x0e20a400/mask=xbf20fc00 @@ -27305,9 +15494,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :smaxp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_smaxp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.269 SMAXP page C7-1986 line 111178 MATCH x0e20a400/mask=xbf20fc00 @@ -27318,9 +15505,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :smaxp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_smaxp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.270 SMAXV page C7-1988 line 111280 MATCH x0e30a800/mask=xbf3ffc00 @@ -27331,9 +15516,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :smaxv Rd_FPR8, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_smaxv(Rn_VPR128.16B, 1:1); -@endif } # C7.2.270 SMAXV page C7-1988 line 111280 MATCH x0e30a800/mask=xbf3ffc00 @@ -27344,9 +15527,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :smaxv Rd_FPR8, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & 
b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_smaxv(Rn_VPR64.8B, 1:1); -@endif } # C7.2.270 SMAXV page C7-1988 line 111280 MATCH x0e30a800/mask=xbf3ffc00 @@ -27357,9 +15538,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :smaxv Rd_FPR16, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_smaxv(Rn_VPR64.4H, 2:1); -@endif } # C7.2.270 SMAXV page C7-1988 line 111280 MATCH x0e30a800/mask=xbf3ffc00 @@ -27370,9 +15549,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :smaxv Rd_FPR16, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_smaxv(Rn_VPR128.8H, 2:1); -@endif } # C7.2.270 SMAXV page C7-1988 line 111280 MATCH x0e30a800/mask=xbf3ffc00 @@ -27383,9 +15560,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :smaxv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_smaxv(Rn_VPR128.4S, 4:1); -@endif } # C7.2.271 SMIN page C7-1990 line 111381 MATCH x0e206c00/mask=xbf20fc00 @@ -27396,9 +15571,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x :smin Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xd & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_smin(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.271 SMIN page C7-1990 line 111381 MATCH x0e206c00/mask=xbf20fc00 @@ -27409,9 +15582,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :smin Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xd & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_smin(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.271 SMIN page C7-1990 line 111381 MATCH x0e206c00/mask=xbf20fc00 @@ -27422,9 +15593,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :smin Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xd & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_smin(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.271 SMIN page C7-1990 line 111381 MATCH x0e206c00/mask=xbf20fc00 @@ -27435,9 +15604,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :smin Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xd & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if 
defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_smin(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.271 SMIN page C7-1990 line 111381 MATCH x0e206c00/mask=xbf20fc00 @@ -27448,9 +15615,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :smin Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xd & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_smin(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.271 SMIN page C7-1990 line 111381 MATCH x0e206c00/mask=xbf20fc00 @@ -27461,9 +15626,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :smin Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xd & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_smin(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.272 SMINP page C7-1992 line 111481 MATCH x0e20ac00/mask=xbf20fc00 @@ -27474,9 +15637,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sminp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x15 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sminp(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.272 SMINP page C7-1992 line 111481 MATCH x0e20ac00/mask=xbf20fc00 @@ -27487,9 +15648,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :sminp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x15 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sminp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.272 SMINP page C7-1992 line 111481 MATCH x0e20ac00/mask=xbf20fc00 @@ -27500,9 +15659,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sminp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x15 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sminp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.272 SMINP page C7-1992 line 111481 MATCH x0e20ac00/mask=xbf20fc00 @@ -27513,9 +15670,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sminp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x15 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sminp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.272 SMINP page C7-1992 line 111481 MATCH x0e20ac00/mask=xbf20fc00 @@ -27526,9 +15681,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sminp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & 
advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x15 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sminp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.272 SMINP page C7-1992 line 111481 MATCH x0e20ac00/mask=xbf20fc00 @@ -27539,9 +15692,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :sminp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x15 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sminp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.273 SMINV page C7-1994 line 111583 MATCH x0e31a800/mask=xbf3ffc00 @@ -27552,9 +15703,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sminv Rd_FPR8, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sminv(Rn_VPR128.16B, 1:1); -@endif } # C7.2.273 SMINV page C7-1994 line 111583 MATCH x0e31a800/mask=xbf3ffc00 @@ -27565,9 +15714,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :sminv Rd_FPR8, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sminv(Rn_VPR64.8B, 1:1); -@endif } # C7.2.273 SMINV page C7-1994 line 111583 MATCH x0e31a800/mask=xbf3ffc00 @@ -27578,9 +15725,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :sminv Rd_FPR16, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sminv(Rn_VPR64.4H, 2:1); -@endif } # C7.2.273 SMINV page C7-1994 line 111583 MATCH x0e31a800/mask=xbf3ffc00 @@ -27591,9 +15736,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :sminv Rd_FPR16, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sminv(Rn_VPR128.8H, 2:1); -@endif } # C7.2.273 SMINV page C7-1994 line 111583 MATCH x0e31a800/mask=xbf3ffc00 @@ -27604,9 +15747,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :sminv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sminv(Rn_VPR128.4S, 4:1); -@endif } # C7.2.274 SMLAL, SMLAL2 (by element) page C7-1996 line 111684 MATCH x0f002000/mask=xbf00f400 @@ -27618,54 +15759,19 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x :smlal Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x2 & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D 
& Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ2 on lane size 8 - local tmp10:4 = 0; - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp10, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp10) + (* [register]:8 tmp11); - simd_address_at(tmp10, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp10) + (* [register]:8 tmp11); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_smlal(Rd_VPR128.2D, Rn_VPR64.2S, tmp1, 4:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.274 SMLAL, SMLAL2 (by element) page C7-1996 line 111684 MATCH x0f002000/mask=xbf00f400 @@ -27677,74 +15783,25 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :smlal Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x2 & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - 
simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ2 on lane size 4 - local tmp10:4 = 0; - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp10, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) + (* [register]:4 tmp11); - simd_address_at(tmp10, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) + (* [register]:4 tmp11); - simd_address_at(tmp10, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) + (* [register]:4 tmp11); - simd_address_at(tmp10, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) + (* [register]:4 tmp11); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_smlal(Rd_VPR128.4S, Rn_VPR64.4H, tmp1, 2:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.274 SMLAL, SMLAL2 (by element) page C7-1996 line 111684 MATCH x0f002000/mask=xbf00f400 @@ -27756,58 +15813,20 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :smlal2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & 
Re_VPR128.S & vIndex & b_1215=0x2 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Rn_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp6, Re_VPR128.S, vIndex:4, 4, 16); - local tmp7:4 = * [register]:4 tmp6; - local tmp8:8 = sext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 8 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 8, 16); - simd_address_at(tmp11, TMPQ3, 0, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 8, 16); - simd_address_at(tmp11, TMPQ3, 1, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp13, TMPQ3, 0, 8, 16); - simd_address_at(tmp14, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) + (* [register]:8 tmp13); - simd_address_at(tmp12, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp13, TMPQ3, 1, 8, 16); - simd_address_at(tmp14, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) + (* [register]:8 tmp13); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = sext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp5, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_smlal2(Rd_VPR128.2D, Rn_VPR128.4S, tmp1, 4:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.274 SMLAL, SMLAL2 (by element) page C7-1996 line 111684 MATCH x0f002000/mask=xbf00f400 @@ -27819,78 +15838,26 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :smlal2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x2 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Rn_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * 
[register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp6, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp7:2 = * [register]:2 tmp6; - local tmp8:4 = sext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 4 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 4, 16); - simd_address_at(tmp11, TMPQ3, 0, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 4, 16); - simd_address_at(tmp11, TMPQ3, 1, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 2, 4, 16); - simd_address_at(tmp11, TMPQ3, 2, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 3, 4, 16); - simd_address_at(tmp11, TMPQ3, 3, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp13, TMPQ3, 0, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp13, TMPQ3, 1, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp13, TMPQ3, 2, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp13, TMPQ3, 3, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = sext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp5, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_smlal2(Rd_VPR128.4S, Rn_VPR128.8H, tmp1, 2:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + 
TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.275 SMLAL, SMLAL2 (vector) page C7-1999 line 111847 MATCH x0e208000/mask=xbf20fc00 @@ -27902,59 +15869,19 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :smlal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x8 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - simd_address_at(tmp10, TMPQ3, 0, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) * (* [register]:8 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - simd_address_at(tmp10, TMPQ3, 1, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) * (* [register]:8 tmp9); + TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) + (* [register]:8 tmp12); - simd_address_at(tmp11, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) + (* [register]:8 tmp12); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.2S, 4:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 8:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp3, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_smlal(Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.275 SMLAL, SMLAL2 (vector) page C7-1999 line 111847 MATCH x0e208000/mask=xbf20fc00 @@ -27966,87 +15893,27 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :smlal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x8 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize 
TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); + TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 3, 4, 
16); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.4H, 2:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp3, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_smlal(Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.275 SMLAL, SMLAL2 (vector) page C7-1999 line 111847 MATCH x0e208000/mask=xbf20fc00 @@ -28058,143 +15925,43 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :smlal Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x8 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, 
TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 2 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 2, 16); - simd_address_at(tmp9, TMPQ2, 0, 2, 16); - simd_address_at(tmp10, TMPQ3, 0, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 2, 16); - simd_address_at(tmp9, TMPQ2, 1, 2, 16); - simd_address_at(tmp10, TMPQ3, 1, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 2, 16); - simd_address_at(tmp9, TMPQ2, 2, 2, 16); - simd_address_at(tmp10, TMPQ3, 2, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 2, 16); - simd_address_at(tmp9, TMPQ2, 3, 2, 16); - simd_address_at(tmp10, TMPQ3, 3, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 4, 2, 16); - simd_address_at(tmp9, TMPQ2, 4, 2, 16); - simd_address_at(tmp10, TMPQ3, 4, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 5, 2, 16); - simd_address_at(tmp9, TMPQ2, 5, 2, 16); - simd_address_at(tmp10, TMPQ3, 5, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 6, 2, 16); - simd_address_at(tmp9, TMPQ2, 6, 2, 16); - simd_address_at(tmp10, TMPQ3, 6, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 7, 2, 16); - simd_address_at(tmp9, TMPQ2, 7, 2, 16); - simd_address_at(tmp10, TMPQ3, 7, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); + TMPQ3[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ3 on lane size 2 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp12, TMPQ3, 0, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp12, TMPQ3, 1, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp12, TMPQ3, 2, 2, 16); - 
simd_address_at(tmp13, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp12, TMPQ3, 3, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp12, TMPQ3, 4, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp12, TMPQ3, 5, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp12, TMPQ3, 6, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp12, TMPQ3, 7, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ3[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ3[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ3[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ3[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ3[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ3[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ3[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ3[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.8B, 1:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp3, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_smlal(Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.275 SMLAL, SMLAL2 (vector) page C7-1999 line 111847 MATCH x0e208000/mask=xbf20fc00 @@ -28206,67 +15973,21 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :smlal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x8 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 
16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - simd_address_at(tmp14, TMPQ5, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - simd_address_at(tmp14, TMPQ5, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ5 on lane size 8 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp16, TMPQ5, 0, 8, 16); - simd_address_at(tmp17, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp15) + (* [register]:8 tmp16); - simd_address_at(tmp15, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp16, TMPQ5, 1, 8, 16); - simd_address_at(tmp17, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp15) + (* [register]:8 tmp16); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ5[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ5[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 4:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 8:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp5, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_smlal2(Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.275 SMLAL, SMLAL2 (vector) page C7-1999 line 111847 MATCH x0e208000/mask=xbf20fc00 @@ -28278,95 +15999,29 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :smlal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x8 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - 
simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, TMPQ5, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, TMPQ5, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, TMPQ5, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, TMPQ5, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ5 on lane size 4 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp16, TMPQ5, 0, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) + (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp16, TMPQ5, 1, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) + (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp16, TMPQ5, 2, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) + (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp16, TMPQ5, 3, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) + (* [register]:4 tmp16); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ5[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ5[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ5[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ5[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 2:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp5, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_smlal2(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.275 SMLAL, SMLAL2 (vector) page 
C7-1999 line 111847 MATCH x0e208000/mask=xbf20fc00 @@ -28378,151 +16033,45 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :smlal2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x8 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = 
sext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 2 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 2, 16); - simd_address_at(tmp13, TMPQ4, 0, 2, 16); - simd_address_at(tmp14, TMPQ5, 0, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 2, 16); - simd_address_at(tmp13, TMPQ4, 1, 2, 16); - simd_address_at(tmp14, TMPQ5, 1, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 2, 16); - simd_address_at(tmp13, TMPQ4, 2, 2, 16); - simd_address_at(tmp14, TMPQ5, 2, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 2, 16); - simd_address_at(tmp13, TMPQ4, 3, 2, 16); - simd_address_at(tmp14, TMPQ5, 3, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 4, 2, 16); - simd_address_at(tmp13, TMPQ4, 4, 2, 16); - simd_address_at(tmp14, TMPQ5, 4, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 5, 2, 16); - simd_address_at(tmp13, TMPQ4, 5, 2, 16); - simd_address_at(tmp14, TMPQ5, 5, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 6, 2, 16); - simd_address_at(tmp13, TMPQ4, 6, 2, 16); - simd_address_at(tmp14, TMPQ5, 6, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 7, 2, 16); - simd_address_at(tmp13, TMPQ4, 7, 2, 16); - simd_address_at(tmp14, TMPQ5, 7, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); + TMPQ5[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ5 on lane size 2 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp16, TMPQ5, 0, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp16, TMPQ5, 1, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp16, TMPQ5, 2, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp16, TMPQ5, 3, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp16, TMPQ5, 4, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp16, TMPQ5, 5, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - 
simd_address_at(tmp15, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp16, TMPQ5, 6, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp16, TMPQ5, 7, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ5[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ5[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ5[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ5[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ5[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ5[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ5[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ5[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 1:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp5, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_smlal2(Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.276 SMLSL, SMLSL2 (by element) page C7-2001 line 111970 MATCH x0f006000/mask=xbf00f400 @@ -28534,54 +16083,19 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :smlsl Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x6 & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ2 on lane size 8 - local tmp10:4 = 0; - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp10, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp10) - (* [register]:8 tmp11); - simd_address_at(tmp10, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp10) - (* [register]:8 tmp11); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local 
tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.2D, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_smlsl(Rd_VPR128.2D, Rn_VPR64.2S, tmp1, 4:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.276 SMLSL, SMLSL2 (by element) page C7-2001 line 111970 MATCH x0f006000/mask=xbf00f400 @@ -28593,74 +16107,25 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :smlsl Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x6 & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ2 on lane size 4 - local tmp10:4 = 0; - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp10, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) - (* [register]:4 tmp11); - simd_address_at(tmp10, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) - (* [register]:4 tmp11); - simd_address_at(tmp10, Rd_VPR128.4S, 
2, 4, 16); - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) - (* [register]:4 tmp11); - simd_address_at(tmp10, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) - (* [register]:4 tmp11); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_smlsl(Rd_VPR128.4S, Rn_VPR64.4H, tmp1, 2:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.276 SMLSL, SMLSL2 (by element) page C7-2001 line 111970 MATCH x0f006000/mask=xbf00f400 @@ -28672,58 +16137,20 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :smlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x6 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp6, Re_VPR128.S, vIndex:4, 4, 16); - local tmp7:4 = * [register]:4 tmp6; - local tmp8:8 = sext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 8 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 8, 16); - simd_address_at(tmp11, TMPQ3, 0, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 8, 16); - simd_address_at(tmp11, TMPQ3, 1, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp13, TMPQ3, 0, 8, 16); - simd_address_at(tmp14, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) - (* [register]:8 tmp13); - simd_address_at(tmp12, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp13, TMPQ3, 1, 8, 16); - simd_address_at(tmp14, 
Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) - (* [register]:8 tmp13); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = sext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.2D, tmp5, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_smlsl2(Rd_VPR128.2D, Rn_VPR128.4S, tmp1, 4:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.276 SMLSL, SMLSL2 (by element) page C7-2001 line 111970 MATCH x0f006000/mask=xbf00f400 @@ -28735,78 +16162,26 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :smlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x6 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp6, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp7:2 = * [register]:2 tmp6; - local tmp8:4 = sext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 4 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 4, 16); - simd_address_at(tmp11, TMPQ3, 0, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 4, 16); - simd_address_at(tmp11, TMPQ3, 1, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 2, 4, 16); - simd_address_at(tmp11, TMPQ3, 2, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 3, 4, 16); - simd_address_at(tmp11, TMPQ3, 3, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp13, TMPQ3, 0, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - * 
[register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp13, TMPQ3, 1, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp13, TMPQ3, 2, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp13, TMPQ3, 3, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = sext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp5, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_smlsl2(Rd_VPR128.4S, Rn_VPR128.8H, tmp1, 2:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.277 SMLSL, SMLSL2 (vector) page C7-2004 line 112131 MATCH x0e20a000/mask=xbf20fc00 @@ -28818,59 +16193,19 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :smlsl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xa & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - simd_address_at(tmp10, TMPQ3, 0, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) * (* [register]:8 tmp9); - simd_address_at(tmp8, 
TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - simd_address_at(tmp10, TMPQ3, 1, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) * (* [register]:8 tmp9); + TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) - (* [register]:8 tmp12); - simd_address_at(tmp11, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) - (* [register]:8 tmp12); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.2S, 4:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 8:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.2D, tmp3, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_smlsl(Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.277 SMLSL, SMLSL2 (vector) page C7-2004 line 112131 MATCH x0e20a000/mask=xbf20fc00 @@ -28882,87 +16217,27 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :smlsl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xa & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - 
local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); + TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.4H, 2:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 4:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp3, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_smlsl(Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.277 SMLSL, SMLSL2 (vector) page C7-2004 line 112131 MATCH x0e20a000/mask=xbf20fc00 @@ -28974,143 +16249,43 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :smlsl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xa & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 
16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 2 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 2, 16); - simd_address_at(tmp9, TMPQ2, 0, 2, 16); - simd_address_at(tmp10, TMPQ3, 0, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 2, 16); - simd_address_at(tmp9, TMPQ2, 1, 2, 16); - simd_address_at(tmp10, TMPQ3, 1, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 2, 16); - simd_address_at(tmp9, TMPQ2, 2, 2, 16); - simd_address_at(tmp10, TMPQ3, 2, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 2, 16); - simd_address_at(tmp9, TMPQ2, 3, 2, 16); - simd_address_at(tmp10, TMPQ3, 3, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - 
simd_address_at(tmp8, TMPQ1, 4, 2, 16); - simd_address_at(tmp9, TMPQ2, 4, 2, 16); - simd_address_at(tmp10, TMPQ3, 4, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 5, 2, 16); - simd_address_at(tmp9, TMPQ2, 5, 2, 16); - simd_address_at(tmp10, TMPQ3, 5, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 6, 2, 16); - simd_address_at(tmp9, TMPQ2, 6, 2, 16); - simd_address_at(tmp10, TMPQ3, 6, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 7, 2, 16); - simd_address_at(tmp9, TMPQ2, 7, 2, 16); - simd_address_at(tmp10, TMPQ3, 7, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); + TMPQ3[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ3 on lane size 2 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp12, TMPQ3, 0, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp12, TMPQ3, 1, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp12, TMPQ3, 2, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp12, TMPQ3, 3, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp12, TMPQ3, 4, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp12, TMPQ3, 5, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp12, TMPQ3, 6, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp12, TMPQ3, 7, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ3[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ3[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ3[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ3[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ3[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ3[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ3[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ3[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif 
defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.8B, 1:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.8H, tmp3, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_smlsl(Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.277 SMLSL, SMLSL2 (vector) page C7-2004 line 112131 MATCH x0e20a000/mask=xbf20fc00 @@ -29122,67 +16297,21 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :smlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xa & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - simd_address_at(tmp14, TMPQ5, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - simd_address_at(tmp14, TMPQ5, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ5 on lane size 8 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp16, TMPQ5, 0, 8, 16); - simd_address_at(tmp17, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp15) - (* [register]:8 tmp16); - simd_address_at(tmp15, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp16, TMPQ5, 1, 8, 16); - simd_address_at(tmp17, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp15) - (* [register]:8 tmp16); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ5[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ5[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 4:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 8:1); - local 
tmpd:16 = SIMD_INT_SUB(Rd_VPR128.2D, tmp5, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_smlsl2(Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.277 SMLSL, SMLSL2 (vector) page C7-2004 line 112131 MATCH x0e20a000/mask=xbf20fc00 @@ -29194,95 +16323,29 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :smlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xa & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, TMPQ5, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, TMPQ5, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, TMPQ5, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, TMPQ5, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * 
TMPQ4[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ5 on lane size 4 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp16, TMPQ5, 0, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) - (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp16, TMPQ5, 1, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) - (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp16, TMPQ5, 2, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) - (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp16, TMPQ5, 3, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) - (* [register]:4 tmp16); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ5[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ5[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ5[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ5[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 2:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 4:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp5, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_smlsl2(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.277 SMLSL, SMLSL2 (vector) page C7-2004 line 112131 MATCH x0e20a000/mask=xbf20fc00 @@ -29294,151 +16357,45 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :smlsl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xa & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, 
Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 2 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 2, 16); - simd_address_at(tmp13, TMPQ4, 0, 2, 16); - simd_address_at(tmp14, TMPQ5, 0, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 2, 16); - simd_address_at(tmp13, TMPQ4, 1, 2, 16); - simd_address_at(tmp14, TMPQ5, 1, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 2, 16); - simd_address_at(tmp13, TMPQ4, 2, 2, 16); - simd_address_at(tmp14, TMPQ5, 2, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 2, 16); - simd_address_at(tmp13, TMPQ4, 3, 2, 16); - simd_address_at(tmp14, TMPQ5, 3, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 4, 2, 16); - simd_address_at(tmp13, TMPQ4, 4, 2, 16); - simd_address_at(tmp14, TMPQ5, 4, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 5, 2, 16); - simd_address_at(tmp13, TMPQ4, 5, 2, 16); - simd_address_at(tmp14, TMPQ5, 5, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 6, 2, 16); - simd_address_at(tmp13, TMPQ4, 6, 2, 16); - simd_address_at(tmp14, TMPQ5, 6, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 7, 2, 16); - simd_address_at(tmp13, TMPQ4, 7, 2, 16); - simd_address_at(tmp14, TMPQ5, 7, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) 
* (* [register]:2 tmp13); + TMPQ5[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ5 on lane size 2 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp16, TMPQ5, 0, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp16, TMPQ5, 1, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp16, TMPQ5, 2, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp16, TMPQ5, 3, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp16, TMPQ5, 4, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp16, TMPQ5, 5, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp16, TMPQ5, 6, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp16, TMPQ5, 7, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ5[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ5[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ5[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ5[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ5[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ5[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ5[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ5[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 1:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.8H, tmp5, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_smlsl2(Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.279 SMOV page C7-2007 line 112311 MATCH x0e002c00/mask=xbfe0fc00 @@ -29448,23 +16405,12 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 # AUNIT --inst x0e012c00/mask=xffe1fc00 --status pass :smov Rd_GPR32, Rn_VPR128.B.imm_neon_uimm4 -is 
b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm4:4, 1, 16); - local tmp2:1 = * [register]:1 tmp1; - Rd_GPR32 = sext(tmp2); + local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; + Rd_GPR32 = sext(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm4:1); - local tmpd:4 = sext(tmp1); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm4:1); - Rd_GPR32 = NEON_smov(tmp1); -@endif } # C7.2.279 SMOV page C7-2007 line 112311 MATCH x0e002c00/mask=xbfe0fc00 @@ -29474,23 +16420,12 @@ is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 # AUNIT --inst x0e022c00/mask=xffe3fc00 --status pass :smov Rd_GPR32, Rn_VPR128.H.imm_neon_uimm3 -is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm3:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - Rd_GPR32 = sext(tmp2); + local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; + Rd_GPR32 = sext(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm3:1); - local tmpd:4 = sext(tmp1); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm3:1); - Rd_GPR32 = NEON_smov(tmp1); -@endif } # C7.2.279 SMOV page C7-2007 line 112311 MATCH x0e002c00/mask=xbfe0fc00 @@ -29500,21 +16435,11 @@ is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 # AUNIT --inst x4e012c00/mask=xffe1fc00 --status pass :smov Rd_GPR64, Rn_VPR128.B.imm_neon_uimm4 -is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR64 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm4:4, 1, 16); - local tmp2:1 = * [register]:1 tmp1; - Rd_GPR64 = sext(tmp2); -@elif defined(SEMANTIC_pcode) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm4:1); + local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; Rd_GPR64 = sext(tmp1); -@elif defined(SEMANTIC_pseudo) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm4:1); - Rd_GPR64 = NEON_smov(tmp1); -@endif } # C7.2.279 SMOV page C7-2007 line 112311 MATCH x0e002c00/mask=xbfe0fc00 @@ -29524,21 +16449,11 @@ is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 # AUNIT --inst 
x4e022c00/mask=xffe3fc00 --status pass :smov Rd_GPR64, Rn_VPR128.H.imm_neon_uimm3 -is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR64 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm3:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - Rd_GPR64 = sext(tmp2); -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm3:1); + local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; Rd_GPR64 = sext(tmp1); -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm3:1); - Rd_GPR64 = NEON_smov(tmp1); -@endif } # C7.2.279 SMOV page C7-2007 line 112311 MATCH x0e002c00/mask=xbfe0fc00 @@ -29548,21 +16463,11 @@ is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 # AUNIT --inst x4e042c00/mask=xffe7fc00 --status pass :smov Rd_GPR64, Rn_VPR128.S.imm_neon_uimm2 -is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR64 +is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm2:4, 4, 16); - local tmp2:4 = * [register]:4 tmp1; - Rd_GPR64 = sext(tmp2); -@elif defined(SEMANTIC_pcode) - local tmp1:4 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm2:1); + local tmp1:4 = Rn_VPR128.S.imm_neon_uimm2; Rd_GPR64 = sext(tmp1); -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm2:1); - Rd_GPR64 = NEON_smov(tmp1); -@endif } # C7.2.280 SMULL, SMULL2 (by element) page C7-2009 line 112428 MATCH x0f00a000/mask=xbf00f400 @@ -29574,41 +16479,16 @@ is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 :smull Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xa & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = sext(tmp5); - # simd infix Rd_VPR128.2D = TMPQ1 * tmp6 on lane size 8 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp7) * tmp6; - simd_address_at(tmp7, TMPQ1, 1, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp7) * tmp6; - zext_zq(Zd); # zero upper 16 bytes of Zd 
-@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); - local tmpd:16 = SIMD_INT_MULT(tmp1, tmp3); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_smull(Rn_VPR64.2S, tmp1, 4:1); -@endif + # simd infix Rd_VPR128.2D = TMPQ1 * tmp3 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] * tmp3; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] * tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.280 SMULL, SMULL2 (by element) page C7-2009 line 112428 MATCH x0f00a000/mask=xbf00f400 @@ -29620,53 +16500,20 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :smull Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xa & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = sext(tmp5); - # simd infix Rd_VPR128.4S = TMPQ1 * tmp6 on lane size 4 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) * tmp6; - simd_address_at(tmp7, TMPQ1, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) * tmp6; - simd_address_at(tmp7, TMPQ1, 2, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) * tmp6; - simd_address_at(tmp7, TMPQ1, 3, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) * tmp6; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); - local tmpd:16 = SIMD_INT_MULT(tmp1, tmp3); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_smull(Rn_VPR64.4H, tmp1, 2:1); -@endif + # simd infix Rd_VPR128.4S = TMPQ1 * tmp3 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] * tmp3; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] * tmp3; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] * tmp3; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] * tmp3; + zext_zq(Zd); # zero upper 16 
bytes of Zd } # C7.2.280 SMULL, SMULL2 (by element) page C7-2009 line 112428 MATCH x0f00a000/mask=xbf00f400 @@ -29678,45 +16525,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :smull2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xa & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Rn_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp6, Re_VPR128.S, vIndex:4, 4, 16); - local tmp7:4 = * [register]:4 tmp6; - local tmp8:8 = sext(tmp7); - # simd infix Rd_VPR128.2D = TMPQ2 * tmp8 on lane size 8 - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - simd_address_at(tmp10, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp9) * tmp8; - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - simd_address_at(tmp10, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp9) * tmp8; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = sext(tmp3); - local tmpd:16 = SIMD_INT_MULT(tmp2, tmp4); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_smull2(Rn_VPR128.4S, tmp1, 4:1); -@endif + # simd infix Rd_VPR128.2D = TMPQ2 * tmp4 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] * tmp4; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] * tmp4; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.280 SMULL, SMULL2 (by element) page C7-2009 line 112428 MATCH x0f00a000/mask=xbf00f400 @@ -29728,57 +16547,21 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :smull2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xa & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Rn_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* 
[register]:2 tmp4); - local tmp6:4 = 0; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp6, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp7:2 = * [register]:2 tmp6; - local tmp8:4 = sext(tmp7); - # simd infix Rd_VPR128.4S = TMPQ2 * tmp8 on lane size 4 - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - simd_address_at(tmp10, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp9) * tmp8; - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - simd_address_at(tmp10, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp9) * tmp8; - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - simd_address_at(tmp10, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp9) * tmp8; - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - simd_address_at(tmp10, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp9) * tmp8; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = sext(tmp3); - local tmpd:16 = SIMD_INT_MULT(tmp2, tmp4); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_smull2(Rn_VPR128.8H, tmp1, 2:1); -@endif + # simd infix Rd_VPR128.4S = TMPQ2 * tmp4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] * tmp4; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] * tmp4; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] * tmp4; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] * tmp4; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.281 SMULL, SMULL2 (vector) page C7-2012 line 112581 MATCH x0e20c000/mask=xbf20fc00 @@ -29790,46 +16573,16 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :smull Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xc & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = TMPQ1 * TMPQ2 on lane size 8 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 8, 16); - simd_address_at(tmp8, TMPQ2, 0, 8, 16); - simd_address_at(tmp9, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp7) * (* [register]:8 tmp8); - 
simd_address_at(tmp7, TMPQ1, 1, 8, 16); - simd_address_at(tmp8, TMPQ2, 1, 8, 16); - simd_address_at(tmp9, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp7) * (* [register]:8 tmp8); + Rd_VPR128.2D[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.2S, 4:1); - local tmpd:16 = SIMD_INT_MULT(tmp1, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_smull(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.281 SMULL, SMULL2 (vector) page C7-2012 line 112581 MATCH x0e20c000/mask=xbf20fc00 @@ -29841,66 +16594,22 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :smull Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xc & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = TMPQ1 * TMPQ2 on lane size 4 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 4, 16); - simd_address_at(tmp8, TMPQ2, 0, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) * (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 4, 16); - simd_address_at(tmp8, TMPQ2, 1, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) * (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 2, 4, 16); - simd_address_at(tmp8, TMPQ2, 2, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) * (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 3, 4, 16); - simd_address_at(tmp8, TMPQ2, 
3, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) * (* [register]:4 tmp8); + Rd_VPR128.4S[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.4H, 2:1); - local tmpd:16 = SIMD_INT_MULT(tmp1, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_smull(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.281 SMULL, SMULL2 (vector) page C7-2012 line 112581 MATCH x0e20c000/mask=xbf20fc00 @@ -29912,106 +16621,34 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :smull Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xc & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, 
TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = TMPQ1 * TMPQ2 on lane size 2 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 2, 16); - simd_address_at(tmp8, TMPQ2, 0, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) * (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 2, 16); - simd_address_at(tmp8, TMPQ2, 1, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) * (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 2, 2, 16); - simd_address_at(tmp8, TMPQ2, 2, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) * (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 3, 2, 16); - simd_address_at(tmp8, TMPQ2, 3, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) * (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 4, 2, 16); - simd_address_at(tmp8, TMPQ2, 4, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) * (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 5, 2, 16); - simd_address_at(tmp8, TMPQ2, 5, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) * (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 6, 2, 16); - simd_address_at(tmp8, TMPQ2, 6, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) * (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 7, 2, 16); - simd_address_at(tmp8, TMPQ2, 7, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) * (* [register]:2 tmp8); + Rd_VPR128.8H[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.8B, 1:1); - local tmpd:16 = SIMD_INT_MULT(tmp1, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_smull(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.281 SMULL, SMULL2 (vector) page C7-2012 line 112581 MATCH x0e20c000/mask=xbf20fc00 @@ -30023,54 +16660,18 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :smull2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S 
& b_1215=0xc & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Rn_VPR128 & Rm_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix Rd_VPR128.2D = TMPQ2 * TMPQ4 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, TMPQ4, 0, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) * (* [register]:8 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, TMPQ4, 1, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) * (* [register]:8 tmp12); + Rd_VPR128.2D[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 4:1); - local tmpd:16 = SIMD_INT_MULT(tmp2, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_smull2(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.281 SMULL, SMULL2 (vector) page C7-2012 line 112581 MATCH x0e20c000/mask=xbf20fc00 @@ -30082,74 +16683,24 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :smull2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xc & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Rn_VPR128 & Rm_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 
tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 * TMPQ4 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, TMPQ4, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) * (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, TMPQ4, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) * (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, TMPQ4, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) * (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, TMPQ4, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) * (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_MULT(tmp2, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_smull2(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.281 SMULL, SMULL2 (vector) page C7-2012 line 112581 MATCH x0e20c000/mask=xbf20fc00 @@ -30161,114 +16712,36 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :smull2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xc & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Rn_VPR128 & Rm_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - 
simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 * TMPQ4 on lane size 2 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 2, 16); - simd_address_at(tmp12, TMPQ4, 0, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 2, 16); - simd_address_at(tmp12, TMPQ4, 1, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 2, 2, 16); - simd_address_at(tmp12, TMPQ4, 2, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - 
simd_address_at(tmp11, TMPQ2, 3, 2, 16); - simd_address_at(tmp12, TMPQ4, 3, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 4, 2, 16); - simd_address_at(tmp12, TMPQ4, 4, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 5, 2, 16); - simd_address_at(tmp12, TMPQ4, 5, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 6, 2, 16); - simd_address_at(tmp12, TMPQ4, 6, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 7, 2, 16); - simd_address_at(tmp12, TMPQ4, 7, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); + Rd_VPR128.8H[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 1:1); - local tmpd:16 = SIMD_INT_MULT(tmp2, tmp4, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_smull2(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.282 SQABS page C7-2014 line 112696 MATCH x5e207800/mask=xff3ffc00 @@ -30283,15 +16756,8 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :sqabs Rd_FPR8, Rn_FPR8 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_FPR8 & Rn_FPR8 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR8 = MP_INT_ABS(Rn_FPR8); zext_zb(Zd); # zero upper 31 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:1 = MP_INT_ABS(Rn_FPR8); - Zd = zext(tmpd); # assigning to Rd_FPR8 -@elif defined(SEMANTIC_pseudo) - Rd_FPR8 = NEON_sqabs(Rn_FPR8); -@endif } # C7.2.282 SQABS page C7-2014 line 112696 MATCH x5e207800/mask=xff3ffc00 @@ -30304,15 +16770,8 @@ is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_ :sqabs Rd_FPR16, Rn_FPR16 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = MP_INT_ABS(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:2 = MP_INT_ABS(Rn_FPR16); - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_sqabs(Rn_FPR16); -@endif } # C7.2.282 SQABS page C7-2014 line 112696 MATCH x5e207800/mask=xff3ffc00 @@ -30325,15 +16784,8 @@ is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_ :sqabs Rd_FPR32, Rn_FPR32 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = MP_INT_ABS(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd 
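# Illustrative note (editorial sketch): SQABS is architecturally a saturating absolute value, so for a
# 32-bit operand abs(0x80000000) yields 0x7FFFFFFF (and sets FPSR.QC) rather than wrapping back to
# 0x80000000; routing the operation through the MP_INT_ABS pcodeop, instead of an inline
# negate-and-select sequence, presumably preserves that saturating corner case.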
-@elif defined(SEMANTIC_pcode) - local tmpd:4 = MP_INT_ABS(Rn_FPR32); - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_sqabs(Rn_FPR32); -@endif } # C7.2.282 SQABS page C7-2014 line 112696 MATCH x5e207800/mask=xff3ffc00 @@ -30346,15 +16798,8 @@ is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_ :sqabs Rd_FPR64, Rn_FPR64 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = MP_INT_ABS(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = MP_INT_ABS(Rn_FPR64); - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_sqabs(Rn_FPR64); -@endif } # C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 @@ -30367,41 +16812,16 @@ is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_ :sqabs Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.8B = MP_INT_ABS(Rn_VPR64.8B) on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); + Rd_VPR64.8B[0,8] = MP_INT_ABS(Rn_VPR64.8B[0,8]); + Rd_VPR64.8B[8,8] = MP_INT_ABS(Rn_VPR64.8B[8,8]); + Rd_VPR64.8B[16,8] = MP_INT_ABS(Rn_VPR64.8B[16,8]); + Rd_VPR64.8B[24,8] = MP_INT_ABS(Rn_VPR64.8B[24,8]); + Rd_VPR64.8B[32,8] = MP_INT_ABS(Rn_VPR64.8B[32,8]); + Rd_VPR64.8B[40,8] = MP_INT_ABS(Rn_VPR64.8B[40,8]); + Rd_VPR64.8B[48,8] = MP_INT_ABS(Rn_VPR64.8B[48,8]); + Rd_VPR64.8B[56,8] = MP_INT_ABS(Rn_VPR64.8B[56,8]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ABS(Rn_VPR64.8B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_sqabs(Rn_VPR64.8B, 1:1); -@endif } # C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 @@ -30414,65 +16834,24 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_ :sqabs Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.16B = 
MP_INT_ABS(Rn_VPR128.16B) on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp2 = MP_INT_ABS(* [register]:1 tmp1); + Rd_VPR128.16B[0,8] = MP_INT_ABS(Rn_VPR128.16B[0,8]); + Rd_VPR128.16B[8,8] = MP_INT_ABS(Rn_VPR128.16B[8,8]); + Rd_VPR128.16B[16,8] = MP_INT_ABS(Rn_VPR128.16B[16,8]); + Rd_VPR128.16B[24,8] = MP_INT_ABS(Rn_VPR128.16B[24,8]); + Rd_VPR128.16B[32,8] = MP_INT_ABS(Rn_VPR128.16B[32,8]); + Rd_VPR128.16B[40,8] = MP_INT_ABS(Rn_VPR128.16B[40,8]); + Rd_VPR128.16B[48,8] = MP_INT_ABS(Rn_VPR128.16B[48,8]); + Rd_VPR128.16B[56,8] = MP_INT_ABS(Rn_VPR128.16B[56,8]); + Rd_VPR128.16B[64,8] = MP_INT_ABS(Rn_VPR128.16B[64,8]); + Rd_VPR128.16B[72,8] = MP_INT_ABS(Rn_VPR128.16B[72,8]); + Rd_VPR128.16B[80,8] = MP_INT_ABS(Rn_VPR128.16B[80,8]); + Rd_VPR128.16B[88,8] = MP_INT_ABS(Rn_VPR128.16B[88,8]); + Rd_VPR128.16B[96,8] = MP_INT_ABS(Rn_VPR128.16B[96,8]); + Rd_VPR128.16B[104,8] = MP_INT_ABS(Rn_VPR128.16B[104,8]); + Rd_VPR128.16B[112,8] = MP_INT_ABS(Rn_VPR128.16B[112,8]); + Rd_VPR128.16B[120,8] = MP_INT_ABS(Rn_VPR128.16B[120,8]); zext_zq(Zd); # zero upper 16 bytes 
of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ABS(Rn_VPR128.16B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_sqabs(Rn_VPR128.16B, 1:1); -@endif } # C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 @@ -30485,29 +16864,12 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_ :sqabs Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.4H = MP_INT_ABS(Rn_VPR64.4H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); + Rd_VPR64.4H[0,16] = MP_INT_ABS(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = MP_INT_ABS(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = MP_INT_ABS(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = MP_INT_ABS(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ABS(Rn_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_sqabs(Rn_VPR64.4H, 2:1); -@endif } # C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 @@ -30520,41 +16882,16 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_ :sqabs Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.8H = MP_INT_ABS(Rn_VPR128.8H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = MP_INT_ABS(* [register]:2 tmp1); + Rd_VPR128.8H[0,16] = MP_INT_ABS(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = 
MP_INT_ABS(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = MP_INT_ABS(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = MP_INT_ABS(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = MP_INT_ABS(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = MP_INT_ABS(Rn_VPR128.8H[80,16]); + Rd_VPR128.8H[96,16] = MP_INT_ABS(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = MP_INT_ABS(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ABS(Rn_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sqabs(Rn_VPR128.8H, 2:1); -@endif } # C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 @@ -30567,23 +16904,10 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_ :sqabs Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.2S = MP_INT_ABS(Rn_VPR64.2S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp2 = MP_INT_ABS(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp2 = MP_INT_ABS(* [register]:4 tmp1); + Rd_VPR64.2S[0,32] = MP_INT_ABS(Rn_VPR64.2S[0,32]); + Rd_VPR64.2S[32,32] = MP_INT_ABS(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ABS(Rn_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_sqabs(Rn_VPR64.2S, 4:1); -@endif } # C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 @@ -30596,29 +16920,12 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_ :sqabs Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.4S = MP_INT_ABS(Rn_VPR128.4S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp2 = MP_INT_ABS(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp2 = MP_INT_ABS(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp2 = MP_INT_ABS(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp2 = MP_INT_ABS(* [register]:4 tmp1); + Rd_VPR128.4S[0,32] = MP_INT_ABS(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = MP_INT_ABS(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = MP_INT_ABS(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = MP_INT_ABS(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ABS(Rn_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sqabs(Rn_VPR128.4S, 4:1); -@endif } # C7.2.282 SQABS page C7-2014 line 112696 MATCH x0e207800/mask=xbf3ffc00 @@ -30631,23 +16938,10 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_ :sqabs Rd_VPR128.2D, Rn_VPR128.2D is 
b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.2D = MP_INT_ABS(Rn_VPR128.2D) on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp2 = MP_INT_ABS(* [register]:8 tmp1); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp2 = MP_INT_ABS(* [register]:8 tmp1); + Rd_VPR128.2D[0,64] = MP_INT_ABS(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = MP_INT_ABS(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ABS(Rn_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sqabs(Rn_VPR128.2D, 8:1); -@endif } # C7.2.283 SQADD page C7-2016 line 112816 MATCH x5e200c00/mask=xff20fc00 @@ -30658,9 +16952,7 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_ :sqadd Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x1 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sqadd(Rn_FPR8, Rm_FPR8); -@endif } # C7.2.283 SQADD page C7-2016 line 112816 MATCH x5e200c00/mask=xff20fc00 @@ -30671,9 +16963,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115= :sqadd Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x1 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_sqadd(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.283 SQADD page C7-2016 line 112816 MATCH x5e200c00/mask=xff20fc00 @@ -30684,9 +16974,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :sqadd Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x1 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqadd(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.283 SQADD page C7-2016 line 112816 MATCH x5e200c00/mask=xff20fc00 @@ -30697,9 +16985,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115 :sqadd Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x1 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqadd(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 @@ -30710,9 +16996,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115 :sqadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x1 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sqadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 @@ -30723,9 +17007,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & 
Rm_VPR128.16 :sqadd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x1 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_sqadd(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 @@ -30736,9 +17018,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :sqadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x1 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 @@ -30749,9 +17029,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sqadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x1 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 @@ -30762,9 +17040,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sqadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x1 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 @@ -30775,9 +17051,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sqadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x1 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sqadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.283 SQADD page C7-2016 line 112816 MATCH x0e200c00/mask=xbf20fc00 @@ -30788,9 +17062,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :sqadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x1 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2018 line 112941 MATCH x5f003000/mask=xff00f400 @@ -30803,29 +17075,14 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sqdmlal Rd_FPR64, Rn_FPR32, Re_VPR128.S.vIndex is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b0011 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rd_FPR64 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = sext(Rn_FPR32); - local tmp2:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp2, Re_VPR128.S, vIndex:4, 4, 16); - local tmp3:4 = * 
[register]:4 tmp2; - local tmp4:8 = sext(tmp3); - local tmp5:8 = tmp1 * tmp4; - local tmp6:8 = tmp5 * 2:8; - Rd_FPR64 = Rd_FPR64 + tmp6; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = sext(Rn_FPR32); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); local tmp4:8 = tmp1 * tmp3; local tmp5:8 = tmp4 * 2:8; - local tmpd:8 = Rd_FPR64 + tmp5; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_FPR64 = NEON_sqdmlal(Rd_FPR64, Rn_FPR32, tmp1); -@endif + Rd_FPR64 = Rd_FPR64 + tmp5; + zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2018 line 112941 MATCH x5f003000/mask=xff00f400 @@ -30838,29 +17095,14 @@ is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b0011 & b_10=0 & Re_VPR128.S.vIndex :sqdmlal Rd_FPR32, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b0011 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rd_FPR32 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp1, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - local tmp3:4 = sext(tmp2); - local tmp4:4 = sext(Rn_FPR16); - local tmp5:4 = tmp3 * tmp4; - local tmp6:4 = tmp5 * 2:4; - Rd_FPR32 = Rd_FPR32 + tmp6; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; local tmp2:4 = sext(tmp1); local tmp3:4 = sext(Rn_FPR16); local tmp4:4 = tmp2 * tmp3; local tmp5:4 = tmp4 * 2:4; - local tmpd:4 = Rd_FPR32 + tmp5; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_FPR32 = NEON_sqdmlal(Rd_FPR32, Rn_FPR16, tmp1); -@endif + Rd_FPR32 = Rd_FPR32 + tmp5; + zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2018 line 112941 MATCH x0f003000/mask=xbf00f400 @@ -30873,64 +17115,22 @@ is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b0011 & b_10=0 & Re_VPR128Lo.H.vInd :sqdmlal Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0011 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - # simd infix TMPQ3 = TMPQ2 * 
2:8 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 - local tmp13:4 = 0; - local tmp14:4 = 0; - local tmp15:4 = 0; - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp14, TMPQ3, 0, 8, 16); - simd_address_at(tmp15, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp15 = (* [register]:8 tmp13) + (* [register]:8 tmp14); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp14, TMPQ3, 1, 8, 16); - simd_address_at(tmp15, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp15 = (* [register]:8 tmp13) + (* [register]:8 tmp14); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp4, 2:8); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp5, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_sqdmlal(Rd_VPR128.2D, Rn_VPR64.2S, tmp1, 4:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * 2:8; + TMPQ3[64,64] = TMPQ2[64,64] * 2:8; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2018 line 112941 MATCH x0f003000/mask=xbf00f400 @@ -30943,90 +17143,30 @@ is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0011 & b_10=0 & Re :sqdmlal Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0011 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 
= 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - # simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 - local tmp13:4 = 0; - local tmp14:4 = 0; - local tmp15:4 = 0; - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp14, TMPQ3, 0, 4, 16); - simd_address_at(tmp15, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp15 = (* [register]:4 tmp13) + (* [register]:4 tmp14); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp14, TMPQ3, 1, 4, 16); - simd_address_at(tmp15, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp15 = (* [register]:4 tmp13) + (* [register]:4 tmp14); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp14, TMPQ3, 2, 4, 16); - simd_address_at(tmp15, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp15 = (* [register]:4 tmp13) + (* [register]:4 tmp14); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp14, TMPQ3, 3, 4, 16); - simd_address_at(tmp15, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp15 = (* [register]:4 tmp13) + (* [register]:4 tmp14); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp4, 2:4); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp5, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_sqdmlal(Rd_VPR128.4S, Rn_VPR64.4H, tmp1, 2:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * 2:4; + TMPQ3[32,32] = TMPQ2[32,32] * 2:4; + TMPQ3[64,32] = TMPQ2[64,32] * 2:4; + TMPQ3[96,32] = TMPQ2[96,32] * 2:4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 
bytes of Zd } # C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2018 line 112941 MATCH x0f003000/mask=xbf00f400 @@ -31039,68 +17179,23 @@ is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0011 & b_10=0 & Re :sqdmlal2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0011 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp6, Re_VPR128.S, vIndex:4, 4, 16); - local tmp7:4 = * [register]:4 tmp6; - local tmp8:8 = sext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 8 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 8, 16); - simd_address_at(tmp11, TMPQ3, 0, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 8, 16); - simd_address_at(tmp11, TMPQ3, 1, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - # simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8 - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp13, TMPQ3, 0, 8, 16); - simd_address_at(tmp14, TMPQ4, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp13) * 2:8; - simd_address_at(tmp13, TMPQ3, 1, 8, 16); - simd_address_at(tmp14, TMPQ4, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp13) * 2:8; - # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp16, TMPQ4, 0, 8, 16); - simd_address_at(tmp17, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp15) + (* [register]:8 tmp16); - simd_address_at(tmp15, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp16, TMPQ4, 1, 8, 16); - simd_address_at(tmp17, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp15) + (* [register]:8 tmp16); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = sext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4); - local tmp6:16 = SIMD_INT_MULT(tmp5, 2:8); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp6, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_sqdmlal2(Rd_VPR128.2D, Rn_VPR128.4S, tmp1, 4:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8 + TMPQ4[0,64] = TMPQ3[0,64] * 2:8; + TMPQ4[64,64] = TMPQ3[64,64] * 2:8; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ4[0,64]; + 
Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ4[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2018 line 112941 MATCH x0f003000/mask=xbf00f400 @@ -31113,94 +17208,31 @@ is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0011 & b_10=0 & Re :sqdmlal2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0011 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp6, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp7:2 = * [register]:2 tmp6; - local tmp8:4 = sext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 4 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 4, 16); - simd_address_at(tmp11, TMPQ3, 0, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 4, 16); - simd_address_at(tmp11, TMPQ3, 1, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 2, 4, 16); - simd_address_at(tmp11, TMPQ3, 2, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 3, 4, 16); - simd_address_at(tmp11, TMPQ3, 3, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - # simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4 - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp13, TMPQ3, 0, 4, 16); - simd_address_at(tmp14, TMPQ4, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp13) * 2:4; - simd_address_at(tmp13, TMPQ3, 1, 4, 16); - simd_address_at(tmp14, TMPQ4, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp13) * 2:4; - simd_address_at(tmp13, TMPQ3, 2, 4, 16); - simd_address_at(tmp14, TMPQ4, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp13) * 2:4; - simd_address_at(tmp13, TMPQ3, 3, 4, 16); - simd_address_at(tmp14, TMPQ4, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp13) * 2:4; - # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp16, TMPQ4, 0, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) + (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp16, TMPQ4, 1, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) + (* [register]:4 tmp16); 
- simd_address_at(tmp15, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp16, TMPQ4, 2, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) + (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp16, TMPQ4, 3, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) + (* [register]:4 tmp16); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = sext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4); - local tmp6:16 = SIMD_INT_MULT(tmp5, 2:4); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp6, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_sqdmlal2(Rd_VPR128.4S, Rn_VPR128.8H, tmp1, 2:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4 + TMPQ4[0,32] = TMPQ3[0,32] * 2:4; + TMPQ4[32,32] = TMPQ3[32,32] * 2:4; + TMPQ4[64,32] = TMPQ3[64,32] * 2:4; + TMPQ4[96,32] = TMPQ3[96,32] * 2:4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2022 line 113158 MATCH x5e209000/mask=xff20fc00 @@ -31213,23 +17245,12 @@ is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0011 & b_10=0 & Re :sqdmlal Rd_FPR64, Rn_FPR32, Rm_FPR32 is b_2431=0b01011110 & b_2223=0b10 & b_21=1 & b_1015=0b100100 & Rd_FPR64 & Rn_FPR32 & Rm_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = sext(Rn_FPR32); local tmp2:8 = sext(Rm_FPR32); local tmp3:8 = tmp1 * tmp2; local tmp4:8 = tmp3 * 2:8; Rd_FPR64 = Rd_FPR64 + tmp4; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = sext(Rn_FPR32); - local tmp2:8 = sext(Rm_FPR32); - local tmp3:8 = tmp1 * tmp2; - local tmp4:8 = tmp3 * 2:8; - local tmpd:8 = Rd_FPR64 + tmp4; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_sqdmlal(Rd_FPR64, Rn_FPR32, Rm_FPR32); -@endif } # C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2022 line 113158 MATCH x5e209000/mask=xff20fc00 @@ -31242,23 +17263,12 @@ is b_2431=0b01011110 & b_2223=0b10 & b_21=1 & b_1015=0b100100 & Rd_FPR64 & Rn_FP :sqdmlal Rd_FPR32, Rn_FPR16, Rm_FPR16 is b_2431=0b01011110 & b_2223=0b01 & b_21=1 & b_1015=0b100100 & Rd_FPR32 & Rn_FPR16 & Rm_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = sext(Rn_FPR16); local tmp2:4 = sext(Rm_FPR16); local tmp3:4 = tmp1 * tmp2; local tmp4:4 = tmp3 * 2:4; Rd_FPR32 = Rd_FPR32 + tmp4; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = sext(Rn_FPR16); - local tmp2:4 = sext(Rm_FPR16); - local tmp3:4 = tmp1 * tmp2; - local tmp4:4 = tmp3 * 2:4; - local tmpd:4 = Rd_FPR32 + tmp4; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - 
Rd_FPR32 = NEON_sqdmlal(Rd_FPR32, Rn_FPR16, Rm_FPR16); -@endif } # C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2022 line 113158 MATCH x0e209000/mask=xbf20fc00 @@ -31271,69 +17281,22 @@ is b_2431=0b01011110 & b_2223=0b01 & b_21=1 & b_1015=0b100100 & Rd_FPR32 & Rn_FP :sqdmlal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b100100 & Rn_VPR64.2S & Rd_VPR128.2D & Rm_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - simd_address_at(tmp10, TMPQ3, 0, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) * (* [register]:8 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - simd_address_at(tmp10, TMPQ3, 1, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) * (* [register]:8 tmp9); + TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; # simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp12) * 2:8; - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp12) * 2:8; + TMPQ4[0,64] = TMPQ3[0,64] * 2:8; + TMPQ4[64,64] = TMPQ3[64,64] * 2:8; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8 - local tmp14:4 = 0; - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp14, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp15, TMPQ4, 0, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp16 = (* [register]:8 tmp14) + (* [register]:8 tmp15); - simd_address_at(tmp14, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp15, TMPQ4, 1, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp16 = (* [register]:8 tmp14) + (* [register]:8 tmp15); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.2S, 4:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 8:1); - local tmp4:16 = SIMD_INT_MULT(tmp3, 2:8); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = 
NEON_sqdmlal(Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2022 line 113158 MATCH x0e209000/mask=xbf20fc00 @@ -31346,103 +17309,32 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b100100 & :sqdmlal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b100100 & Rn_VPR64.4H & Rd_VPR128.4S & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); + TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; # simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp12) * 2:4; - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - * 
[register]:4 tmp13 = (* [register]:4 tmp12) * 2:4; - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp12) * 2:4; - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp12) * 2:4; + TMPQ4[0,32] = TMPQ3[0,32] * 2:4; + TMPQ4[32,32] = TMPQ3[32,32] * 2:4; + TMPQ4[64,32] = TMPQ3[64,32] * 2:4; + TMPQ4[96,32] = TMPQ3[96,32] * 2:4; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 - local tmp14:4 = 0; - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp15, TMPQ4, 0, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) + (* [register]:4 tmp15); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp15, TMPQ4, 1, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) + (* [register]:4 tmp15); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp15, TMPQ4, 2, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) + (* [register]:4 tmp15); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp15, TMPQ4, 3, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) + (* [register]:4 tmp15); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.4H, 2:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 4:1); - local tmp4:16 = SIMD_INT_MULT(tmp3, 2:4); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sqdmlal(Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2022 line 113158 MATCH x0e209000/mask=xbf20fc00 @@ -31455,77 +17347,24 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b100100 & :sqdmlal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b100100 & Rn_VPR128.4S & Rd_VPR128.2D & Rm_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); - 
simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - simd_address_at(tmp14, TMPQ5, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - simd_address_at(tmp14, TMPQ5, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; # simd infix TMPQ6 = TMPQ5 * 2:8 on lane size 8 - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp16, TMPQ5, 0, 8, 16); - simd_address_at(tmp17, TMPQ6, 0, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp16) * 2:8; - simd_address_at(tmp16, TMPQ5, 1, 8, 16); - simd_address_at(tmp17, TMPQ6, 1, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp16) * 2:8; + TMPQ6[0,64] = TMPQ5[0,64] * 2:8; + TMPQ6[64,64] = TMPQ5[64,64] * 2:8; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ6 on lane size 8 - local tmp18:4 = 0; - local tmp19:4 = 0; - local tmp20:4 = 0; - simd_address_at(tmp18, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp19, TMPQ6, 0, 8, 16); - simd_address_at(tmp20, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp20 = (* [register]:8 tmp18) + (* [register]:8 tmp19); - simd_address_at(tmp18, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp19, TMPQ6, 1, 8, 16); - simd_address_at(tmp20, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp20 = (* [register]:8 tmp18) + (* [register]:8 tmp19); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ6[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ6[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 4:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 8:1); - local tmp6:16 = SIMD_INT_MULT(tmp5, 2:8); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp6, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sqdmlal2(Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2022 line 113158 MATCH x0e209000/mask=xbf20fc00 @@ -31538,111 +17377,34 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b100100 & :sqdmlal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b100100 & Rn_VPR128.8H & Rd_VPR128.4S & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - 
simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, TMPQ5, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, TMPQ5, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, TMPQ5, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, TMPQ5, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; # simd infix TMPQ6 = TMPQ5 * 2:4 on lane size 4 - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp16, TMPQ5, 0, 4, 16); - simd_address_at(tmp17, TMPQ6, 0, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp16) * 2:4; - simd_address_at(tmp16, TMPQ5, 1, 4, 16); - simd_address_at(tmp17, TMPQ6, 1, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp16) * 2:4; - simd_address_at(tmp16, TMPQ5, 2, 4, 16); - simd_address_at(tmp17, TMPQ6, 2, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp16) * 2:4; - simd_address_at(tmp16, TMPQ5, 3, 4, 16); - simd_address_at(tmp17, TMPQ6, 3, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp16) * 2:4; + TMPQ6[0,32] = TMPQ5[0,32] * 2:4; + TMPQ6[32,32] = TMPQ5[32,32] * 2:4; + TMPQ6[64,32] = TMPQ5[64,32] * 2:4; + TMPQ6[96,32] = TMPQ5[96,32] * 2:4; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ6 on lane size 4 - local tmp18:4 = 0; - local tmp19:4 = 0; - local tmp20:4 = 0; - simd_address_at(tmp18, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp19, TMPQ6, 0, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); - simd_address_at(tmp18, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp19, TMPQ6, 1, 4, 16); - 
simd_address_at(tmp20, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); - simd_address_at(tmp18, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp19, TMPQ6, 2, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); - simd_address_at(tmp18, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp19, TMPQ6, 3, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ6[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ6[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ6[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ6[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 2:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 4:1); - local tmp6:16 = SIMD_INT_MULT(tmp5, 2:4); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp6, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sqdmlal2(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2025 line 113331 MATCH x5f007000/mask=xff00f400 @@ -31655,29 +17417,14 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b100100 & :sqdmlsl Rd_FPR64, Rn_FPR32, Re_VPR128.S.vIndex is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b0111 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rd_FPR64 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = sext(Rn_FPR32); - local tmp2:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp2, Re_VPR128.S, vIndex:4, 4, 16); - local tmp3:4 = * [register]:4 tmp2; - local tmp4:8 = sext(tmp3); - local tmp5:8 = tmp1 * tmp4; - local tmp6:8 = tmp5 * 2:8; - Rd_FPR64 = Rd_FPR64 - tmp6; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = sext(Rn_FPR32); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); local tmp4:8 = tmp1 * tmp3; local tmp5:8 = tmp4 * 2:8; - local tmpd:8 = Rd_FPR64 - tmp5; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_FPR64 = NEON_sqdmlsl(Rd_FPR64, Rn_FPR32, tmp1); -@endif + Rd_FPR64 = Rd_FPR64 - tmp5; + zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2025 line 113331 MATCH x5f007000/mask=xff00f400 @@ -31690,29 +17437,14 @@ is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b0111 & b_10=0 & Re_VPR128.S.vIndex :sqdmlsl Rd_FPR32, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b0111 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rd_FPR32 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = sext(Rn_FPR16); - local tmp2:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp2, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp3:2 = * [register]:2 tmp2; - local tmp4:4 = sext(tmp3); - local tmp5:4 = tmp1 * tmp4; - local tmp6:4 = tmp5 * 2:4; - Rd_FPR32 = Rd_FPR32 - tmp6; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = sext(Rn_FPR16); - local tmp2:2 = 
SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); local tmp4:4 = tmp1 * tmp3; local tmp5:4 = tmp4 * 2:4; - local tmpd:4 = Rd_FPR32 - tmp5; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_FPR32 = NEON_sqdmlsl(Rd_FPR32, Rn_FPR16, tmp1); -@endif + Rd_FPR32 = Rd_FPR32 - tmp5; + zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2025 line 113331 MATCH x0f007000/mask=xbf00f400 @@ -31725,64 +17457,22 @@ is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b0111 & b_10=0 & Re_VPR128Lo.H.vInd :sqdmlsl Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0111 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - # simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 - local tmp13:4 = 0; - local tmp14:4 = 0; - local tmp15:4 = 0; - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp14, TMPQ3, 0, 8, 16); - simd_address_at(tmp15, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp15 = (* [register]:8 tmp13) - (* [register]:8 tmp14); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp14, TMPQ3, 1, 8, 16); - simd_address_at(tmp15, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp15 = (* [register]:8 tmp13) - (* [register]:8 tmp14); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp4, 2:8); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.2D, tmp5, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_sqdmlsl(Rd_VPR128.2D, Rn_VPR64.2S, tmp1, 4:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + 
TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * 2:8; + TMPQ3[64,64] = TMPQ2[64,64] * 2:8; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2025 line 113331 MATCH x0f007000/mask=xbf00f400 @@ -31795,90 +17485,30 @@ is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0111 & b_10=0 & Re :sqdmlsl Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0111 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - # simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 - local tmp13:4 = 0; - local tmp14:4 = 0; - local tmp15:4 = 0; - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp14, TMPQ3, 0, 4, 16); - simd_address_at(tmp15, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp15 = (* [register]:4 tmp13) - (* [register]:4 tmp14); - simd_address_at(tmp13, 
Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp14, TMPQ3, 1, 4, 16); - simd_address_at(tmp15, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp15 = (* [register]:4 tmp13) - (* [register]:4 tmp14); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp14, TMPQ3, 2, 4, 16); - simd_address_at(tmp15, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp15 = (* [register]:4 tmp13) - (* [register]:4 tmp14); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp14, TMPQ3, 3, 4, 16); - simd_address_at(tmp15, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp15 = (* [register]:4 tmp13) - (* [register]:4 tmp14); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp4, 2:4); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp5, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_sqdmlsl(Rd_VPR128.4S, Rn_VPR64.4H, tmp1, 2:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * 2:4; + TMPQ3[32,32] = TMPQ2[32,32] * 2:4; + TMPQ3[64,32] = TMPQ2[64,32] * 2:4; + TMPQ3[96,32] = TMPQ2[96,32] * 2:4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2025 line 113331 MATCH x0f007000/mask=xbf00f400 @@ -31891,68 +17521,23 @@ is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0111 & b_10=0 & Re :sqdmlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0111 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp6, Re_VPR128.S, vIndex:4, 4, 16); - local tmp7:4 = * [register]:4 tmp6; - local tmp8:8 = sext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 8 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 8, 16); - simd_address_at(tmp11, TMPQ3, 0, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 8, 16); - simd_address_at(tmp11, TMPQ3, 1, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) 
* tmp8; - # simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8 - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp13, TMPQ3, 0, 8, 16); - simd_address_at(tmp14, TMPQ4, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp13) * 2:8; - simd_address_at(tmp13, TMPQ3, 1, 8, 16); - simd_address_at(tmp14, TMPQ4, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp13) * 2:8; - # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ4 on lane size 8 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp16, TMPQ4, 0, 8, 16); - simd_address_at(tmp17, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp15) - (* [register]:8 tmp16); - simd_address_at(tmp15, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp16, TMPQ4, 1, 8, 16); - simd_address_at(tmp17, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp15) - (* [register]:8 tmp16); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = sext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4); - local tmp6:16 = SIMD_INT_MULT(tmp5, 2:8); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.2D, tmp6, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_sqdmlsl2(Rd_VPR128.2D, Rn_VPR128.4S, tmp1, 4:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8 + TMPQ4[0,64] = TMPQ3[0,64] * 2:8; + TMPQ4[64,64] = TMPQ3[64,64] * 2:8; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ4 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ4[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2025 line 113331 MATCH x0f007000/mask=xbf00f400 @@ -31965,94 +17550,31 @@ is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0111 & b_10=0 & Re :sqdmlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0111 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp6, Re_VPR128Lo.H, vIndexHLM:4, 
2, 16); - local tmp7:2 = * [register]:2 tmp6; - local tmp8:4 = sext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 4 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 4, 16); - simd_address_at(tmp11, TMPQ3, 0, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 4, 16); - simd_address_at(tmp11, TMPQ3, 1, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 2, 4, 16); - simd_address_at(tmp11, TMPQ3, 2, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 3, 4, 16); - simd_address_at(tmp11, TMPQ3, 3, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - # simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4 - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp13, TMPQ3, 0, 4, 16); - simd_address_at(tmp14, TMPQ4, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp13) * 2:4; - simd_address_at(tmp13, TMPQ3, 1, 4, 16); - simd_address_at(tmp14, TMPQ4, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp13) * 2:4; - simd_address_at(tmp13, TMPQ3, 2, 4, 16); - simd_address_at(tmp14, TMPQ4, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp13) * 2:4; - simd_address_at(tmp13, TMPQ3, 3, 4, 16); - simd_address_at(tmp14, TMPQ4, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp13) * 2:4; - # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ4 on lane size 4 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp16, TMPQ4, 0, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) - (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp16, TMPQ4, 1, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) - (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp16, TMPQ4, 2, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) - (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp16, TMPQ4, 3, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) - (* [register]:4 tmp16); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = sext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4); - local tmp6:16 = SIMD_INT_MULT(tmp5, 2:4); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp6, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_sqdmlsl2(Rd_VPR128.4S, Rn_VPR128.8H, tmp1, 2:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4 + TMPQ4[0,32] = TMPQ3[0,32] * 2:4; + TMPQ4[32,32] = TMPQ3[32,32] * 2:4; + TMPQ4[64,32] = TMPQ3[64,32] * 2:4; + TMPQ4[96,32] = TMPQ3[96,32] * 2:4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ4 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - 
TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ4[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2029 line 113549 MATCH x5e20b000/mask=xff20fc00 @@ -32065,23 +17587,12 @@ is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0111 & b_10=0 & Re :sqdmlsl Rd_FPR64, Rn_FPR32, Rm_FPR32 is b_2431=0b01011110 & b_2223=0b10 & b_21=1 & b_1015=0b101100 & Rd_FPR64 & Rn_FPR32 & Rm_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = sext(Rn_FPR32); local tmp2:8 = sext(Rm_FPR32); local tmp3:8 = tmp1 * tmp2; local tmp4:8 = tmp3 * 2:8; Rd_FPR64 = Rd_FPR64 - tmp4; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = sext(Rn_FPR32); - local tmp2:8 = sext(Rm_FPR32); - local tmp3:8 = tmp1 * tmp2; - local tmp4:8 = tmp3 * 2:8; - local tmpd:8 = Rd_FPR64 - tmp4; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_sqdmlsl(Rd_FPR64, Rn_FPR32, Rm_FPR32); -@endif } # C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2029 line 113549 MATCH x5e20b000/mask=xff20fc00 @@ -32094,23 +17605,12 @@ is b_2431=0b01011110 & b_2223=0b10 & b_21=1 & b_1015=0b101100 & Rd_FPR64 & Rn_FP :sqdmlsl Rd_FPR32, Rn_FPR16, Rm_FPR16 is b_2431=0b01011110 & b_2223=0b01 & b_21=1 & b_1015=0b101100 & Rd_FPR32 & Rn_FPR16 & Rm_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = sext(Rn_FPR16); local tmp2:4 = sext(Rm_FPR16); local tmp3:4 = tmp1 * tmp2; local tmp4:4 = tmp3 * 2:4; Rd_FPR32 = Rd_FPR32 - tmp4; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = sext(Rn_FPR16); - local tmp2:4 = sext(Rm_FPR16); - local tmp3:4 = tmp1 * tmp2; - local tmp4:4 = tmp3 * 2:4; - local tmpd:4 = Rd_FPR32 - tmp4; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_sqdmlsl(Rd_FPR32, Rn_FPR16, Rm_FPR16); -@endif } # C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2029 line 113549 MATCH x0e20b000/mask=xbf20fc00 @@ -32123,69 +17623,22 @@ is b_2431=0b01011110 & b_2223=0b01 & b_21=1 & b_1015=0b101100 & Rd_FPR32 & Rn_FP :sqdmlsl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b101100 & Rn_VPR64.2S & Rd_VPR128.2D & Rm_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - 
simd_address_at(tmp10, TMPQ3, 0, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) * (* [register]:8 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - simd_address_at(tmp10, TMPQ3, 1, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) * (* [register]:8 tmp9); + TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; # simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp12) * 2:8; - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp12) * 2:8; + TMPQ4[0,64] = TMPQ3[0,64] * 2:8; + TMPQ4[64,64] = TMPQ3[64,64] * 2:8; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ4 on lane size 8 - local tmp14:4 = 0; - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp14, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp15, TMPQ4, 0, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp16 = (* [register]:8 tmp14) - (* [register]:8 tmp15); - simd_address_at(tmp14, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp15, TMPQ4, 1, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp16 = (* [register]:8 tmp14) - (* [register]:8 tmp15); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.2S, 4:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 8:1); - local tmp4:16 = SIMD_INT_MULT(tmp3, 2:8); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.2D, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sqdmlsl(Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S); -@endif } # C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2029 line 113549 MATCH x0e20b000/mask=xbf20fc00 @@ -32198,103 +17651,32 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b101100 & :sqdmlsl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b101100 & Rn_VPR64.4H & Rd_VPR128.4S & Rm_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - 
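Continuing the illustrative sketch above (same caveats, and reusing its sext and trunc helpers): the vector sqdmlsl constructors in this hunk apply the same operation independently to each lane, widening every source lane to twice its size before multiplying:

    # Per-lane model of the vector sqdmlsl p-code (e.g. 2S sources accumulating into 2D lanes);
    # requires sext() and trunc() from the previous sketch.
    def sqdmlsl_vector(acc_lanes, a_lanes, b_lanes, src_bits):
        acc_bits = 2 * src_bits
        return [
            trunc(sext(acc, acc_bits) - 2 * sext(a, src_bits) * sext(b, src_bits), acc_bits)
            for acc, a, b in zip(acc_lanes, a_lanes, b_lanes)
        ]

    # sqdmlsl_vector([100, 200], [1, 0xFFFFFFFE], [3, 4], 32) == [94, 216]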
simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); + TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; # simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp12) * 2:4; - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp12) * 2:4; - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp12) * 2:4; - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp12) * 2:4; + TMPQ4[0,32] = TMPQ3[0,32] * 2:4; + TMPQ4[32,32] = TMPQ3[32,32] * 2:4; + TMPQ4[64,32] = TMPQ3[64,32] * 2:4; + TMPQ4[96,32] = TMPQ3[96,32] * 2:4; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ4 on lane size 4 - local tmp14:4 = 0; - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp15, TMPQ4, 0, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) - (* [register]:4 tmp15); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp15, TMPQ4, 1, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) - (* [register]:4 tmp15); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp15, TMPQ4, 2, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) - (* [register]:4 tmp15); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp15, TMPQ4, 3, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) - (* [register]:4 tmp15); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ4[32,32]; + 
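For readers new to the bit-range notation used in the rewritten semantics: reg[off,width] selects width bits starting off bits above the least significant bit, so lane i of an n-bit-lane vector sits at bit offset i*n. A small Python sketch of that mapping, treating a vector register as one wide integer (illustrative only):

    # Lane i of width lane_bits occupies bits [i*lane_bits, (i+1)*lane_bits),
    # which is what reg[i*lane_bits, lane_bits] denotes in the semantics above.
    def get_lane(reg, i, lane_bits):
        return (reg >> (i * lane_bits)) & ((1 << lane_bits) - 1)

    def set_lane(reg, i, lane_bits, value):
        mask = ((1 << lane_bits) - 1) << (i * lane_bits)
        return (reg & ~mask) | ((value & ((1 << lane_bits) - 1)) << (i * lane_bits))

    # TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]) reads lane 2 of the 16-bit source,
    # get_lane(rn, 2, 16), widens it, and writes lane 2 of the 32-bit destination,
    # set_lane(tmpq1, 2, 32, widened).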
Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.4H, 2:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 4:1); - local tmp4:16 = SIMD_INT_MULT(tmp3, 2:4); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sqdmlsl(Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2029 line 113549 MATCH x0e20b000/mask=xbf20fc00 @@ -32307,77 +17689,24 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b101100 & :sqdmlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b101100 & Rn_VPR128.4S & Rd_VPR128.2D & Rm_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - simd_address_at(tmp14, TMPQ5, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - simd_address_at(tmp14, TMPQ5, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; # simd infix TMPQ6 = TMPQ5 * 2:8 on lane size 8 - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp16, TMPQ5, 0, 8, 16); - simd_address_at(tmp17, TMPQ6, 0, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp16) * 2:8; - simd_address_at(tmp16, TMPQ5, 1, 8, 16); - simd_address_at(tmp17, TMPQ6, 1, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp16) * 2:8; + TMPQ6[0,64] = TMPQ5[0,64] * 2:8; + TMPQ6[64,64] = TMPQ5[64,64] * 2:8; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ6 on lane size 8 - local tmp18:4 = 0; - local tmp19:4 = 0; - local tmp20:4 = 0; - simd_address_at(tmp18, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp19, TMPQ6, 0, 8, 16); - simd_address_at(tmp20, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp20 = (* [register]:8 tmp18) - (* [register]:8 tmp19); - 
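The sqdmlsl2 form here differs from sqdmlsl only in where its source lanes come from: it reads the upper 64 bits of each 128-bit source (for example Rn_VPR128.4S[64,64]) and then performs the same per-lane doubling multiply-subtract. A short illustrative sketch of that lane selection (not part of the patch):

    # The "2" variants take their inputs from the upper half of each 128-bit source:
    # a 4S register contributes lanes 2 and 3, an 8H register lanes 4 through 7.
    def get_lane(reg, i, lane_bits):  # same helper as in the earlier sketch
        return (reg >> (i * lane_bits)) & ((1 << lane_bits) - 1)

    def upper_half_lanes(reg128, lane_bits):
        lanes = 128 // lane_bits
        return [get_lane(reg128, i, lane_bits) for i in range(lanes // 2, lanes)]

    # upper_half_lanes(q, 32) == [get_lane(q, 2, 32), get_lane(q, 3, 32)]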
simd_address_at(tmp18, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp19, TMPQ6, 1, 8, 16); - simd_address_at(tmp20, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp20 = (* [register]:8 tmp18) - (* [register]:8 tmp19); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ6[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ6[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 4:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 8:1); - local tmp6:16 = SIMD_INT_MULT(tmp5, 2:8); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.2D, tmp6, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sqdmlsl2(Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2029 line 113549 MATCH x0e20b000/mask=xbf20fc00 @@ -32390,111 +17719,34 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b101100 & :sqdmlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b101100 & Rn_VPR128.8H & Rd_VPR128.4S & Rm_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, TMPQ5, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - 
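Looking ahead to the sqdmulh (by element) constructors a little further down: they widen each lane, multiply by the selected element, double the product, and keep only the high half of each double-width result (the TMPQ3[32,32]-style shuffle in the vector forms, the explicit >> 16 or >> 32 in the scalar forms). A rough Python model of one lane (helper names invented; like the p-code, it does not saturate):

    def sext(value, bits):  # same sign-extension helper as in the first sketch
        value &= (1 << bits) - 1
        return value - (1 << bits) if value >> (bits - 1) else value

    # One lane of the sqdmulh semantics: high half of the doubled widened product.
    def sqdmulh_lane(a, b, lane_bits):
        doubled = 2 * sext(a, lane_bits) * sext(b, lane_bits)
        return (doubled >> lane_bits) & ((1 << lane_bits) - 1)

    # sqdmulh_lane(0x4000, 0x4000, 16) == 0x2000, i.e. 0.5 * 0.5 == 0.25 in Q15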
simd_address_at(tmp14, TMPQ5, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, TMPQ5, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, TMPQ5, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; # simd infix TMPQ6 = TMPQ5 * 2:4 on lane size 4 - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp16, TMPQ5, 0, 4, 16); - simd_address_at(tmp17, TMPQ6, 0, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp16) * 2:4; - simd_address_at(tmp16, TMPQ5, 1, 4, 16); - simd_address_at(tmp17, TMPQ6, 1, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp16) * 2:4; - simd_address_at(tmp16, TMPQ5, 2, 4, 16); - simd_address_at(tmp17, TMPQ6, 2, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp16) * 2:4; - simd_address_at(tmp16, TMPQ5, 3, 4, 16); - simd_address_at(tmp17, TMPQ6, 3, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp16) * 2:4; + TMPQ6[0,32] = TMPQ5[0,32] * 2:4; + TMPQ6[32,32] = TMPQ5[32,32] * 2:4; + TMPQ6[64,32] = TMPQ5[64,32] * 2:4; + TMPQ6[96,32] = TMPQ5[96,32] * 2:4; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ6 on lane size 4 - local tmp18:4 = 0; - local tmp19:4 = 0; - local tmp20:4 = 0; - simd_address_at(tmp18, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp19, TMPQ6, 0, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) - (* [register]:4 tmp19); - simd_address_at(tmp18, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp19, TMPQ6, 1, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) - (* [register]:4 tmp19); - simd_address_at(tmp18, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp19, TMPQ6, 2, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) - (* [register]:4 tmp19); - simd_address_at(tmp18, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp19, TMPQ6, 3, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) - (* [register]:4 tmp19); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ6[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ6[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ6[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ6[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 2:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 4:1); - local tmp6:16 = SIMD_INT_MULT(tmp5, 2:4); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp6, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sqdmlsl2(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.288 SQDMULH (by element) page C7-2032 line 113722 MATCH x0f00c000/mask=xbf00f400 @@ -32506,64 +17758,22 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b101100 & :sqdmulh Rd_VPR64.2S, Rn_VPR64.2S, 
Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xc & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - # simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - # simd shuffle Rd_VPR64.2S = TMPQ3 (@1-0@3-1) lane size 4 - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp13, TMPQ3, 1, 4, 16); - simd_address_at(tmp14, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp14 = * [register]:4 tmp13; - simd_address_at(tmp13, TMPQ3, 3, 4, 16); - simd_address_at(tmp14, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp14 = * [register]:4 tmp13; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp4, 2:8); - local tmp6:4 = 0; - local tmpd:8 = Rd_VPR64.2S; - tmp6 = SIMD_PIECE(tmp5, 1:1); tmpd = SIMD_COPY(tmpd, tmp6, 0:1); - tmp6 = SIMD_PIECE(tmp5, 3:1); tmpd = SIMD_COPY(tmpd, tmp6, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR64.2S = NEON_sqdmlsl(Rd_VPR64.2S, Rn_VPR64.2S, tmp1, 4:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * 2:8; + TMPQ3[64,64] = TMPQ2[64,64] * 2:8; + # simd shuffle Rd_VPR64.2S = TMPQ3 (@1-0@3-1) lane size 4 + Rd_VPR64.2S[0,32] = TMPQ3[32,32]; + Rd_VPR64.2S[32,32] = TMPQ3[96,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.288 SQDMULH (by element) page C7-2032 line 113722 MATCH x0f00c000/mask=xbf00f400 @@ -32575,90 +17785,30 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :sqdmulh Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xc & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if 
defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - # simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - # simd shuffle Rd_VPR64.4H = TMPQ3 (@1-0@3-1@5-2@7-3) lane size 2 - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp13, TMPQ3, 1, 2, 16); - simd_address_at(tmp14, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPQ3, 3, 2, 16); - simd_address_at(tmp14, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPQ3, 5, 2, 16); - simd_address_at(tmp14, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPQ3, 7, 2, 16); - simd_address_at(tmp14, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp14 = * [register]:2 tmp13; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp4, 2:4); - local tmp6:2 = 0; - local tmpd:8 = Rd_VPR64.4H; - tmp6 = SIMD_PIECE(tmp5, 1:1); tmpd = SIMD_COPY(tmpd, tmp6, 0:1); - tmp6 = SIMD_PIECE(tmp5, 3:1); tmpd = SIMD_COPY(tmpd, tmp6, 1:1); - tmp6 = 
SIMD_PIECE(tmp5, 5:1); tmpd = SIMD_COPY(tmpd, tmp6, 2:1); - tmp6 = SIMD_PIECE(tmp5, 7:1); tmpd = SIMD_COPY(tmpd, tmp6, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.4H = NEON_sqdmulh(Rn_VPR64.4H, tmp1, 2:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * 2:4; + TMPQ3[32,32] = TMPQ2[32,32] * 2:4; + TMPQ3[64,32] = TMPQ2[64,32] * 2:4; + TMPQ3[96,32] = TMPQ2[96,32] * 2:4; + # simd shuffle Rd_VPR64.4H = TMPQ3 (@1-0@3-1@5-2@7-3) lane size 2 + Rd_VPR64.4H[0,16] = TMPQ3[16,16]; + Rd_VPR64.4H[16,16] = TMPQ3[48,16]; + Rd_VPR64.4H[32,16] = TMPQ3[80,16]; + Rd_VPR64.4H[48,16] = TMPQ3[112,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.288 SQDMULH (by element) page C7-2032 line 113722 MATCH x0f00c000/mask=xbf00f400 @@ -32670,90 +17820,30 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :sqdmulh Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xc & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPZ1 = sext(Rn_VPR128.4S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, TMPZ1, 0, 8, 32); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, TMPZ1, 1, 8, 32); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, TMPZ1, 2, 8, 32); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, TMPZ1, 3, 8, 32); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPZ1[0,64] = sext(Rn_VPR128.4S[0,32]); + TMPZ1[64,64] = sext(Rn_VPR128.4S[32,32]); + TMPZ1[128,64] = sext(Rn_VPR128.4S[64,32]); + TMPZ1[192,64] = sext(Rn_VPR128.4S[96,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = sext(tmp5); - # simd infix TMPZ2 = TMPZ1 * tmp6 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPZ1, 0, 8, 32); - simd_address_at(tmp9, TMPZ2, 0, 8, 32); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 1, 8, 32); - simd_address_at(tmp9, TMPZ2, 1, 8, 32); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 2, 8, 32); - simd_address_at(tmp9, TMPZ2, 2, 8, 32); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 3, 8, 32); - simd_address_at(tmp9, TMPZ2, 3, 8, 32); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - # simd infix TMPZ3 = TMPZ2 * 2:8 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPZ2, 0, 8, 32); - simd_address_at(tmp12, TMPZ3, 0, 8, 32); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - simd_address_at(tmp11, TMPZ2, 1, 8, 32); - simd_address_at(tmp12, TMPZ3, 1, 8, 32); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - simd_address_at(tmp11, TMPZ2, 2, 8, 32); - simd_address_at(tmp12, TMPZ3, 2, 8, 
32); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - simd_address_at(tmp11, TMPZ2, 3, 8, 32); - simd_address_at(tmp12, TMPZ3, 3, 8, 32); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - # simd shuffle Rd_VPR128.4S = TMPZ3 (@1-0@3-1@5-2@7-3) lane size 4 - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp13, TMPZ3, 1, 4, 32); - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp14 = * [register]:4 tmp13; - simd_address_at(tmp13, TMPZ3, 3, 4, 32); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp14 = * [register]:4 tmp13; - simd_address_at(tmp13, TMPZ3, 5, 4, 32); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp14 = * [register]:4 tmp13; - simd_address_at(tmp13, TMPZ3, 7, 4, 32); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp14 = * [register]:4 tmp13; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:32 = SIMD_INT_SEXT(Rn_VPR128.4S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); - local tmp4:32 = SIMD_INT_MULT(tmp1, tmp3); - local tmp5:32 = SIMD_INT_MULT(tmp4, 2:8); - local tmp6:4 = 0; - local tmpd:16 = Rd_VPR128.4S; - tmp6 = SIMD_PIECE(tmp5, 1:1); tmpd = SIMD_COPY(tmpd, tmp6, 0:1); - tmp6 = SIMD_PIECE(tmp5, 3:1); tmpd = SIMD_COPY(tmpd, tmp6, 1:1); - tmp6 = SIMD_PIECE(tmp5, 5:1); tmpd = SIMD_COPY(tmpd, tmp6, 2:1); - tmp6 = SIMD_PIECE(tmp5, 7:1); tmpd = SIMD_COPY(tmpd, tmp6, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.4S = NEON_sqdmulh(Rn_VPR128.4S, tmp1, 4:1); -@endif + # simd infix TMPZ2 = TMPZ1 * tmp3 on lane size 8 + TMPZ2[0,64] = TMPZ1[0,64] * tmp3; + TMPZ2[64,64] = TMPZ1[64,64] * tmp3; + TMPZ2[128,64] = TMPZ1[128,64] * tmp3; + TMPZ2[192,64] = TMPZ1[192,64] * tmp3; + # simd infix TMPZ3 = TMPZ2 * 2:8 on lane size 8 + TMPZ3[0,64] = TMPZ2[0,64] * 2:8; + TMPZ3[64,64] = TMPZ2[64,64] * 2:8; + TMPZ3[128,64] = TMPZ2[128,64] * 2:8; + TMPZ3[192,64] = TMPZ2[192,64] * 2:8; + # simd shuffle Rd_VPR128.4S = TMPZ3 (@1-0@3-1@5-2@7-3) lane size 4 + Rd_VPR128.4S[0,32] = TMPZ3[32,32]; + Rd_VPR128.4S[32,32] = TMPZ3[96,32]; + Rd_VPR128.4S[64,32] = TMPZ3[160,32]; + Rd_VPR128.4S[96,32] = TMPZ3[224,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.288 SQDMULH (by element) page C7-2032 line 113722 MATCH x0f00c000/mask=xbf00f400 @@ -32765,142 +17855,46 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :sqdmulh Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xc & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPZ1 = sext(Rn_VPR128.8H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, TMPZ1, 0, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, TMPZ1, 1, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, TMPZ1, 2, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, TMPZ1, 3, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, 
Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, TMPZ1, 4, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, TMPZ1, 5, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, TMPZ1, 6, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, TMPZ1, 7, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPZ1[0,32] = sext(Rn_VPR128.8H[0,16]); + TMPZ1[32,32] = sext(Rn_VPR128.8H[16,16]); + TMPZ1[64,32] = sext(Rn_VPR128.8H[32,16]); + TMPZ1[96,32] = sext(Rn_VPR128.8H[48,16]); + TMPZ1[128,32] = sext(Rn_VPR128.8H[64,16]); + TMPZ1[160,32] = sext(Rn_VPR128.8H[80,16]); + TMPZ1[192,32] = sext(Rn_VPR128.8H[96,16]); + TMPZ1[224,32] = sext(Rn_VPR128.8H[112,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = sext(tmp5); - # simd infix TMPZ2 = TMPZ1 * tmp6 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPZ1, 0, 4, 32); - simd_address_at(tmp9, TMPZ2, 0, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 1, 4, 32); - simd_address_at(tmp9, TMPZ2, 1, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 2, 4, 32); - simd_address_at(tmp9, TMPZ2, 2, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 3, 4, 32); - simd_address_at(tmp9, TMPZ2, 3, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 4, 4, 32); - simd_address_at(tmp9, TMPZ2, 4, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 5, 4, 32); - simd_address_at(tmp9, TMPZ2, 5, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 6, 4, 32); - simd_address_at(tmp9, TMPZ2, 6, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 7, 4, 32); - simd_address_at(tmp9, TMPZ2, 7, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - # simd infix TMPZ3 = TMPZ2 * 2:4 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPZ2, 0, 4, 32); - simd_address_at(tmp12, TMPZ3, 0, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 1, 4, 32); - simd_address_at(tmp12, TMPZ3, 1, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 2, 4, 32); - simd_address_at(tmp12, TMPZ3, 2, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 3, 4, 32); - simd_address_at(tmp12, TMPZ3, 3, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 4, 4, 32); - simd_address_at(tmp12, TMPZ3, 4, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 5, 4, 32); - simd_address_at(tmp12, TMPZ3, 5, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 6, 4, 32); - simd_address_at(tmp12, TMPZ3, 6, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 7, 4, 32); - simd_address_at(tmp12, TMPZ3, 7, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - # simd shuffle Rd_VPR128.8H = TMPZ3 
(@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 2 - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp13, TMPZ3, 1, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 3, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 5, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 7, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 9, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 11, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 13, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 15, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:32 = SIMD_INT_SEXT(Rn_VPR128.8H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); - local tmp4:32 = SIMD_INT_MULT(tmp1, tmp3); - local tmp5:32 = SIMD_INT_MULT(tmp4, 2:4); - local tmp6:2 = 0; - local tmpd:16 = Rd_VPR128.8H; - tmp6 = SIMD_PIECE(tmp5, 1:1); tmpd = SIMD_COPY(tmpd, tmp6, 0:1); - tmp6 = SIMD_PIECE(tmp5, 3:1); tmpd = SIMD_COPY(tmpd, tmp6, 1:1); - tmp6 = SIMD_PIECE(tmp5, 5:1); tmpd = SIMD_COPY(tmpd, tmp6, 2:1); - tmp6 = SIMD_PIECE(tmp5, 7:1); tmpd = SIMD_COPY(tmpd, tmp6, 3:1); - tmp6 = SIMD_PIECE(tmp5, 9:1); tmpd = SIMD_COPY(tmpd, tmp6, 4:1); - tmp6 = SIMD_PIECE(tmp5, 11:1); tmpd = SIMD_COPY(tmpd, tmp6, 5:1); - tmp6 = SIMD_PIECE(tmp5, 13:1); tmpd = SIMD_COPY(tmpd, tmp6, 6:1); - tmp6 = SIMD_PIECE(tmp5, 15:1); tmpd = SIMD_COPY(tmpd, tmp6, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.8H = NEON_sqdmulh(Rn_VPR128.8H, tmp1, 2:1); -@endif + # simd infix TMPZ2 = TMPZ1 * tmp3 on lane size 4 + TMPZ2[0,32] = TMPZ1[0,32] * tmp3; + TMPZ2[32,32] = TMPZ1[32,32] * tmp3; + TMPZ2[64,32] = TMPZ1[64,32] * tmp3; + TMPZ2[96,32] = TMPZ1[96,32] * tmp3; + TMPZ2[128,32] = TMPZ1[128,32] * tmp3; + TMPZ2[160,32] = TMPZ1[160,32] * tmp3; + TMPZ2[192,32] = TMPZ1[192,32] * tmp3; + TMPZ2[224,32] = TMPZ1[224,32] * tmp3; + # simd infix TMPZ3 = TMPZ2 * 2:4 on lane size 4 + TMPZ3[0,32] = TMPZ2[0,32] * 2:4; + TMPZ3[32,32] = TMPZ2[32,32] * 2:4; + TMPZ3[64,32] = TMPZ2[64,32] * 2:4; + TMPZ3[96,32] = TMPZ2[96,32] * 2:4; + TMPZ3[128,32] = TMPZ2[128,32] * 2:4; + TMPZ3[160,32] = TMPZ2[160,32] * 2:4; + TMPZ3[192,32] = TMPZ2[192,32] * 2:4; + TMPZ3[224,32] = TMPZ2[224,32] * 2:4; + # simd shuffle Rd_VPR128.8H = TMPZ3 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 2 + Rd_VPR128.8H[0,16] = TMPZ3[16,16]; + Rd_VPR128.8H[16,16] = TMPZ3[48,16]; + Rd_VPR128.8H[32,16] = TMPZ3[80,16]; + Rd_VPR128.8H[48,16] = TMPZ3[112,16]; + Rd_VPR128.8H[64,16] = TMPZ3[144,16]; + Rd_VPR128.8H[80,16] = TMPZ3[176,16]; + Rd_VPR128.8H[96,16] = TMPZ3[208,16]; + Rd_VPR128.8H[112,16] = TMPZ3[240,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.288 SQDMULH (by element) page C7-2032 line 
113722 MATCH x5f00c000/mask=xff00f400 @@ -32913,31 +17907,15 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :sqdmulh Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b1100 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = sext(Rn_FPR16); - local tmp2:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp2, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp3:2 = * [register]:2 tmp2; - local tmp4:4 = sext(tmp3); - local tmp5:4 = tmp1 * tmp4; - local tmp6:4 = tmp5 * 2:4; - local tmp7:4 = tmp6 >> 16:1; - Rd_FPR16 = tmp7:2; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = sext(Rn_FPR16); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); local tmp4:4 = tmp1 * tmp3; local tmp5:4 = tmp4 * 2:4; local tmp6:4 = tmp5 >> 16:1; - local tmpd:2 = tmp6:2; - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_FPR16 = NEON_sqdmulh(Rn_FPR16, tmp1); -@endif + Rd_FPR16 = tmp6:2; + zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.288 SQDMULH (by element) page C7-2032 line 113722 MATCH x5f00c000/mask=xff00f400 @@ -32950,31 +17928,15 @@ is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b1100 & b_10=0 & Rd_FPR16 & Rn_FPR1 :sqdmulh Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b1100 & b_10=0 & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = sext(Rn_FPR32); - local tmp2:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp2, Re_VPR128.S, vIndex:4, 4, 16); - local tmp3:4 = * [register]:4 tmp2; - local tmp4:8 = sext(tmp3); - local tmp5:8 = tmp1 * tmp4; - local tmp6:8 = tmp5 * 2:8; - local tmp7:8 = tmp6 >> 32:1; - Rd_FPR32 = tmp7:4; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = sext(Rn_FPR32); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); local tmp4:8 = tmp1 * tmp3; local tmp5:8 = tmp4 * 2:8; local tmp6:8 = tmp5 >> 32:1; - local tmpd:4 = tmp6:4; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_FPR32 = NEON_sqdmulh(Rn_FPR32, tmp1); -@endif + Rd_FPR32 = tmp6:4; + zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.289 SQDMULH (vector) page C7-2035 line 113898 MATCH x5e20b400/mask=xff20fc00 @@ -32985,9 +17947,7 @@ is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b1100 & b_10=0 & Rd_FPR32 & Rn_FPR3 :sqdmulh Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x16 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqdmulh(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.289 SQDMULH (vector) page C7-2035 line 113898 MATCH x5e20b400/mask=xff20fc00 @@ -32998,9 +17958,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115 :sqdmulh Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x16 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || 
defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqdmulh(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.289 SQDMULH (vector) page C7-2035 line 113898 MATCH x0e20b400/mask=xbf20fc00 @@ -33011,9 +17969,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115 :sqdmulh Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x16 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqdmulh(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.289 SQDMULH (vector) page C7-2035 line 113898 MATCH x0e20b400/mask=xbf20fc00 @@ -33024,9 +17980,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sqdmulh Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x16 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqdmulh(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.289 SQDMULH (vector) page C7-2035 line 113898 MATCH x0e20b400/mask=xbf20fc00 @@ -33037,9 +17991,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sqdmulh Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x16 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqdmulh(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.289 SQDMULH (vector) page C7-2035 line 113898 MATCH x0e20b400/mask=xbf20fc00 @@ -33050,9 +18002,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sqdmulh Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x16 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqdmulh(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2037 line 114026 MATCH x0f00b000/mask=xbf00f400 @@ -33064,51 +18014,19 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sqdmull Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xb & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * 
tmp6; - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - # simd infix Rd_VPR128.2D = TMPQ2 * 2:8 on lane size 8 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 8, 16); - simd_address_at(tmp11, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * 2:8; - simd_address_at(tmp10, TMPQ2, 1, 8, 16); - simd_address_at(tmp11, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * 2:8; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmpd:16 = SIMD_INT_MULT(tmp4, 2:8); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_sqdmull(Rn_VPR64.2S, tmp1, 4:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix Rd_VPR128.2D = TMPQ2 * 2:8 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] * 2:8; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] * 2:8; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2037 line 114026 MATCH x0f00b000/mask=xbf00f400 @@ -33120,55 +18038,20 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :sqdmull2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xb & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp6, Re_VPR128.S, vIndex:4, 4, 16); - local tmp7:4 = * [register]:4 tmp6; - local tmp8:8 = sext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 8 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 8, 16); - simd_address_at(tmp11, TMPQ3, 0, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 8, 16); - simd_address_at(tmp11, TMPQ3, 1, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - # simd infix Rd_VPR128.2D = TMPQ3 * 2:8 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp12) * 2:8; - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp12) * 2:8; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:4 = 
SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = sext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4); - local tmpd:16 = SIMD_INT_MULT(tmp5, 2:8); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_sqdmull2(Rn_VPR128.4S, tmp1, 4:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix Rd_VPR128.2D = TMPQ3 * 2:8 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ3[0,64] * 2:8; + Rd_VPR128.2D[64,64] = TMPQ3[64,64] * 2:8; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2037 line 114026 MATCH x0f00b000/mask=xbf00f400 @@ -33180,69 +18063,25 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :sqdmull Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xb & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - # simd infix Rd_VPR128.4S = TMPQ2 * 2:4 on lane size 4 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 4, 16); - simd_address_at(tmp11, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * 2:4; - simd_address_at(tmp10, TMPQ2, 1, 4, 16); - simd_address_at(tmp11, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * 2:4; - simd_address_at(tmp10, TMPQ2, 2, 4, 16); - simd_address_at(tmp11, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * 2:4; - simd_address_at(tmp10, TMPQ2, 3, 4, 16); - simd_address_at(tmp11, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * 2:4; - zext_zq(Zd); # zero upper 
16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmpd:16 = SIMD_INT_MULT(tmp4, 2:4); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_sqdmull2(Rn_VPR64.4H, tmp1, 2:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix Rd_VPR128.4S = TMPQ2 * 2:4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] * 2:4; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] * 2:4; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] * 2:4; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] * 2:4; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2037 line 114026 MATCH x0f00b000/mask=xbf00f400 @@ -33254,73 +18093,26 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :sqdmull2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xb & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp6, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp7:2 = * [register]:2 tmp6; - local tmp8:4 = sext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 4 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 4, 16); - simd_address_at(tmp11, TMPQ3, 0, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 4, 16); - simd_address_at(tmp11, TMPQ3, 1, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 2, 4, 16); - simd_address_at(tmp11, TMPQ3, 2, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 3, 4, 16); - simd_address_at(tmp11, TMPQ3, 3, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - # simd infix Rd_VPR128.4S = TMPQ3 * 2:4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp12) * 2:4; - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 
1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp12) * 2:4; - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp12) * 2:4; - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp12) * 2:4; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = sext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4); - local tmpd:16 = SIMD_INT_MULT(tmp5, 2:4); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_sqdmull2(Rn_VPR128.8H, tmp1, 2:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix Rd_VPR128.4S = TMPQ3 * 2:4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ3[0,32] * 2:4; + Rd_VPR128.4S[32,32] = TMPQ3[32,32] * 2:4; + Rd_VPR128.4S[64,32] = TMPQ3[64,32] * 2:4; + Rd_VPR128.4S[96,32] = TMPQ3[96,32] * 2:4; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2037 line 114026 MATCH x5f00b000/mask=xff00f400 @@ -33333,27 +18125,13 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :sqdmull Rd_FPR32, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b1011 & b_10=0 & Rd_FPR32 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = sext(Rn_FPR16); - local tmp2:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp2, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp3:2 = * [register]:2 tmp2; - local tmp4:4 = sext(tmp3); - local tmp5:4 = tmp1 * tmp4; - Rd_FPR32 = tmp5 * 2:4; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = sext(Rn_FPR16); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); local tmp4:4 = tmp1 * tmp3; - local tmpd:4 = tmp4 * 2:4; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_FPR32 = NEON_sqdmull(Rn_FPR16, tmp1); -@endif + Rd_FPR32 = tmp4 * 2:4; + zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2037 line 114026 MATCH x5f00b000/mask=xff00f400 @@ -33366,27 +18144,13 @@ is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b1011 & b_10=0 & Rd_FPR32 & Rn_FPR1 :sqdmull Rd_FPR64, Rn_FPR32, Re_VPR128.S.vIndex is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b1011 & b_10=0 & Rd_FPR64 & Rn_FPR32 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = sext(Rn_FPR32); - local tmp2:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp2, Re_VPR128.S, vIndex:4, 4, 16); - local tmp3:4 = * [register]:4 tmp2; - local tmp4:8 = sext(tmp3); - local tmp5:8 = tmp1 * tmp4; - Rd_FPR64 = tmp5 * 2:8; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = sext(Rn_FPR32); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = 
Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); local tmp4:8 = tmp1 * tmp3; - local tmpd:8 = tmp4 * 2:8; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_FPR64 = NEON_sqdmull(Rn_FPR32, tmp1); -@endif + Rd_FPR64 = tmp4 * 2:8; + zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2040 line 114226 MATCH x0e20d000/mask=xbf20fc00 @@ -33398,64 +18162,21 @@ is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b1011 & b_10=0 & Rd_FPR64 & Rn_FPR3 :sqdmull2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xd & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - simd_address_at(tmp14, TMPQ5, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - simd_address_at(tmp14, TMPQ5, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; # simd infix Rd_VPR128.2D = TMPQ5 * 2:8 on lane size 8 - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp15, TMPQ5, 0, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp16 = (* [register]:8 tmp15) * 2:8; - simd_address_at(tmp15, TMPQ5, 1, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp16 = (* [register]:8 tmp15) * 2:8; + Rd_VPR128.2D[0,64] = TMPQ5[0,64] * 2:8; + Rd_VPR128.2D[64,64] = TMPQ5[64,64] * 2:8; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 4:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 8:1); - local tmpd:16 = SIMD_INT_MULT(tmp5, 2:8); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sqdmull2(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2040 
line 114226 MATCH x0e20d000/mask=xbf20fc00 @@ -33467,90 +18188,29 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sqdmull2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xd & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); + TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, TMPQ5, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, TMPQ5, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, TMPQ5, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, TMPQ5, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; # simd infix Rd_VPR128.4S = TMPQ5 * 2:4 on lane size 4 - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp15, TMPQ5, 0, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp15) * 2:4; - 
simd_address_at(tmp15, TMPQ5, 1, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp15) * 2:4; - simd_address_at(tmp15, TMPQ5, 2, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp15) * 2:4; - simd_address_at(tmp15, TMPQ5, 3, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp15) * 2:4; + Rd_VPR128.4S[0,32] = TMPQ5[0,32] * 2:4; + Rd_VPR128.4S[32,32] = TMPQ5[32,32] * 2:4; + Rd_VPR128.4S[64,32] = TMPQ5[64,32] * 2:4; + Rd_VPR128.4S[96,32] = TMPQ5[96,32] * 2:4; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 2:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 4:1); - local tmpd:16 = SIMD_INT_MULT(tmp5, 2:4); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sqdmull2(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2040 line 114226 MATCH x0e20d000/mask=xbf20fc00 @@ -33561,9 +18221,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sqdmull Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xd & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_sqdmull(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2040 line 114226 MATCH x0e20d000/mask=xbf20fc00 @@ -33574,9 +18232,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sqdmull Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xd & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqdmull(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2040 line 114226 MATCH x5e20d000/mask=xff20fc00 @@ -33589,21 +18245,11 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sqdmull Rd_FPR32, Rn_FPR16, Rm_FPR16 is b_2431=0b01011110 & b_2223=0b01 & b_21=1 & b_1015=0b110100 & Rd_FPR32 & Rn_FPR16 & Rm_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = sext(Rn_FPR16); local tmp2:4 = sext(Rm_FPR16); local tmp3:4 = tmp1 * tmp2; Rd_FPR32 = tmp3 * 2:4; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = sext(Rn_FPR16); - local tmp2:4 = sext(Rm_FPR16); - local tmp3:4 = tmp1 * tmp2; - local tmpd:4 = tmp3 * 2:4; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_sqdmull(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2040 line 114226 MATCH x5e20d000/mask=xff20fc00 @@ -33616,21 +18262,11 @@ is b_2431=0b01011110 & b_2223=0b01 & b_21=1 & b_1015=0b110100 & Rd_FPR32 & Rn_FP :sqdmull Rd_FPR64, Rn_FPR32, Rm_FPR32 is b_2431=0b01011110 & b_2223=0b10 & b_21=1 & b_1015=0b110100 & Rd_FPR64 & Rn_FPR32 & Rm_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = sext(Rn_FPR32); local tmp2:8 = sext(Rm_FPR32); local tmp3:8 = tmp1 * tmp2; Rd_FPR64 = tmp3 * 2:8; zext_zd(Zd); # zero upper 24 bytes of Zd 
-@elif defined(SEMANTIC_pcode) - local tmp1:8 = sext(Rn_FPR32); - local tmp2:8 = sext(Rm_FPR32); - local tmp3:8 = tmp1 * tmp2; - local tmpd:8 = tmp3 * 2:8; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_sqdmull(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.292 SQNEG page C7-2043 line 114388 MATCH x7e207800/mask=xff3ffc00 @@ -33643,15 +18279,8 @@ is b_2431=0b01011110 & b_2223=0b10 & b_21=1 & b_1015=0b110100 & Rd_FPR64 & Rn_FP :sqneg Rd_FPR8, Rn_FPR8 is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_FPR8 & Rn_FPR8 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR8 = - Rn_FPR8; zext_zb(Zd); # zero upper 31 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:1 = - Rn_FPR8; - Zd = zext(tmpd); # assigning to Rd_FPR8 -@elif defined(SEMANTIC_pseudo) - Rd_FPR8 = NEON_sqneg(Rn_FPR8); -@endif } # C7.2.292 SQNEG page C7-2043 line 114388 MATCH x7e207800/mask=xff3ffc00 @@ -33664,15 +18293,8 @@ is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_FPR :sqneg Rd_FPR16, Rn_FPR16 is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = - Rn_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:2 = - Rn_FPR16; - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_sqneg(Rn_FPR16); -@endif } # C7.2.292 SQNEG page C7-2043 line 114388 MATCH x7e207800/mask=xff3ffc00 @@ -33685,15 +18307,8 @@ is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_FPR :sqneg Rd_FPR32, Rn_FPR32 is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = - Rn_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:4 = - Rn_FPR32; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_sqneg(Rn_FPR32); -@endif } # C7.2.292 SQNEG page C7-2043 line 114388 MATCH x7e207800/mask=xff3ffc00 @@ -33706,15 +18321,8 @@ is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_FPR :sqneg Rd_FPR64, Rn_FPR64 is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = - Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = - Rn_FPR64; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_sqneg(Rn_FPR64); -@endif } # C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00 @@ -33727,41 +18335,16 @@ is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_FPR :sqneg Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & Q=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.8B = -(Rn_VPR64.8B) on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 3, 1, 8); - 
simd_address_at(tmp2, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp2 = -(* [register]:1 tmp1); + Rd_VPR64.8B[0,8] = -(Rn_VPR64.8B[0,8]); + Rd_VPR64.8B[8,8] = -(Rn_VPR64.8B[8,8]); + Rd_VPR64.8B[16,8] = -(Rn_VPR64.8B[16,8]); + Rd_VPR64.8B[24,8] = -(Rn_VPR64.8B[24,8]); + Rd_VPR64.8B[32,8] = -(Rn_VPR64.8B[32,8]); + Rd_VPR64.8B[40,8] = -(Rn_VPR64.8B[40,8]); + Rd_VPR64.8B[48,8] = -(Rn_VPR64.8B[48,8]); + Rd_VPR64.8B[56,8] = -(Rn_VPR64.8B[56,8]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_2COMP(Rn_VPR64.8B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_sqneg(Rn_VPR64.8B, 1:1); -@endif } # C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00 @@ -33774,65 +18357,24 @@ is b_31=0 & Q=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR :sqneg Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.16B = -(Rn_VPR128.16B) on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 12, 1, 
16); - simd_address_at(tmp2, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); - simd_address_at(tmp1, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp2 = -(* [register]:1 tmp1); + Rd_VPR128.16B[0,8] = -(Rn_VPR128.16B[0,8]); + Rd_VPR128.16B[8,8] = -(Rn_VPR128.16B[8,8]); + Rd_VPR128.16B[16,8] = -(Rn_VPR128.16B[16,8]); + Rd_VPR128.16B[24,8] = -(Rn_VPR128.16B[24,8]); + Rd_VPR128.16B[32,8] = -(Rn_VPR128.16B[32,8]); + Rd_VPR128.16B[40,8] = -(Rn_VPR128.16B[40,8]); + Rd_VPR128.16B[48,8] = -(Rn_VPR128.16B[48,8]); + Rd_VPR128.16B[56,8] = -(Rn_VPR128.16B[56,8]); + Rd_VPR128.16B[64,8] = -(Rn_VPR128.16B[64,8]); + Rd_VPR128.16B[72,8] = -(Rn_VPR128.16B[72,8]); + Rd_VPR128.16B[80,8] = -(Rn_VPR128.16B[80,8]); + Rd_VPR128.16B[88,8] = -(Rn_VPR128.16B[88,8]); + Rd_VPR128.16B[96,8] = -(Rn_VPR128.16B[96,8]); + Rd_VPR128.16B[104,8] = -(Rn_VPR128.16B[104,8]); + Rd_VPR128.16B[112,8] = -(Rn_VPR128.16B[112,8]); + Rd_VPR128.16B[120,8] = -(Rn_VPR128.16B[120,8]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_2COMP(Rn_VPR128.16B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_sqneg(Rn_VPR128.16B, 1:1); -@endif } # C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00 @@ -33845,29 +18387,12 @@ is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR :sqneg Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & Q=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.4H = -(Rn_VPR64.4H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = -(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = -(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp2 = -(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = -(* [register]:2 tmp1); + Rd_VPR64.4H[0,16] = -(Rn_VPR64.4H[0,16]); + Rd_VPR64.4H[16,16] = -(Rn_VPR64.4H[16,16]); + Rd_VPR64.4H[32,16] = -(Rn_VPR64.4H[32,16]); + Rd_VPR64.4H[48,16] = -(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_2COMP(Rn_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_sqneg(Rn_VPR64.4H, 2:1); -@endif } # C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00 @@ -33880,41 +18405,16 @@ is b_31=0 & Q=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR :sqneg Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.8H = -(Rn_VPR128.8H) on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - 
simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp2 = -(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = -(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = -(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = -(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = -(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = -(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = -(* [register]:2 tmp1); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = -(* [register]:2 tmp1); + Rd_VPR128.8H[0,16] = -(Rn_VPR128.8H[0,16]); + Rd_VPR128.8H[16,16] = -(Rn_VPR128.8H[16,16]); + Rd_VPR128.8H[32,16] = -(Rn_VPR128.8H[32,16]); + Rd_VPR128.8H[48,16] = -(Rn_VPR128.8H[48,16]); + Rd_VPR128.8H[64,16] = -(Rn_VPR128.8H[64,16]); + Rd_VPR128.8H[80,16] = -(Rn_VPR128.8H[80,16]); + Rd_VPR128.8H[96,16] = -(Rn_VPR128.8H[96,16]); + Rd_VPR128.8H[112,16] = -(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_2COMP(Rn_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sqneg(Rn_VPR128.8H, 2:1); -@endif } # C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00 @@ -33927,23 +18427,10 @@ is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR :sqneg Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & Q=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR64.2S = -(Rn_VPR64.2S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp2 = -(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp2 = -(* [register]:4 tmp1); + Rd_VPR64.2S[0,32] = -(Rn_VPR64.2S[0,32]); + Rd_VPR64.2S[32,32] = -(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_2COMP(Rn_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_sqneg(Rn_VPR64.2S, 4:1); -@endif } # C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00 @@ -33956,29 +18443,12 @@ is b_31=0 & Q=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR :sqneg Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.4S = -(Rn_VPR128.4S) on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp2 = -(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 1, 4, 16); - * 
[register]:4 tmp2 = -(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp2 = -(* [register]:4 tmp1); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp2 = -(* [register]:4 tmp1); + Rd_VPR128.4S[0,32] = -(Rn_VPR128.4S[0,32]); + Rd_VPR128.4S[32,32] = -(Rn_VPR128.4S[32,32]); + Rd_VPR128.4S[64,32] = -(Rn_VPR128.4S[64,32]); + Rd_VPR128.4S[96,32] = -(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_2COMP(Rn_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sqneg(Rn_VPR128.4S, 4:1); -@endif } # C7.2.292 SQNEG page C7-2043 line 114388 MATCH x2e207800/mask=xbf3ffc00 @@ -33991,23 +18461,10 @@ is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR :sqneg Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd unary Rd_VPR128.2D = -(Rn_VPR128.2D) on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp2 = -(* [register]:8 tmp1); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp2 = -(* [register]:8 tmp1); + Rd_VPR128.2D[0,64] = -(Rn_VPR128.2D[0,64]); + Rd_VPR128.2D[64,64] = -(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_2COMP(Rn_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sqneg(Rn_VPR128.2D, 8:1); -@endif } # C7.2.277 SQRDMLAH (by element) page C7-1598 line 92254 KEEPWITH @@ -34083,9 +18540,7 @@ sqrdml_vm: sqrdml_vmlo^"."^sqrdml_esize[sqrdml_index] is b_24=1 & sqrdml_vmlo & :sqrdml^sqrdml_subop sqrdml_vd, sqrdml_vn, sqrdml_vm is b_2431=0b01111111 & b_1415=0b11 & b_12=1 & b_10=0 & sqrdml_subop & sqrdml_vd & sqrdml_vn & sqrdml_vm & sqrdml_esize & sqrdml_index & Rd_VPR128 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128 = NEON_sqrdml_as_h(Rd_VPR128, sqrdml_vn, sqrdml_vm, sqrdml_esize, 1:1, sqrdml_subop, sqrdml_index); -@endif } # C7.2.293 SQRDMLAH (by element) page C7-2045 line 114508 MATCH x2f00d000/mask=xbf00f400 @@ -34097,9 +18552,7 @@ is b_2431=0b01111111 & b_1415=0b11 & b_12=1 & b_10=0 & sqrdml_subop & sqrdml_vd :sqrdml^sqrdml_subop sqrdml_vd, sqrdml_vn, sqrdml_vm is b_31=0 & b_2429=0b101111 & b_1415=0b11 & b_12=1 & b_10=0 & sqrdml_subop & sqrdml_elements & sqrdml_vm & sqrdml_esize & sqrdml_index & sqrdml_vd & sqrdml_vn & Rd_VPR128 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128 = NEON_sqrdml_as_h(Rd_VPR128, sqrdml_vn, sqrdml_vm, sqrdml_esize, sqrdml_elements, sqrdml_subop, sqrdml_index); -@endif } # C7.2.294 SQRDMLAH (vector) page C7-2048 line 114696 MATCH x7e008400/mask=xff20fc00 @@ -34111,9 +18564,7 @@ is b_31=0 & b_2429=0b101111 & b_1415=0b11 & b_12=1 & b_10=0 & sqrdml_subop & sqr :sqrdml^sqrdml_subop sqrdml_vd, sqrdml_vn, sqrdml_vm is b_2431=0b01111110 & b_21=0 & b_1215=0b1000 & b_10=1 & sqrdml_subop & sqrdml_esize & sqrdml_vd & sqrdml_vn & sqrdml_vm & Rd_VPR128 & Zd { -@if defined(SEMANTIC_pseudo) || 
defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128 = NEON_sqrdml_as_h(Rd_VPR128, sqrdml_vn, sqrdml_vm, sqrdml_esize, 1:1, sqrdml_subop); -@endif } # C7.2.294 SQRDMLAH (vector) page C7-2048 line 114696 MATCH x2e008400/mask=xbf20fc00 @@ -34125,9 +18576,7 @@ is b_2431=0b01111110 & b_21=0 & b_1215=0b1000 & b_10=1 & sqrdml_subop & sqrdml_e :sqrdml^sqrdml_subop sqrdml_vd, sqrdml_vn, sqrdml_vm is b_31=0 & b_2429=0b101110 & b_21=0 & b_1215=0b1000 & b_10=1 & sqrdml_subop & sqrdml_elements & sqrdml_vd & sqrdml_vn & sqrdml_vm & sqrdml_esize & Rd_VPR128 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128 = NEON_sqrdml_as_h(Rd_VPR128, sqrdml_vn, sqrdml_vm, sqrdml_esize, sqrdml_elements, sqrdml_subop); -@endif } # C7.2.297 SQRDMULH (by element) page C7-2055 line 115168 MATCH x0f00d000/mask=xbf00f400 @@ -34141,64 +18590,22 @@ is b_31=0 & b_2429=0b101110 & b_21=0 & b_1215=0b1000 & b_10=1 & sqrdml_subop & s :sqrdmulh Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xd & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - # simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - # simd shuffle Rd_VPR64.2S = TMPQ3 (@1-0@3-1) lane size 4 - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp13, TMPQ3, 1, 4, 16); - simd_address_at(tmp14, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp14 = * [register]:4 tmp13; - simd_address_at(tmp13, TMPQ3, 3, 4, 16); - simd_address_at(tmp14, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp14 = * [register]:4 tmp13; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp4, 2:8); - local tmp6:4 = 0; - local tmpd:8 = Rd_VPR64.2S; - tmp6 = SIMD_PIECE(tmp5, 1:1); tmpd = SIMD_COPY(tmpd, tmp6, 0:1); - tmp6 = SIMD_PIECE(tmp5, 3:1); tmpd = SIMD_COPY(tmpd, tmp6, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif 
defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR64.2S = NEON_sqrdmulh(Rn_VPR64.2S, tmp1, 4:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * 2:8; + TMPQ3[64,64] = TMPQ2[64,64] * 2:8; + # simd shuffle Rd_VPR64.2S = TMPQ3 (@1-0@3-1) lane size 4 + Rd_VPR64.2S[0,32] = TMPQ3[32,32]; + Rd_VPR64.2S[32,32] = TMPQ3[96,32]; + zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.297 SQRDMULH (by element) page C7-2055 line 115168 MATCH x0f00d000/mask=xbf00f400 @@ -34210,90 +18617,30 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :sqrdmulh Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xd & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = sext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - # simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - # simd shuffle Rd_VPR64.4H = TMPQ3 (@1-0@3-1@5-2@7-3) lane size 2 - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp13, TMPQ3, 1, 2, 16); - simd_address_at(tmp14, Rd_VPR64.4H, 0, 2, 8); - * 
[register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPQ3, 3, 2, 16); - simd_address_at(tmp14, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPQ3, 5, 2, 16); - simd_address_at(tmp14, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPQ3, 7, 2, 16); - simd_address_at(tmp14, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp14 = * [register]:2 tmp13; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp4, 2:4); - local tmp6:2 = 0; - local tmpd:8 = Rd_VPR64.4H; - tmp6 = SIMD_PIECE(tmp5, 1:1); tmpd = SIMD_COPY(tmpd, tmp6, 0:1); - tmp6 = SIMD_PIECE(tmp5, 3:1); tmpd = SIMD_COPY(tmpd, tmp6, 1:1); - tmp6 = SIMD_PIECE(tmp5, 5:1); tmpd = SIMD_COPY(tmpd, tmp6, 2:1); - tmp6 = SIMD_PIECE(tmp5, 7:1); tmpd = SIMD_COPY(tmpd, tmp6, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR64.4H = NEON_sqrdmulh(Rn_VPR64.4H, tmp1, 2:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * 2:4; + TMPQ3[32,32] = TMPQ2[32,32] * 2:4; + TMPQ3[64,32] = TMPQ2[64,32] * 2:4; + TMPQ3[96,32] = TMPQ2[96,32] * 2:4; + # simd shuffle Rd_VPR64.4H = TMPQ3 (@1-0@3-1@5-2@7-3) lane size 2 + Rd_VPR64.4H[0,16] = TMPQ3[16,16]; + Rd_VPR64.4H[16,16] = TMPQ3[48,16]; + Rd_VPR64.4H[32,16] = TMPQ3[80,16]; + Rd_VPR64.4H[48,16] = TMPQ3[112,16]; + zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.297 SQRDMULH (by element) page C7-2055 line 115168 MATCH x0f00d000/mask=xbf00f400 @@ -34305,90 +18652,30 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :sqrdmulh Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xd & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPZ1 = sext(Rn_VPR128.4S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, TMPZ1, 0, 8, 32); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, TMPZ1, 1, 8, 32); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, TMPZ1, 2, 8, 32); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, TMPZ1, 3, 8, 32); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPZ1[0,64] = sext(Rn_VPR128.4S[0,32]); + TMPZ1[64,64] = sext(Rn_VPR128.4S[32,32]); + TMPZ1[128,64] = sext(Rn_VPR128.4S[64,32]); + TMPZ1[192,64] = sext(Rn_VPR128.4S[96,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = sext(tmp5); - # simd infix TMPZ2 = TMPZ1 * tmp6 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - 
simd_address_at(tmp8, TMPZ1, 0, 8, 32); - simd_address_at(tmp9, TMPZ2, 0, 8, 32); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 1, 8, 32); - simd_address_at(tmp9, TMPZ2, 1, 8, 32); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 2, 8, 32); - simd_address_at(tmp9, TMPZ2, 2, 8, 32); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 3, 8, 32); - simd_address_at(tmp9, TMPZ2, 3, 8, 32); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - # simd infix TMPZ3 = TMPZ2 * 2:8 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPZ2, 0, 8, 32); - simd_address_at(tmp12, TMPZ3, 0, 8, 32); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - simd_address_at(tmp11, TMPZ2, 1, 8, 32); - simd_address_at(tmp12, TMPZ3, 1, 8, 32); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - simd_address_at(tmp11, TMPZ2, 2, 8, 32); - simd_address_at(tmp12, TMPZ3, 2, 8, 32); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - simd_address_at(tmp11, TMPZ2, 3, 8, 32); - simd_address_at(tmp12, TMPZ3, 3, 8, 32); - * [register]:8 tmp12 = (* [register]:8 tmp11) * 2:8; - # simd shuffle Rd_VPR128.4S = TMPZ3 (@1-0@3-1@5-2@7-3) lane size 4 - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp13, TMPZ3, 1, 4, 32); - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp14 = * [register]:4 tmp13; - simd_address_at(tmp13, TMPZ3, 3, 4, 32); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp14 = * [register]:4 tmp13; - simd_address_at(tmp13, TMPZ3, 5, 4, 32); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp14 = * [register]:4 tmp13; - simd_address_at(tmp13, TMPZ3, 7, 4, 32); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp14 = * [register]:4 tmp13; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:32 = SIMD_INT_SEXT(Rn_VPR128.4S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); - local tmp4:32 = SIMD_INT_MULT(tmp1, tmp3); - local tmp5:32 = SIMD_INT_MULT(tmp4, 2:8); - local tmp6:4 = 0; - local tmpd:16 = Rd_VPR128.4S; - tmp6 = SIMD_PIECE(tmp5, 1:1); tmpd = SIMD_COPY(tmpd, tmp6, 0:1); - tmp6 = SIMD_PIECE(tmp5, 3:1); tmpd = SIMD_COPY(tmpd, tmp6, 1:1); - tmp6 = SIMD_PIECE(tmp5, 5:1); tmpd = SIMD_COPY(tmpd, tmp6, 2:1); - tmp6 = SIMD_PIECE(tmp5, 7:1); tmpd = SIMD_COPY(tmpd, tmp6, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.4S = NEON_sqrdmulh(Rn_VPR128.4S, tmp1, 4:1); -@endif + # simd infix TMPZ2 = TMPZ1 * tmp3 on lane size 8 + TMPZ2[0,64] = TMPZ1[0,64] * tmp3; + TMPZ2[64,64] = TMPZ1[64,64] * tmp3; + TMPZ2[128,64] = TMPZ1[128,64] * tmp3; + TMPZ2[192,64] = TMPZ1[192,64] * tmp3; + # simd infix TMPZ3 = TMPZ2 * 2:8 on lane size 8 + TMPZ3[0,64] = TMPZ2[0,64] * 2:8; + TMPZ3[64,64] = TMPZ2[64,64] * 2:8; + TMPZ3[128,64] = TMPZ2[128,64] * 2:8; + TMPZ3[192,64] = TMPZ2[192,64] * 2:8; + # simd shuffle Rd_VPR128.4S = TMPZ3 (@1-0@3-1@5-2@7-3) lane size 4 + Rd_VPR128.4S[0,32] = TMPZ3[32,32]; + Rd_VPR128.4S[32,32] = TMPZ3[96,32]; + Rd_VPR128.4S[64,32] = TMPZ3[160,32]; + Rd_VPR128.4S[96,32] = TMPZ3[224,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.297 SQRDMULH (by element) page C7-2055 line 115168 MATCH x0f00d000/mask=xbf00f400 @@ -34400,142 +18687,46 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & 
advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :sqrdmulh Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xd & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPZ1 = sext(Rn_VPR128.8H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, TMPZ1, 0, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, TMPZ1, 1, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, TMPZ1, 2, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, TMPZ1, 3, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, TMPZ1, 4, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, TMPZ1, 5, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, TMPZ1, 6, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, TMPZ1, 7, 4, 32); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPZ1[0,32] = sext(Rn_VPR128.8H[0,16]); + TMPZ1[32,32] = sext(Rn_VPR128.8H[16,16]); + TMPZ1[64,32] = sext(Rn_VPR128.8H[32,16]); + TMPZ1[96,32] = sext(Rn_VPR128.8H[48,16]); + TMPZ1[128,32] = sext(Rn_VPR128.8H[64,16]); + TMPZ1[160,32] = sext(Rn_VPR128.8H[80,16]); + TMPZ1[192,32] = sext(Rn_VPR128.8H[96,16]); + TMPZ1[224,32] = sext(Rn_VPR128.8H[112,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = sext(tmp5); - # simd infix TMPZ2 = TMPZ1 * tmp6 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPZ1, 0, 4, 32); - simd_address_at(tmp9, TMPZ2, 0, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 1, 4, 32); - simd_address_at(tmp9, TMPZ2, 1, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 2, 4, 32); - simd_address_at(tmp9, TMPZ2, 2, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 3, 4, 32); - simd_address_at(tmp9, TMPZ2, 3, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 4, 4, 32); - simd_address_at(tmp9, TMPZ2, 4, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 5, 4, 32); - simd_address_at(tmp9, TMPZ2, 5, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 6, 4, 32); - simd_address_at(tmp9, TMPZ2, 6, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPZ1, 7, 4, 32); - simd_address_at(tmp9, TMPZ2, 7, 4, 32); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - # simd infix TMPZ3 = TMPZ2 * 2:4 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPZ2, 0, 4, 32); - simd_address_at(tmp12, TMPZ3, 0, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, 
TMPZ2, 1, 4, 32); - simd_address_at(tmp12, TMPZ3, 1, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 2, 4, 32); - simd_address_at(tmp12, TMPZ3, 2, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 3, 4, 32); - simd_address_at(tmp12, TMPZ3, 3, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 4, 4, 32); - simd_address_at(tmp12, TMPZ3, 4, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 5, 4, 32); - simd_address_at(tmp12, TMPZ3, 5, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 6, 4, 32); - simd_address_at(tmp12, TMPZ3, 6, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - simd_address_at(tmp11, TMPZ2, 7, 4, 32); - simd_address_at(tmp12, TMPZ3, 7, 4, 32); - * [register]:4 tmp12 = (* [register]:4 tmp11) * 2:4; - # simd shuffle Rd_VPR128.8H = TMPZ3 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 2 - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp13, TMPZ3, 1, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 3, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 5, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 7, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 9, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 11, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 13, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - simd_address_at(tmp13, TMPZ3, 15, 2, 32); - simd_address_at(tmp14, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp14 = * [register]:2 tmp13; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:32 = SIMD_INT_SEXT(Rn_VPR128.8H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); - local tmp4:32 = SIMD_INT_MULT(tmp1, tmp3); - local tmp5:32 = SIMD_INT_MULT(tmp4, 2:4); - local tmp6:2 = 0; - local tmpd:16 = Rd_VPR128.8H; - tmp6 = SIMD_PIECE(tmp5, 1:1); tmpd = SIMD_COPY(tmpd, tmp6, 0:1); - tmp6 = SIMD_PIECE(tmp5, 3:1); tmpd = SIMD_COPY(tmpd, tmp6, 1:1); - tmp6 = SIMD_PIECE(tmp5, 5:1); tmpd = SIMD_COPY(tmpd, tmp6, 2:1); - tmp6 = SIMD_PIECE(tmp5, 7:1); tmpd = SIMD_COPY(tmpd, tmp6, 3:1); - tmp6 = SIMD_PIECE(tmp5, 9:1); tmpd = SIMD_COPY(tmpd, tmp6, 4:1); - tmp6 = SIMD_PIECE(tmp5, 11:1); tmpd = SIMD_COPY(tmpd, tmp6, 5:1); - tmp6 = SIMD_PIECE(tmp5, 13:1); tmpd = SIMD_COPY(tmpd, tmp6, 6:1); - tmp6 = SIMD_PIECE(tmp5, 15:1); tmpd = SIMD_COPY(tmpd, tmp6, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.8H = NEON_sqrdmulh(Rn_VPR128.8H, tmp1, 2:1); -@endif + # simd infix TMPZ2 = TMPZ1 * tmp3 on lane size 4 + TMPZ2[0,32] = TMPZ1[0,32] * tmp3; + TMPZ2[32,32] = TMPZ1[32,32] * tmp3; + TMPZ2[64,32] = TMPZ1[64,32] * tmp3; + TMPZ2[96,32] = TMPZ1[96,32] * tmp3; + 
TMPZ2[128,32] = TMPZ1[128,32] * tmp3; + TMPZ2[160,32] = TMPZ1[160,32] * tmp3; + TMPZ2[192,32] = TMPZ1[192,32] * tmp3; + TMPZ2[224,32] = TMPZ1[224,32] * tmp3; + # simd infix TMPZ3 = TMPZ2 * 2:4 on lane size 4 + TMPZ3[0,32] = TMPZ2[0,32] * 2:4; + TMPZ3[32,32] = TMPZ2[32,32] * 2:4; + TMPZ3[64,32] = TMPZ2[64,32] * 2:4; + TMPZ3[96,32] = TMPZ2[96,32] * 2:4; + TMPZ3[128,32] = TMPZ2[128,32] * 2:4; + TMPZ3[160,32] = TMPZ2[160,32] * 2:4; + TMPZ3[192,32] = TMPZ2[192,32] * 2:4; + TMPZ3[224,32] = TMPZ2[224,32] * 2:4; + # simd shuffle Rd_VPR128.8H = TMPZ3 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 2 + Rd_VPR128.8H[0,16] = TMPZ3[16,16]; + Rd_VPR128.8H[16,16] = TMPZ3[48,16]; + Rd_VPR128.8H[32,16] = TMPZ3[80,16]; + Rd_VPR128.8H[48,16] = TMPZ3[112,16]; + Rd_VPR128.8H[64,16] = TMPZ3[144,16]; + Rd_VPR128.8H[80,16] = TMPZ3[176,16]; + Rd_VPR128.8H[96,16] = TMPZ3[208,16]; + Rd_VPR128.8H[112,16] = TMPZ3[240,16]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.297 SQRDMULH (by element) page C7-2055 line 115168 MATCH x5f00d000/mask=xff00f400 @@ -34548,31 +18739,15 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :sqrdmulh Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b1101 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = sext(Rn_FPR16); - local tmp2:4 = 0; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp2, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp3:2 = * [register]:2 tmp2; - local tmp4:4 = sext(tmp3); - local tmp5:4 = tmp1 * tmp4; - local tmp6:4 = tmp5 * 2:4; - local tmp7:4 = tmp6 >> 16:4; - Rd_FPR16 = tmp7:2; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = sext(Rn_FPR16); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); local tmp4:4 = tmp1 * tmp3; local tmp5:4 = tmp4 * 2:4; local tmp6:4 = tmp5 >> 16:4; - local tmpd:2 = tmp6:2; - Zd = zext(tmpd); # assigning to Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_FPR16 = NEON_sqrdmulh(Rn_FPR16, tmp1); -@endif + Rd_FPR16 = tmp6:2; + zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.297 SQRDMULH (by element) page C7-2055 line 115168 MATCH x5f00d000/mask=xff00f400 @@ -34585,31 +18760,15 @@ is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b1101 & b_10=0 & Rd_FPR16 & Rn_FPR1 :sqrdmulh Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b1101 & b_10=0 & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = sext(Rn_FPR32); - local tmp2:4 = 0; # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp2, Re_VPR128.S, vIndex:4, 4, 16); - local tmp3:4 = * [register]:4 tmp2; - local tmp4:8 = sext(tmp3); - local tmp5:8 = tmp1 * tmp4; - local tmp6:8 = tmp5 * 2:8; - local tmp7:8 = tmp6 >> 32:8; - Rd_FPR32 = tmp7:4; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = sext(Rn_FPR32); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); local tmp4:8 = tmp1 * tmp3; local tmp5:8 = tmp4 * 2:8; local tmp6:8 = tmp5 >> 32:8; - local tmpd:4 = tmp6:4; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_FPR32 = NEON_sqrdmulh(Rn_FPR32, 
tmp1); -@endif + Rd_FPR32 = tmp6:4; + zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.298 SQRDMULH (vector) page C7-2058 line 115344 MATCH x7e20b400/mask=xff20fc00 @@ -34620,9 +18779,7 @@ is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b1101 & b_10=0 & Rd_FPR32 & Rn_FPR3 :sqrdmulh Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x16 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqrdmulh(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.298 SQRDMULH (vector) page C7-2058 line 115344 MATCH x7e20b400/mask=xff20fc00 @@ -34633,9 +18790,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115 :sqrdmulh Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x16 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqrdmulh(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.298 SQRDMULH (vector) page C7-2058 line 115344 MATCH x2e20b400/mask=xbf20fc00 @@ -34646,9 +18801,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115 :sqrdmulh Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x16 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqrdmulh(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.298 SQRDMULH (vector) page C7-2058 line 115344 MATCH x2e20b400/mask=xbf20fc00 @@ -34659,9 +18812,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sqrdmulh Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x16 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqrdmulh(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.298 SQRDMULH (vector) page C7-2058 line 115344 MATCH x2e20b400/mask=xbf20fc00 @@ -34672,9 +18823,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sqrdmulh Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x16 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqrdmulh(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.298 SQRDMULH (vector) page C7-2058 line 115344 MATCH x2e20b400/mask=xbf20fc00 @@ -34685,9 +18834,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sqrdmulh Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x16 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqrdmulh(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x5e205c00/mask=xff20fc00 @@ -34698,9 +18845,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sqrshl Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0xb & b_1010=1 & 
Rn_FPR8 & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sqrshl(Rn_FPR8, Rm_FPR8); -@endif } # C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x5e205c00/mask=xff20fc00 @@ -34711,9 +18856,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115= :sqrshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0xb & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_sqrshl(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x5e205c00/mask=xff20fc00 @@ -34724,9 +18867,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :sqrshl Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0xb & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqrshl(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x5e205c00/mask=xff20fc00 @@ -34737,9 +18878,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115 :sqrshl Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0xb & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqrshl(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 @@ -34750,9 +18889,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115 :sqrshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xb & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sqrshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 @@ -34763,9 +18900,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :sqrshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0xb & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_sqrshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 @@ -34776,9 +18911,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :sqrshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xb & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqrshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 @@ -34789,9 +18922,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sqrshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xb & b_1010=1 & Rn_VPR64.4H & 
Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqrshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 @@ -34802,9 +18933,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sqrshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xb & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqrshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 @@ -34815,9 +18944,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sqrshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xb & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sqrshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.299 SQRSHL page C7-2060 line 115472 MATCH x0e205c00/mask=xbf20fc00 @@ -34828,9 +18955,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :sqrshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xb & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqrshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x0f009c00/mask=xbf80fc00 @@ -34841,9 +18966,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sqrshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sqrshrn2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x0f009c00/mask=xbf80fc00 @@ -34854,9 +18977,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & :sqrshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqrshrn(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x0f009c00/mask=xbf80fc00 @@ -34867,9 +18988,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & :sqrshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqrshrn(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x0f009c00/mask=xbf80fc00 @@ -34880,9 +18999,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & 
Imm_shr_imm16 & b_1115=0x13 :sqrshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqrshrn2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x0f009c00/mask=xbf80fc00 @@ -34893,9 +19010,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & :sqrshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sqrshrn2(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x0f009c00/mask=xbf80fc00 @@ -34906,9 +19021,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & :sqrshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqrshrn2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x5f009c00/mask=xff80fc00 @@ -34920,9 +19033,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 :sqrshrn Rd_FPR8, Rn_FPR16, Imm_shr_imm8 is b_2331=0b010111110 & b_1922=0b0001 & b_1015=0b100111 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sqrshrn(Rd_FPR8, Rn_FPR16, Imm_shr_imm8:1); -@endif } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x5f009c00/mask=xff80fc00 @@ -34934,9 +19045,7 @@ is b_2331=0b010111110 & b_1922=0b0001 & b_1015=0b100111 & Rd_FPR8 & Rn_FPR16 & I :sqrshrn Rd_FPR16, Rn_FPR32, Imm_shr_imm16 is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b100111 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqrshrn(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); -@endif } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2062 line 115610 MATCH x5f009c00/mask=xff80fc00 @@ -34948,9 +19057,7 @@ is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b100111 & Rd_FPR16 & Rn_FPR32 & I :sqrshrn Rd_FPR32, Rn_FPR64, Imm_shr_imm32 is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b100111 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqrshrn(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); -@endif } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x2f008c00/mask=xbf80fc00 @@ -34961,9 +19068,7 @@ is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b100111 & Rd_FPR32 & Rn_FPR64 & Im :sqrshrun2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sqrshrun2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 
MATCH x2f008c00/mask=xbf80fc00 @@ -34974,9 +19079,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & :sqrshrun Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqrshrun(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x2f008c00/mask=xbf80fc00 @@ -34987,9 +19090,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & :sqrshrun Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqrshrun(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x2f008c00/mask=xbf80fc00 @@ -35000,9 +19101,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 :sqrshrun2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqrshrun2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x2f008c00/mask=xbf80fc00 @@ -35013,9 +19112,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & :sqrshrun Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sqrshrun(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x2f008c00/mask=xbf80fc00 @@ -35026,9 +19123,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & :sqrshrun2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqrshrun2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x7f008c00/mask=xff80fc00 @@ -35040,9 +19135,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 :sqrshrun Rd_FPR8, Rn_FPR16, Imm_shr_imm8 is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100011 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sqrshrun(Rd_FPR8, Rn_FPR16, Imm_shr_imm8:1); -@endif } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x7f008c00/mask=xff80fc00 @@ -35054,9 +19147,7 @@ is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100011 & Rd_FPR8 & Rn_FPR16 & I :sqrshrun Rd_FPR16, Rn_FPR32, Imm_shr_imm16 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100011 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd { -@if defined(SEMANTIC_pseudo) || 
defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqrshrun(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); -@endif } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2065 line 115795 MATCH x7f008c00/mask=xff80fc00 @@ -35068,9 +19159,7 @@ is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100011 & Rd_FPR16 & Rn_FPR32 & I :sqrshrun Rd_FPR32, Rn_FPR64, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100011 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqrshrun(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); -@endif } # C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 @@ -35081,9 +19170,7 @@ is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100011 & Rd_FPR32 & Rn_FPR64 & Im :sqshl Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sqshl(Rn_VPR128.16B, Imm_uimm3:1, 1:1); -@endif } # C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 @@ -35094,9 +19181,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1 :sqshl Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xe & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_sqshl(Rn_VPR128.2D, Imm_imm0_63:1, 8:1); -@endif } # C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 @@ -35107,9 +19192,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xe & :sqshl Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqshl(Rn_VPR64.2S, Imm_uimm5:1, 4:1); -@endif } # C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 @@ -35120,9 +19203,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_101 :sqshl Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqshl(Rn_VPR64.4H, Imm_uimm4:1, 2:1); -@endif } # C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 @@ -35133,9 +19214,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1 :sqshl Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqshl(Rn_VPR128.4S, Imm_uimm5:1, 4:1); -@endif } # C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 @@ -35146,9 +19225,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_101 :sqshl Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if 
defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sqshl(Rn_VPR64.8B, Imm_uimm3:1, 1:1); -@endif } # C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x0f007400/mask=xbf80fc00 @@ -35159,9 +19236,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1 :sqshl Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqshl(Rn_VPR128.8H, Imm_uimm4:1, 2:1); -@endif } # C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x5f007400/mask=xff80fc00 @@ -35173,9 +19248,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1 :sqshl Rd_FPR8, Rn_FPR8, Imm_shr_imm8 is b_2331=0b010111110 & b_1922=0b0001 & b_1015=0b011101 & Rd_FPR8 & Rn_FPR8 & Imm_shr_imm8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sqshl(Rn_FPR8, Imm_shr_imm8:1); -@endif } # C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x5f007400/mask=xff80fc00 @@ -35187,9 +19260,7 @@ is b_2331=0b010111110 & b_1922=0b0001 & b_1015=0b011101 & Rd_FPR8 & Rn_FPR8 & Im :sqshl Rd_FPR16, Rn_FPR16, Imm_shr_imm16 is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b011101 & Rd_FPR16 & Rn_FPR16 & Imm_shr_imm16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqshl(Rn_FPR16, Imm_shr_imm16:1); -@endif } # C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x5f007400/mask=xff80fc00 @@ -35201,9 +19272,7 @@ is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b011101 & Rd_FPR16 & Rn_FPR16 & I :sqshl Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b011101 & Rd_FPR32 & Rn_FPR32 & Imm_shr_imm32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqshl(Rn_FPR32, Imm_shr_imm32:1); -@endif } # C7.2.302 SQSHL (immediate) page C7-2068 line 115975 MATCH x5f007400/mask=xff80fc00 @@ -35215,9 +19284,7 @@ is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b011101 & Rd_FPR32 & Rn_FPR32 & Im :sqshl Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b010111110 & b_22=1 & b_1015=0b011101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_sqshl(Rn_FPR64, Imm_shr_imm64:1); -@endif } # C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x5e204c00/mask=xff20fc00 @@ -35228,9 +19295,7 @@ is b_2331=0b010111110 & b_22=1 & b_1015=0b011101 & Rd_FPR64 & Rn_FPR64 & Imm_shr :sqshl Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x9 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sqshl(Rn_FPR8, Rm_FPR8); -@endif } # C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x5e204c00/mask=xff20fc00 @@ -35241,9 +19306,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115= :sqshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x9 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_sqshl(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.303 
SQSHL (register) page C7-2071 line 116140 MATCH x5e204c00/mask=xff20fc00 @@ -35254,9 +19317,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :sqshl Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x9 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqshl(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x5e204c00/mask=xff20fc00 @@ -35267,9 +19328,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115 :sqshl Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x9 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqshl(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 @@ -35280,9 +19339,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115 :sqshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x9 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sqshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 @@ -35293,9 +19350,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :sqshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x9 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_sqshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 @@ -35306,9 +19361,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :sqshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x9 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 @@ -35319,9 +19372,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sqshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x9 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 @@ -35332,9 +19383,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sqshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x9 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || 
defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 @@ -35345,9 +19394,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sqshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x9 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sqshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.303 SQSHL (register) page C7-2071 line 116140 MATCH x0e204c00/mask=xbf20fc00 @@ -35358,9 +19405,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :sqshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x9 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 @@ -35371,9 +19416,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sqshlu Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xc & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sqshlu(Rn_VPR128.16B, Imm_uimm3:1, 1:1); -@endif } # C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 @@ -35384,9 +19427,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xc & b_1 :sqshlu Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xc & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_sqshlu(Rn_VPR128.2D, Imm_imm0_63:1, 8:1); -@endif } # C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 @@ -35397,9 +19438,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xc & :sqshlu Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xc & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqshlu(Rn_VPR64.2S, Imm_uimm5:1, 4:1); -@endif } # C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 @@ -35410,9 +19449,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xc & b_101 :sqshlu Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xc & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqshlu(Rn_VPR64.4H, Imm_uimm4:1, 2:1); -@endif } # C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 @@ -35423,9 +19460,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xc & b_1 :sqshlu Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xc & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if 
defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqshlu(Rn_VPR128.4S, Imm_uimm5:1, 4:1); -@endif } # C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 @@ -35436,9 +19471,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xc & b_101 :sqshlu Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xc & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sqshlu(Rn_VPR64.8B, Imm_uimm3:1, 1:1); -@endif } # C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x2f006400/mask=xbf80fc00 @@ -35449,9 +19482,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xc & b_1 :sqshlu Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xc & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqshlu(Rn_VPR128.8H, Imm_uimm4:1, 2:1); -@endif } # C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x7f006400/mask=xff80fc00 @@ -35463,9 +19494,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xc & b_1 :sqshlu Rd_FPR8, Rn_FPR8, Imm_shr_imm8 is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b011001 & Rd_FPR8 & Rn_FPR8 & Imm_shr_imm8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sqshlu(Rn_FPR8, Imm_shr_imm8:1); -@endif } # C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x7f006400/mask=xff80fc00 @@ -35477,9 +19506,7 @@ is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b011001 & Rd_FPR8 & Rn_FPR8 & Im :sqshlu Rd_FPR16, Rn_FPR16, Imm_shr_imm16 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b011001 & Rd_FPR16 & Rn_FPR16 & Imm_shr_imm16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqshlu(Rn_FPR16, Imm_shr_imm16:1); -@endif } # C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x7f006400/mask=xff80fc00 @@ -35491,9 +19518,7 @@ is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b011001 & Rd_FPR16 & Rn_FPR16 & I :sqshlu Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b011001 & Rd_FPR32 & Rn_FPR32 & Imm_shr_imm32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqshlu(Rn_FPR32, Imm_shr_imm32:1); -@endif } # C7.2.304 SQSHLU page C7-2073 line 116278 MATCH x7f006400/mask=xff80fc00 @@ -35505,9 +19530,7 @@ is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b011001 & Rd_FPR32 & Rn_FPR32 & Im :sqshlu Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b011111110 & b_22=1 & b_1015=0b011001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_sqshlu(Rn_FPR64, Imm_shr_imm64:1); -@endif } # C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00 @@ -35518,9 +19541,7 @@ is b_2331=0b011111110 & b_22=1 & b_1015=0b011001 & Rd_FPR64 & Rn_FPR64 & Imm_shr :sqshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sqshrn2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 
2:1); -@endif } # C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00 @@ -35531,9 +19552,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & :sqshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqshrn(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00 @@ -35544,9 +19563,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & :sqshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqshrn(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00 @@ -35557,9 +19574,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 :sqshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqshrn2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00 @@ -35570,9 +19585,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & :sqshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sqshrn(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x0f009400/mask=xbf80fc00 @@ -35583,9 +19596,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & :sqshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqshrn2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x5f009400/mask=xff80fc00 @@ -35597,9 +19608,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 :sqshrn Rd_FPR8, Rd_FPR16, Imm_shr_imm8 is b_2331=0b010111110 & b_1922=0b0001 & b_1015=0b100101 & Rd_FPR8 & Rd_FPR16 & Imm_shr_imm8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sqshrn(Rd_FPR8, Rd_FPR16, Imm_shr_imm8:1); -@endif } # C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x5f009400/mask=xff80fc00 @@ -35611,9 +19620,7 @@ is b_2331=0b010111110 & b_1922=0b0001 & b_1015=0b100101 & Rd_FPR8 & Rd_FPR16 & I :sqshrn Rd_FPR16, Rd_FPR32, Imm_shr_imm16 is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b100101 & Rd_FPR16 & Rd_FPR32 & Imm_shr_imm16 & Zd { -@if defined(SEMANTIC_pseudo) || 
defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqshrn(Rd_FPR16, Rd_FPR32, Imm_shr_imm16:1); -@endif } # C7.2.305 SQSHRN, SQSHRN2 page C7-2076 line 116443 MATCH x5f009400/mask=xff80fc00 @@ -35625,9 +19632,7 @@ is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b100101 & Rd_FPR16 & Rd_FPR32 & I :sqshrn Rd_FPR32, Rd_FPR64, Imm_shr_imm32 is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b100101 & Rd_FPR32 & Rd_FPR64 & Imm_shr_imm32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqshrn(Rd_FPR32, Rd_FPR64, Imm_shr_imm32:1); -@endif } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 @@ -35638,9 +19643,7 @@ is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b100101 & Rd_FPR32 & Rd_FPR64 & Im :sqshrun2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sqshrun2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 @@ -35651,9 +19654,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & :sqshrun Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqshrun(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 @@ -35664,9 +19665,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & :sqshrun Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqshrun(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 @@ -35677,9 +19676,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 :sqshrun2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqshrun2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 @@ -35690,9 +19687,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & :sqshrun Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sqshrun(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x2f008400/mask=xbf80fc00 @@ -35703,9 +19698,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & :sqshrun2 Rd_VPR128.8H, Rn_VPR128.4S, 
Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqshrun2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x7f008400/mask=xff80fc00 @@ -35717,9 +19710,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 :sqshrun Rd_FPR8, Rd_FPR16, Imm_shr_imm8 is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100001 & Rd_FPR8 & Rd_FPR16 & Imm_shr_imm8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sqshrun(Rd_FPR8, Rd_FPR16, Imm_shr_imm8:1); -@endif } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x7f008400/mask=xff80fc00 @@ -35731,9 +19722,7 @@ is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100001 & Rd_FPR8 & Rd_FPR16 & I :sqshrun Rd_FPR16, Rd_FPR32, Imm_shr_imm16 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100001 & Rd_FPR16 & Rd_FPR32 & Imm_shr_imm16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqshrun(Rd_FPR16, Rd_FPR32, Imm_shr_imm16:1); -@endif } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2079 line 116627 MATCH x7f008400/mask=xff80fc00 @@ -35745,9 +19734,7 @@ is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100001 & Rd_FPR16 & Rd_FPR32 & I :sqshrun Rd_FPR32, Rd_FPR64, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100001 & Rd_FPR32 & Rd_FPR64 & Imm_shr_imm32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqshrun(Rd_FPR32, Rd_FPR64, Imm_shr_imm32:1); -@endif } # C7.2.307 SQSUB page C7-2082 line 116807 MATCH x5e202c00/mask=xff20fc00 @@ -35758,9 +19745,7 @@ is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100001 & Rd_FPR32 & Rd_FPR64 & Im :sqsub Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x5 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sqsub(Rn_FPR8, Rm_FPR8); -@endif } # C7.2.307 SQSUB page C7-2082 line 116807 MATCH x5e202c00/mask=xff20fc00 @@ -35771,9 +19756,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115= :sqsub Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x5 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_sqsub(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.307 SQSUB page C7-2082 line 116807 MATCH x5e202c00/mask=xff20fc00 @@ -35784,9 +19767,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :sqsub Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x5 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqsub(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.307 SQSUB page C7-2082 line 116807 MATCH x5e202c00/mask=xff20fc00 @@ -35797,9 +19778,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115 :sqsub Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x5 & b_1010=1 & Rn_FPR32 & Rd_FPR32 
& Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqsub(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 @@ -35810,9 +19789,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115 :sqsub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x5 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sqsub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 @@ -35823,9 +19800,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :sqsub Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x5 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_sqsub(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 @@ -35836,9 +19811,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :sqsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x5 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 @@ -35849,9 +19822,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sqsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x5 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 @@ -35862,9 +19833,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sqsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x5 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 @@ -35875,9 +19844,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sqsub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x5 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sqsub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.307 SQSUB page C7-2082 line 116807 MATCH x0e202c00/mask=xbf20fc00 @@ -35888,9 +19855,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :sqsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & 
advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x5 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x5e214800/mask=xff3ffc00 @@ -35901,9 +19866,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sqxtn Rd_FPR8, Rn_FPR16 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_FPR16 & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sqxtn(Rd_FPR8, Rn_FPR16); -@endif } # C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x5e214800/mask=xff3ffc00 @@ -35914,9 +19877,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x14 & :sqxtn Rd_FPR16, Rn_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_FPR32 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqxtn(Rd_FPR16, Rn_FPR32); -@endif } # C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x5e214800/mask=xff3ffc00 @@ -35927,9 +19888,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x14 & :sqxtn Rd_FPR32, Rn_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_FPR64 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqxtn(Rd_FPR32, Rn_FPR64); -@endif } # C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x0e214800/mask=xbf3ffc00 @@ -35940,9 +19899,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x14 & :sqxtn2 Rd_VPR128.16B, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sqxtn2(Rd_VPR128.16B, Rn_VPR128.8H, 2:1); -@endif } # C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x0e214800/mask=xbf3ffc00 @@ -35953,9 +19910,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :sqxtn2 Rd_VPR128.8H, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqxtn2(Rd_VPR128.8H, Rn_VPR128.4S, 4:1); -@endif } # C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x0e214800/mask=xbf3ffc00 @@ -35966,9 +19921,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :sqxtn2 Rd_VPR128.4S, Rn_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqxtn2(Rd_VPR128.4S, Rn_VPR128.2D, 8:1); -@endif } # C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x0e214800/mask=xbf3ffc00 @@ -35979,9 +19932,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :sqxtn Rd_VPR64.2S, Rn_VPR128.2D is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { 
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqxtn(Rd_VPR64.2S, Rn_VPR128.2D, 8:1); -@endif } # C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x0e214800/mask=xbf3ffc00 @@ -35992,9 +19943,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :sqxtn Rd_VPR64.4H, Rn_VPR128.4S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqxtn(Rd_VPR64.4H, Rn_VPR128.4S, 4:1); -@endif } # C7.2.308 SQXTN, SQXTN2 page C7-2084 line 116932 MATCH x0e214800/mask=xbf3ffc00 @@ -36005,9 +19954,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :sqxtn Rd_VPR64.8B, Rn_VPR128.8H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sqxtn(Rd_VPR64.8B, Rn_VPR128.8H, 2:1); -@endif } # C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x7e212800/mask=xff3ffc00 @@ -36019,9 +19966,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :sqxtun Rd_FPR8, Rn_FPR16 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100001001010 & Rd_FPR8 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_sqxtun(Rd_FPR8, Rn_FPR16); -@endif } # C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x7e212800/mask=xff3ffc00 @@ -36033,9 +19978,7 @@ is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100001001010 & Rd_ :sqxtun Rd_FPR16, Rn_FPR32 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100001001010 & Rd_FPR16 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_sqxtun(Rd_FPR16, Rn_FPR32); -@endif } # C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x7e212800/mask=xff3ffc00 @@ -36047,9 +19990,7 @@ is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100001001010 & Rd_ :sqxtun Rd_FPR32, Rn_FPR64 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100001001010 & Rd_FPR32 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_sqxtun(Rd_FPR32, Rn_FPR64); -@endif } # C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x2e212800/mask=xbf3ffc00 @@ -36061,9 +20002,7 @@ is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100001001010 & Rd_ :sqxtun Rd_VPR64.8B, Rn_VPR128.8H is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001001010 & Rd_VPR64.8B & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sqxtun(Rd_VPR64.8B, Rn_VPR128.8H, 2:1); -@endif } # C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x2e212800/mask=xbf3ffc00 @@ -36075,9 +20014,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001001010 & Rd_ :sqxtun2 Rd_VPR128.16B, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001001010 & Rd_VPR128.16B & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sqxtun2(Rd_VPR128.16B, Rn_VPR128.8H, 2:1); -@endif } # C7.2.309 SQXTUN, 
SQXTUN2 page C7-2087 line 117086 MATCH x2e212800/mask=xbf3ffc00 @@ -36089,9 +20026,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001001010 & Rd_ :sqxtun Rd_VPR64.4H, Rn_VPR128.4S is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001001010 & Rd_VPR64.4H & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sqxtun(Rd_VPR64.4H, Rn_VPR128.4S, 4:1); -@endif } # C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x2e212800/mask=xbf3ffc00 @@ -36103,9 +20038,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001001010 & Rd_ :sqxtun2 Rd_VPR128.8H, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001001010 & Rd_VPR128.8H & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sqxtun2(Rd_VPR128.8H, Rn_VPR128.4S, 4:1); -@endif } # C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x2e212800/mask=xbf3ffc00 @@ -36117,9 +20050,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001001010 & Rd_ :sqxtun Rd_VPR64.2S, Rn_VPR128.2D is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001001010 & Rd_VPR64.2S & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sqxtun(Rd_VPR64.2S, Rn_VPR128.2D, 8:1); -@endif } # C7.2.309 SQXTUN, SQXTUN2 page C7-2087 line 117086 MATCH x2e212800/mask=xbf3ffc00 @@ -36131,9 +20062,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001001010 & Rd_ :sqxtun2 Rd_VPR128.4S, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001001010 & Rd_VPR128.4S & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sqxtun2(Rd_VPR128.4S, Rn_VPR128.2D, 8:1); -@endif } # C7.2.310 SRHADD page C7-2090 line 117237 MATCH x0e201400/mask=xbf20fc00 @@ -36144,9 +20073,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001001010 & Rd_ :srhadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x2 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rn_VPR128.16B = NEON_srhadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.310 SRHADD page C7-2090 line 117237 MATCH x0e201400/mask=xbf20fc00 @@ -36157,9 +20084,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :srhadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x2 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rn_VPR64.2S = NEON_srhadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.310 SRHADD page C7-2090 line 117237 MATCH x0e201400/mask=xbf20fc00 @@ -36170,9 +20095,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :srhadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x2 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rn_VPR64.4H = NEON_srhadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.310 SRHADD page 
C7-2090 line 117237 MATCH x0e201400/mask=xbf20fc00 @@ -36183,9 +20106,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :srhadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x2 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rn_VPR128.4S = NEON_srhadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.310 SRHADD page C7-2090 line 117237 MATCH x0e201400/mask=xbf20fc00 @@ -36196,9 +20117,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :srhadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x2 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rn_VPR64.8B = NEON_srhadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.310 SRHADD page C7-2090 line 117237 MATCH x0e201400/mask=xbf20fc00 @@ -36209,9 +20128,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :srhadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x2 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rn_VPR128.8H = NEON_srhadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 @@ -36222,9 +20139,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sri Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x8 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sri(Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8:4, 1:1); -@endif } # C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 @@ -36235,9 +20150,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x8 & :sri Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x8 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_sri(Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64:4, 8:1); -@endif } # C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 @@ -36248,9 +20161,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x8 :sri Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x8 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sri(Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32:4, 4:1); -@endif } # C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 @@ -36261,9 +20172,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x8 & b :sri Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x8 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) 
Rd_VPR64.4H = NEON_sri(Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16:4, 2:1); -@endif } # C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 @@ -36274,9 +20183,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x8 & :sri Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x8 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sri(Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32:4, 4:1); -@endif } # C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 @@ -36287,9 +20194,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x8 & b :sri Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x8 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sri(Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8:4, 1:1); -@endif } # C7.2.311 SRI page C7-2092 line 117324 MATCH x2f004400/mask=xbf80fc00 @@ -36300,9 +20205,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x8 & :sri Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x8 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sri(Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16:4, 2:1); -@endif } # C7.2.311 SRI page C7-2092 line 117324 MATCH x7f004400/mask=xff80fc00 @@ -36313,9 +20216,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x8 & :sri Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b011111110 & b_22=1 & b_1015=0b010001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_sri(Rd_FPR64, Rn_FPR64, Imm_shr_imm64:1); -@endif } # C7.2.312 SRSHL page C7-2095 line 117488 MATCH x5e205400/mask=xff20fc00 @@ -36326,9 +20227,7 @@ is b_2331=0b011111110 & b_22=1 & b_1015=0b010001 & Rd_FPR64 & Rn_FPR64 & Imm_shr :srshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0xa & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_srshl(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 @@ -36339,9 +20238,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :srshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xa & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_srshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 @@ -36352,9 +20249,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :srshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0xa & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || 
defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_srshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 @@ -36365,9 +20260,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :srshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xa & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_srshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 @@ -36378,9 +20271,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :srshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xa & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_srshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 @@ -36391,9 +20282,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :srshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xa & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_srshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 @@ -36404,9 +20293,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :srshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xa & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_srshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.312 SRSHL page C7-2095 line 117488 MATCH x0e205400/mask=xbf20fc00 @@ -36417,9 +20304,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :srshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xa & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_srshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 @@ -36430,9 +20315,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :srshr Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_srshr(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 @@ -36443,9 +20326,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & :srshr Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x4 & b_1010=1 & Rn_VPR128.2D & 
Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_srshr(Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); -@endif } # C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 @@ -36456,9 +20337,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x4 :srshr Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_srshr(Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 @@ -36469,9 +20348,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b :srshr Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_srshr(Rn_VPR64.4H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 @@ -36482,9 +20359,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & :srshr Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_srshr(Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 @@ -36495,9 +20370,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b :srshr Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_srshr(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.313 SRSHR page C7-2097 line 117624 MATCH x0f002400/mask=xbf80fc00 @@ -36508,9 +20381,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & :srshr Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_srshr(Rn_VPR128.8H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.313 SRSHR page C7-2097 line 117624 MATCH x5f002400/mask=xff80fc00 @@ -36521,9 +20392,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & :srshr Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b010111110 & b_22=1 & b_1015=0b001001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_srshr(Rn_FPR64, Imm_shr_imm64:1); -@endif } # C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 @@ -36535,134 +20404,41 @@ is b_2331=0b010111110 & b_22=1 & b_1015=0b001001 & Rd_FPR64 & Rn_FPR64 & Imm_shr :srsra Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if 
defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.16B s>> Imm_shr_imm8:1 on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, TMPQ1, 0, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, TMPQ1, 1, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, TMPQ1, 2, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, TMPQ1, 3, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, TMPQ1, 4, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, TMPQ1, 5, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, TMPQ1, 6, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, TMPQ1, 7, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, TMPQ1, 8, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, TMPQ1, 9, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, TMPQ1, 10, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, TMPQ1, 11, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, TMPQ1, 12, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, TMPQ1, 13, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, TMPQ1, 14, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, TMPQ1, 15, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; + TMPQ1[0,8] = Rn_VPR128.16B[0,8] s>> Imm_shr_imm8:1; + TMPQ1[8,8] = Rn_VPR128.16B[8,8] s>> Imm_shr_imm8:1; + TMPQ1[16,8] = Rn_VPR128.16B[16,8] s>> Imm_shr_imm8:1; + TMPQ1[24,8] = Rn_VPR128.16B[24,8] s>> Imm_shr_imm8:1; + TMPQ1[32,8] = Rn_VPR128.16B[32,8] s>> Imm_shr_imm8:1; + TMPQ1[40,8] = Rn_VPR128.16B[40,8] s>> Imm_shr_imm8:1; + TMPQ1[48,8] = Rn_VPR128.16B[48,8] s>> Imm_shr_imm8:1; + TMPQ1[56,8] = Rn_VPR128.16B[56,8] s>> Imm_shr_imm8:1; + TMPQ1[64,8] = Rn_VPR128.16B[64,8] s>> Imm_shr_imm8:1; + TMPQ1[72,8] = Rn_VPR128.16B[72,8] s>> Imm_shr_imm8:1; + TMPQ1[80,8] = Rn_VPR128.16B[80,8] s>> Imm_shr_imm8:1; + TMPQ1[88,8] = Rn_VPR128.16B[88,8] s>> Imm_shr_imm8:1; + TMPQ1[96,8] = Rn_VPR128.16B[96,8] s>> Imm_shr_imm8:1; + TMPQ1[104,8] = Rn_VPR128.16B[104,8] s>> Imm_shr_imm8:1; + TMPQ1[112,8] = Rn_VPR128.16B[112,8] s>> Imm_shr_imm8:1; + TMPQ1[120,8] = Rn_VPR128.16B[120,8] 
s>> Imm_shr_imm8:1; # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR128.16B, 0, 1, 16); - simd_address_at(tmp5, TMPQ1, 0, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 1, 1, 16); - simd_address_at(tmp5, TMPQ1, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 2, 1, 16); - simd_address_at(tmp5, TMPQ1, 2, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 3, 1, 16); - simd_address_at(tmp5, TMPQ1, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 4, 1, 16); - simd_address_at(tmp5, TMPQ1, 4, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 5, 1, 16); - simd_address_at(tmp5, TMPQ1, 5, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 6, 1, 16); - simd_address_at(tmp5, TMPQ1, 6, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 7, 1, 16); - simd_address_at(tmp5, TMPQ1, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 8, 1, 16); - simd_address_at(tmp5, TMPQ1, 8, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 9, 1, 16); - simd_address_at(tmp5, TMPQ1, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 10, 1, 16); - simd_address_at(tmp5, TMPQ1, 10, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 11, 1, 16); - simd_address_at(tmp5, TMPQ1, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 12, 1, 16); - simd_address_at(tmp5, TMPQ1, 12, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 13, 1, 16); - simd_address_at(tmp5, TMPQ1, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 14, 1, 16); - simd_address_at(tmp5, TMPQ1, 14, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 15, 1, 16); - simd_address_at(tmp5, TMPQ1, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* 
[register]:1 tmp5); + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SRIGHT(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.16B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_srsra(Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 @@ -36674,38 +20450,14 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & :srsra Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x6 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm64); # simd infix TMPQ1 = Rn_VPR128.2D s>> tmp1 on lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) s>> tmp1; - simd_address_at(tmp3, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) s>> tmp1; + TMPQ1[0,64] = Rn_VPR128.2D[0,64] s>> tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] s>> tmp1; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp6, TMPQ1, 0, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp5) + (* [register]:8 tmp6); - simd_address_at(tmp5, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp6, TMPQ1, 1, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp5) + (* [register]:8 tmp6); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm64); - local tmp2:16 = SIMD_INT_SRIGHT(Rn_VPR128.2D, tmp1, 8:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_srsra(Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); -@endif } # C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 @@ -36717,36 +20469,13 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x6 :srsra Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & 
u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.2S s>> Imm_shr_imm32:4 on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPD1, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp2) s>> Imm_shr_imm32:4; - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPD1, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp2) s>> Imm_shr_imm32:4; + TMPD1[0,32] = Rn_VPR64.2S[0,32] s>> Imm_shr_imm32:4; + TMPD1[32,32] = Rn_VPR64.2S[32,32] s>> Imm_shr_imm32:4; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp5, TMPD1, 0, 4, 8); - simd_address_at(tmp6, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); - simd_address_at(tmp4, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp5, TMPD1, 1, 4, 8); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_SRIGHT(Rn_VPR64.2S, Imm_shr_imm32:4, 4:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_srsra(Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 @@ -36758,50 +20487,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b :srsra Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.4H s>> Imm_shr_imm16:2 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPD1, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPD1, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPD1, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPD1, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; + TMPD1[0,16] = Rn_VPR64.4H[0,16] s>> Imm_shr_imm16:2; + TMPD1[16,16] = Rn_VPR64.4H[16,16] s>> Imm_shr_imm16:2; + TMPD1[32,16] = Rn_VPR64.4H[32,16] s>> Imm_shr_imm16:2; + TMPD1[48,16] = Rn_VPR64.4H[48,16] s>> Imm_shr_imm16:2; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp5, TMPD1, 0, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp5, TMPD1, 1, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - 
simd_address_at(tmp4, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp5, TMPD1, 2, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp5, TMPD1, 3, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_SRIGHT(Rn_VPR64.4H, Imm_shr_imm16:2, 2:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.4H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_srsra(Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 @@ -36813,50 +20509,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & :srsra Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S s>> Imm_shr_imm32:4 on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) s>> Imm_shr_imm32:4; - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) s>> Imm_shr_imm32:4; - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) s>> Imm_shr_imm32:4; - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) s>> Imm_shr_imm32:4; + TMPQ1[0,32] = Rn_VPR128.4S[0,32] s>> Imm_shr_imm32:4; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] s>> Imm_shr_imm32:4; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] s>> Imm_shr_imm32:4; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] s>> Imm_shr_imm32:4; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); - simd_address_at(tmp4, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); - simd_address_at(tmp4, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); - simd_address_at(tmp4, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + 
TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SRIGHT(Rn_VPR128.4S, Imm_shr_imm32:4, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_srsra(Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 @@ -36868,78 +20531,25 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b :srsra Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.8B s>> Imm_shr_imm8:1 on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPD1, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPD1, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPD1, 2, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPD1, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPD1, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPD1, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPD1, 6, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPD1, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; + TMPD1[0,8] = Rn_VPR64.8B[0,8] s>> Imm_shr_imm8:1; + TMPD1[8,8] = Rn_VPR64.8B[8,8] s>> Imm_shr_imm8:1; + TMPD1[16,8] = Rn_VPR64.8B[16,8] s>> Imm_shr_imm8:1; + TMPD1[24,8] = Rn_VPR64.8B[24,8] s>> Imm_shr_imm8:1; + TMPD1[32,8] = Rn_VPR64.8B[32,8] s>> Imm_shr_imm8:1; + TMPD1[40,8] = Rn_VPR64.8B[40,8] s>> Imm_shr_imm8:1; + TMPD1[48,8] = Rn_VPR64.8B[48,8] s>> Imm_shr_imm8:1; + TMPD1[56,8] = Rn_VPR64.8B[56,8] s>> Imm_shr_imm8:1; # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR64.8B, 0, 1, 8); - simd_address_at(tmp5, TMPD1, 0, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 1, 1, 8); - simd_address_at(tmp5, TMPD1, 1, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 2, 1, 8); - simd_address_at(tmp5, TMPD1, 2, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 3, 1, 8); - simd_address_at(tmp5, TMPD1, 3, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 4, 1, 8); - simd_address_at(tmp5, 
TMPD1, 4, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 5, 1, 8); - simd_address_at(tmp5, TMPD1, 5, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 6, 1, 8); - simd_address_at(tmp5, TMPD1, 6, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 7, 1, 8); - simd_address_at(tmp5, TMPD1, 7, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_SRIGHT(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.8B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_srsra(Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.314 SRSRA page C7-2099 line 117760 MATCH x0f003400/mask=xbf80fc00 @@ -36951,78 +20561,25 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & :srsra Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H s>> Imm_shr_imm16:2 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; + TMPQ1[0,16] = Rn_VPR128.8H[0,16] s>> Imm_shr_imm16:2; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] s>> Imm_shr_imm16:2; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] s>> Imm_shr_imm16:2; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] s>> Imm_shr_imm16:2; + TMPQ1[64,16] = 
Rn_VPR128.8H[64,16] s>> Imm_shr_imm16:2; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] s>> Imm_shr_imm16:2; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] s>> Imm_shr_imm16:2; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] s>> Imm_shr_imm16:2; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SRIGHT(Rn_VPR128.8H, Imm_shr_imm16:2, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_srsra(Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.314 SRSRA page C7-2099 line 117760 MATCH x5f003400/mask=xff80fc00 @@ -37034,19 +20591,10 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & :srsra Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b010111110 & b_22=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm64); local tmp2:8 = Rn_FPR64 s>> tmp1; Rd_FPR64 = Rd_FPR64 + tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm64); - local tmp2:8 = Rn_FPR64 s>> tmp1; - local tmpd:8 = Rd_FPR64 + tmp2; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif 
defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_srsra(Rd_FPR64, Rn_FPR64, Imm_shr_imm64:1); -@endif } # C7.2.315 SSHL page C7-2101 line 117896 MATCH x5e204400/mask=xff20fc00 @@ -37057,9 +20605,7 @@ is b_2331=0b010111110 & b_22=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Imm_shr :sshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x8 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_sshl(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 @@ -37070,9 +20616,7 @@ is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :sshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x8 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_sshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 @@ -37083,9 +20627,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :sshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x8 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_sshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 @@ -37096,9 +20638,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :sshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x8 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_sshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 @@ -37109,9 +20649,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x8 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_sshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 @@ -37122,9 +20660,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x8 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 @@ -37135,9 +20671,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x8 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if 
defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_sshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.315 SSHL page C7-2101 line 117896 MATCH x0e204400/mask=xbf20fc00 @@ -37148,9 +20682,7 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :sshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x8 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_sshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 @@ -37163,75 +20695,27 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :sshll2 Rd_VPR128.8H, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - local tmp6:2 = Imm_uimm3; - # simd infix Rd_VPR128.8H = TMPQ2 << tmp6 on lane size 2 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ2, 0, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 1, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 2, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 3, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 4, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 5, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 6, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 7, 2, 16); - 
simd_address_at(tmp8, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 1:1); + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); local tmp3:2 = Imm_uimm3; - local tmpd:16 = SIMD_INT_LEFT(tmp2, tmp3, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sshll2(Rn_VPR128.16B, Imm_uimm3:1, 1:1); -@endif + # simd infix Rd_VPR128.8H = TMPQ2 << tmp3 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ2[0,16] << tmp3; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] << tmp3; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] << tmp3; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] << tmp3; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] << tmp3; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] << tmp3; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] << tmp3; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 @@ -37244,35 +20728,14 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_ :sshll Rd_VPR128.2D, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - local tmp4:8 = Imm_uimm5; - # simd infix Rd_VPR128.2D = TMPQ1 << tmp4 on lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp5) << tmp4; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); local tmp2:8 = Imm_uimm5; - local tmpd:16 = SIMD_INT_LEFT(tmp1, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sshll(Rn_VPR64.2S, Imm_uimm5:1, 4:1); -@endif + # simd infix Rd_VPR128.2D = TMPQ1 << tmp2 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] << tmp2; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 @@ -37285,47 +20748,18 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_10 :sshll Rd_VPR128.4S, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - 
simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - local tmp4:4 = Imm_uimm4; - # simd infix Rd_VPR128.4S = TMPQ1 << tmp4 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp5) << tmp4; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); local tmp2:4 = Imm_uimm4; - local tmpd:16 = SIMD_INT_LEFT(tmp1, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sshll(Rn_VPR64.4H, Imm_uimm4:1, 2:1); -@endif + # simd infix Rd_VPR128.4S = TMPQ1 << tmp2 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] << tmp2; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] << tmp2; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] << tmp2; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 @@ -37338,39 +20772,15 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_ :sshll2 Rd_VPR128.2D, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:8 = Imm_uimm5; - # simd infix Rd_VPR128.2D = TMPQ2 << tmp6 on lane size 8 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ2, 0, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 1, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp7) << tmp6; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); + TMPQ2[0,64] = 
sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); local tmp3:8 = Imm_uimm5; - local tmpd:16 = SIMD_INT_LEFT(tmp2, tmp3, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sshll2(Rn_VPR128.4S, Imm_uimm5:1, 4:1); -@endif + # simd infix Rd_VPR128.2D = TMPQ2 << tmp3 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] << tmp3; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 @@ -37383,71 +20793,26 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_10 :sshll Rd_VPR128.8H, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - local tmp4:2 = Imm_uimm3; - # simd infix Rd_VPR128.8H = TMPQ1 << tmp4 on lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.8B, 1:1); + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + 
TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); local tmp2:2 = Imm_uimm3; - local tmpd:16 = SIMD_INT_LEFT(tmp1, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sshll(Rn_VPR64.8B, Imm_uimm3:1, 1:1); -@endif + # simd infix Rd_VPR128.8H = TMPQ1 << tmp2 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ1[0,16] << tmp2; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] << tmp2; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] << tmp2; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] << tmp2; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] << tmp2; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] << tmp2; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] << tmp2; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.316 SSHLL, SSHLL2 page C7-2104 line 118053 MATCH x0f00a400/mask=xbf80fc00 @@ -37460,51 +20825,19 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_ :sshll2 Rd_VPR128.4S, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = Imm_uimm4; - # simd infix Rd_VPR128.4S = TMPQ2 << tmp6 on lane size 4 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ2, 0, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 2, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 3, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) << tmp6; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); local tmp3:4 = Imm_uimm4; - local tmpd:16 = SIMD_INT_LEFT(tmp2, tmp3, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sshll2(Rn_VPR128.8H, Imm_uimm4:1, 2:1); -@endif + # simd infix Rd_VPR128.4S = TMPQ2 << tmp3 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] << tmp3; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] << tmp3; + 
Rd_VPR128.4S[64,32] = TMPQ2[64,32] << tmp3; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.317 SSHR page C7-2106 line 118183 MATCH x5f000400/mask=xff80fc00 @@ -37515,9 +20848,7 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_ :sshr Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_3031=1 & u=0 & b_2428=0x1f & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x0 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_sshr(Rn_FPR64, Imm_shr_imm64:1); -@endif } # C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 @@ -37529,65 +20860,24 @@ is b_3031=1 & u=0 & b_2428=0x1f & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x0 & b_1 :sshr Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.16B = Rn_VPR128.16B s>> Imm_shr_imm8:1 on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 14, 1, 16); 
- * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] s>> Imm_shr_imm8:1; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] s>> Imm_shr_imm8:1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_SRIGHT(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_sshr(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 @@ -37599,25 +20889,11 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & :sshr Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x0 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm64); # simd infix Rd_VPR128.2D = Rn_VPR128.2D s>> tmp1 on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp2) s>> tmp1; - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp2) s>> tmp1; + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] s>> tmp1; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] s>> tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm64); - local tmpd:16 = SIMD_INT_SRIGHT(Rn_VPR128.2D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sshr(Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); -@endif } # C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 @@ -37629,25 +20905,11 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x0 :sshr Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Imm_shr_imm32; # simd infix Rd_VPR64.2S = Rn_VPR64.2S s>> tmp1 on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp2) s>> tmp1; - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - 
simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp2) s>> tmp1; + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] s>> tmp1; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] s>> tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Imm_shr_imm32; - local tmpd:8 = SIMD_INT_SRIGHT(Rn_VPR64.2S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_sshr(Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 @@ -37659,29 +20921,12 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b :sshr Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.4H = Rn_VPR64.4H s>> Imm_shr_imm16:2 on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = (* [register]:2 tmp1) s>> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = (* [register]:2 tmp1) s>> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp2 = (* [register]:2 tmp1) s>> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = (* [register]:2 tmp1) s>> Imm_shr_imm16:2; + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] s>> Imm_shr_imm16:2; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] s>> Imm_shr_imm16:2; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] s>> Imm_shr_imm16:2; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] s>> Imm_shr_imm16:2; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_SRIGHT(Rn_VPR64.4H, Imm_shr_imm16:2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_sshr(Rn_VPR64.4H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 @@ -37693,31 +20938,13 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & :sshr Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Imm_shr_imm32; # simd infix Rd_VPR128.4S = Rn_VPR128.4S s>> tmp1 on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) s>> tmp1; - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) s>> tmp1; - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) s>> tmp1; - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) s>> tmp1; + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] s>> tmp1; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] s>> tmp1; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] s>> tmp1; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] s>> tmp1; 
zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Imm_shr_imm32; - local tmpd:16 = SIMD_INT_SRIGHT(Rn_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sshr(Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 @@ -37729,41 +20956,16 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b :sshr Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.8B = Rn_VPR64.8B s>> Imm_shr_imm8:1 on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) s>> Imm_shr_imm8:1; + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] s>> Imm_shr_imm8:1; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] s>> Imm_shr_imm8:1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_SRIGHT(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_sshr(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.317 SSHR page C7-2106 line 118183 MATCH x0f000400/mask=xbf80fc00 @@ -37775,41 +20977,16 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & :sshr Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.8H = Rn_VPR128.8H s>> Imm_shr_imm16:2 on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) s>> Imm_shr_imm16:2; - simd_address_at(tmp1, 
Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) s>> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) s>> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) s>> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) s>> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) s>> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) s>> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) s>> Imm_shr_imm16:2; + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] s>> Imm_shr_imm16:2; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] s>> Imm_shr_imm16:2; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_SRIGHT(Rn_VPR128.8H, Imm_shr_imm16:2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sshr(Rn_VPR128.8H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 @@ -37821,134 +20998,41 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & :ssra Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.16B s>> Imm_shr_imm8:1 on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, TMPQ1, 0, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, TMPQ1, 1, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, TMPQ1, 2, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, TMPQ1, 3, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, TMPQ1, 4, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, TMPQ1, 5, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, TMPQ1, 6, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - 
simd_address_at(tmp2, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, TMPQ1, 7, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, TMPQ1, 8, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, TMPQ1, 9, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, TMPQ1, 10, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, TMPQ1, 11, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, TMPQ1, 12, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, TMPQ1, 13, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, TMPQ1, 14, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, TMPQ1, 15, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; + TMPQ1[0,8] = Rn_VPR128.16B[0,8] s>> Imm_shr_imm8:1; + TMPQ1[8,8] = Rn_VPR128.16B[8,8] s>> Imm_shr_imm8:1; + TMPQ1[16,8] = Rn_VPR128.16B[16,8] s>> Imm_shr_imm8:1; + TMPQ1[24,8] = Rn_VPR128.16B[24,8] s>> Imm_shr_imm8:1; + TMPQ1[32,8] = Rn_VPR128.16B[32,8] s>> Imm_shr_imm8:1; + TMPQ1[40,8] = Rn_VPR128.16B[40,8] s>> Imm_shr_imm8:1; + TMPQ1[48,8] = Rn_VPR128.16B[48,8] s>> Imm_shr_imm8:1; + TMPQ1[56,8] = Rn_VPR128.16B[56,8] s>> Imm_shr_imm8:1; + TMPQ1[64,8] = Rn_VPR128.16B[64,8] s>> Imm_shr_imm8:1; + TMPQ1[72,8] = Rn_VPR128.16B[72,8] s>> Imm_shr_imm8:1; + TMPQ1[80,8] = Rn_VPR128.16B[80,8] s>> Imm_shr_imm8:1; + TMPQ1[88,8] = Rn_VPR128.16B[88,8] s>> Imm_shr_imm8:1; + TMPQ1[96,8] = Rn_VPR128.16B[96,8] s>> Imm_shr_imm8:1; + TMPQ1[104,8] = Rn_VPR128.16B[104,8] s>> Imm_shr_imm8:1; + TMPQ1[112,8] = Rn_VPR128.16B[112,8] s>> Imm_shr_imm8:1; + TMPQ1[120,8] = Rn_VPR128.16B[120,8] s>> Imm_shr_imm8:1; # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR128.16B, 0, 1, 16); - simd_address_at(tmp5, TMPQ1, 0, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 1, 1, 16); - simd_address_at(tmp5, TMPQ1, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 2, 1, 16); - simd_address_at(tmp5, TMPQ1, 2, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 3, 1, 16); - simd_address_at(tmp5, TMPQ1, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 4, 1, 16); - simd_address_at(tmp5, TMPQ1, 4, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, 
Rd_VPR128.16B, 5, 1, 16); - simd_address_at(tmp5, TMPQ1, 5, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 6, 1, 16); - simd_address_at(tmp5, TMPQ1, 6, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 7, 1, 16); - simd_address_at(tmp5, TMPQ1, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 8, 1, 16); - simd_address_at(tmp5, TMPQ1, 8, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 9, 1, 16); - simd_address_at(tmp5, TMPQ1, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 10, 1, 16); - simd_address_at(tmp5, TMPQ1, 10, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 11, 1, 16); - simd_address_at(tmp5, TMPQ1, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 12, 1, 16); - simd_address_at(tmp5, TMPQ1, 12, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 13, 1, 16); - simd_address_at(tmp5, TMPQ1, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 14, 1, 16); - simd_address_at(tmp5, TMPQ1, 14, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 15, 1, 16); - simd_address_at(tmp5, TMPQ1, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SRIGHT(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.16B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif 
defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_ssra(Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 @@ -37960,38 +21044,14 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & :ssra Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x2 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm64); # simd infix TMPQ1 = Rn_VPR128.2D s>> tmp1 on lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) s>> tmp1; - simd_address_at(tmp3, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) s>> tmp1; + TMPQ1[0,64] = Rn_VPR128.2D[0,64] s>> tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] s>> tmp1; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp6, TMPQ1, 0, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp5) + (* [register]:8 tmp6); - simd_address_at(tmp5, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp6, TMPQ1, 1, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp5) + (* [register]:8 tmp6); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm64); - local tmp2:16 = SIMD_INT_SRIGHT(Rn_VPR128.2D, tmp1, 8:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_ssra(Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); -@endif } # C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 @@ -38003,38 +21063,14 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x2 :ssra Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Imm_shr_imm32; # simd infix TMPD1 = Rn_VPR64.2S s>> tmp1 on lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp3) s>> tmp1; - simd_address_at(tmp3, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp3) s>> tmp1; + TMPD1[0,32] = Rn_VPR64.2S[0,32] s>> tmp1; + TMPD1[32,32] = Rn_VPR64.2S[32,32] s>> tmp1; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPD1, 0, 4, 8); - simd_address_at(tmp7, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPD1, 1, 4, 8); - simd_address_at(tmp7, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + 
Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Imm_shr_imm32; - local tmp2:8 = SIMD_INT_SRIGHT(Rn_VPR64.2S, tmp1, 4:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_ssra(Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 @@ -38046,50 +21082,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b :ssra Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.4H s>> Imm_shr_imm16:2 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPD1, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPD1, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPD1, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPD1, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; + TMPD1[0,16] = Rn_VPR64.4H[0,16] s>> Imm_shr_imm16:2; + TMPD1[16,16] = Rn_VPR64.4H[16,16] s>> Imm_shr_imm16:2; + TMPD1[32,16] = Rn_VPR64.4H[32,16] s>> Imm_shr_imm16:2; + TMPD1[48,16] = Rn_VPR64.4H[48,16] s>> Imm_shr_imm16:2; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp5, TMPD1, 0, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp5, TMPD1, 1, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp5, TMPD1, 2, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp5, TMPD1, 3, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_SRIGHT(Rn_VPR64.4H, Imm_shr_imm16:2, 2:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.4H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_ssra(Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 @@ -38101,52 +21104,18 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & :ssra Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & 
u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Imm_shr_imm32; # simd infix TMPQ1 = Rn_VPR128.4S s>> tmp1 on lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) s>> tmp1; - simd_address_at(tmp3, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) s>> tmp1; - simd_address_at(tmp3, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) s>> tmp1; - simd_address_at(tmp3, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) s>> tmp1; + TMPQ1[0,32] = Rn_VPR128.4S[0,32] s>> tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] s>> tmp1; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] s>> tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] s>> tmp1; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp6, TMPQ1, 0, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp6, TMPQ1, 1, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp6, TMPQ1, 2, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp6, TMPQ1, 3, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Imm_shr_imm32; - local tmp2:16 = SIMD_INT_SRIGHT(Rn_VPR128.4S, tmp1, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_ssra(Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 @@ -38158,78 +21127,25 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b :ssra Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.8B s>> Imm_shr_imm8:1 on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPD1, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPD1, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPD1, 2, 1, 8); - * [register]:1 tmp3 = (* 
[register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPD1, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPD1, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPD1, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPD1, 6, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPD1, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) s>> Imm_shr_imm8:1; + TMPD1[0,8] = Rn_VPR64.8B[0,8] s>> Imm_shr_imm8:1; + TMPD1[8,8] = Rn_VPR64.8B[8,8] s>> Imm_shr_imm8:1; + TMPD1[16,8] = Rn_VPR64.8B[16,8] s>> Imm_shr_imm8:1; + TMPD1[24,8] = Rn_VPR64.8B[24,8] s>> Imm_shr_imm8:1; + TMPD1[32,8] = Rn_VPR64.8B[32,8] s>> Imm_shr_imm8:1; + TMPD1[40,8] = Rn_VPR64.8B[40,8] s>> Imm_shr_imm8:1; + TMPD1[48,8] = Rn_VPR64.8B[48,8] s>> Imm_shr_imm8:1; + TMPD1[56,8] = Rn_VPR64.8B[56,8] s>> Imm_shr_imm8:1; # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR64.8B, 0, 1, 8); - simd_address_at(tmp5, TMPD1, 0, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 1, 1, 8); - simd_address_at(tmp5, TMPD1, 1, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 2, 1, 8); - simd_address_at(tmp5, TMPD1, 2, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 3, 1, 8); - simd_address_at(tmp5, TMPD1, 3, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 4, 1, 8); - simd_address_at(tmp5, TMPD1, 4, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 5, 1, 8); - simd_address_at(tmp5, TMPD1, 5, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 6, 1, 8); - simd_address_at(tmp5, TMPD1, 6, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 7, 1, 8); - simd_address_at(tmp5, TMPD1, 7, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 
SIMD_INT_SRIGHT(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.8B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_ssra(Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.318 SSRA page C7-2109 line 118340 MATCH x0f001400/mask=xbf80fc00 @@ -38241,78 +21157,25 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & :ssra Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H s>> Imm_shr_imm16:2 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) s>> Imm_shr_imm16:2; + TMPQ1[0,16] = Rn_VPR128.8H[0,16] s>> Imm_shr_imm16:2; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] s>> Imm_shr_imm16:2; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] s>> Imm_shr_imm16:2; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] s>> Imm_shr_imm16:2; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] s>> Imm_shr_imm16:2; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] s>> Imm_shr_imm16:2; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] s>> Imm_shr_imm16:2; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] s>> Imm_shr_imm16:2; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - 
simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SRIGHT(Rn_VPR128.8H, Imm_shr_imm16:2, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_ssra(Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.318 SSRA page C7-2109 line 118340 MATCH x5f001400/mask=xff80fc00 @@ -38324,19 +21187,10 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & :ssra Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b010111110 & b_22=1 & b_1015=0b000101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm64); local tmp2:8 = Rn_FPR64 s>> tmp1; Rd_FPR64 = Rd_FPR64 + tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm64); - local tmp2:8 = Rn_FPR64 s>> tmp1; - local tmpd:8 = Rd_FPR64 + tmp2; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_ssra(Rd_FPR64, Rn_FPR64, Imm_shr_imm64:1); -@endif } # C7.2.319 SSUBL, SSUBL2 page C7-2112 line 118497 MATCH x0e202000/mask=xbf20fc00 @@ -38348,54 +21202,18 @@ is b_2331=0b010111110 & b_22=1 & b_1015=0b000101 & Rd_FPR64 & Rn_FPR64 & Imm_shr :ssubl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x2 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - 
local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = sext(* [register]:4 tmp9); + TMPQ4[0,64] = sext(TMPD3[0,32]); + TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix Rd_VPR128.2D = TMPQ2 - TMPQ4 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, TMPQ4, 0, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) - (* [register]:8 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, TMPQ4, 1, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) - (* [register]:8 tmp12); + Rd_VPR128.2D[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 4:1); - local tmpd:16 = SIMD_INT_SUB(tmp2, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_ssubl2(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.319 SSUBL, SSUBL2 page C7-2112 line 118497 MATCH x0e202000/mask=xbf20fc00 @@ -38407,74 +21225,24 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :ssubl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x2 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = sext(* [register]:2 tmp9); + 
TMPQ4[0,32] = sext(TMPD3[0,16]); + TMPQ4[32,32] = sext(TMPD3[16,16]); + TMPQ4[64,32] = sext(TMPD3[32,16]); + TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 - TMPQ4 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, TMPQ4, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, TMPQ4, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, TMPQ4, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, TMPQ4, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_SUB(tmp2, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_ssubl2(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.319 SSUBL, SSUBL2 page C7-2112 line 118497 MATCH x0e202000/mask=xbf20fc00 @@ -38486,114 +21254,36 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :ssubl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x2 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); 
- TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = sext(* [register]:1 tmp9); + TMPQ4[0,16] = sext(TMPD3[0,8]); + TMPQ4[16,16] = sext(TMPD3[8,8]); + TMPQ4[32,16] = sext(TMPD3[16,8]); + TMPQ4[48,16] = sext(TMPD3[24,8]); + TMPQ4[64,16] = sext(TMPD3[32,8]); + TMPQ4[80,16] = sext(TMPD3[40,8]); + TMPQ4[96,16] = sext(TMPD3[48,8]); + TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 - TMPQ4 on lane size 2 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 2, 16); - simd_address_at(tmp12, TMPQ4, 0, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 2, 16); - simd_address_at(tmp12, TMPQ4, 1, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 2, 2, 16); - simd_address_at(tmp12, TMPQ4, 2, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 3, 2, 16); - simd_address_at(tmp12, TMPQ4, 3, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 4, 2, 16); - simd_address_at(tmp12, TMPQ4, 4, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 5, 2, 16); - simd_address_at(tmp12, TMPQ4, 5, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 6, 2, 16); - simd_address_at(tmp12, TMPQ4, 6, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 7, 2, 16); - simd_address_at(tmp12, TMPQ4, 7, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 7, 2, 16); - * 
[register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); + Rd_VPR128.8H[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_SEXT(tmp3, 1:1); - local tmpd:16 = SIMD_INT_SUB(tmp2, tmp4, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_ssubl2(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.319 SSUBL, SSUBL2 page C7-2112 line 118497 MATCH x0e202000/mask=xbf20fc00 @@ -38605,46 +21295,16 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :ssubl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x2 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); + TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = sext(* [register]:4 tmp5); + TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = TMPQ1 - TMPQ2 on lane size 8 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 8, 16); - simd_address_at(tmp8, TMPQ2, 0, 8, 16); - simd_address_at(tmp9, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp7) - (* [register]:8 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 8, 16); - simd_address_at(tmp8, TMPQ2, 1, 8, 16); - simd_address_at(tmp9, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp7) - (* [register]:8 tmp8); + Rd_VPR128.2D[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.2S, 4:1); - local tmpd:16 = SIMD_INT_SUB(tmp1, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_ssubl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.319 SSUBL, SSUBL2 page C7-2112 line 118497 MATCH x0e202000/mask=xbf20fc00 @@ -38656,66 +21316,22 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :ssubl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & 
b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x2 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = sext(* [register]:2 tmp5); + TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = TMPQ1 - TMPQ2 on lane size 4 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 4, 16); - simd_address_at(tmp8, TMPQ2, 0, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) - (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 4, 16); - simd_address_at(tmp8, TMPQ2, 1, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) - (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 2, 4, 16); - simd_address_at(tmp8, TMPQ2, 2, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) - (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 3, 4, 16); - simd_address_at(tmp8, TMPQ2, 3, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) - (* [register]:4 tmp8); + Rd_VPR128.4S[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.4H, 2:1); - local tmpd:16 = SIMD_INT_SUB(tmp1, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_ssubl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.319 SSUBL, SSUBL2 page C7-2112 line 118497 MATCH x0e202000/mask=xbf20fc00 @@ -38727,106 +21343,34 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :ssubl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B 
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x2 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); + TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = sext(* [register]:1 tmp5); + TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = TMPQ1 - TMPQ2 on lane size 2 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 2, 16); - simd_address_at(tmp8, TMPQ2, 0, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp9 = 
(* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 2, 16); - simd_address_at(tmp8, TMPQ2, 1, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 2, 2, 16); - simd_address_at(tmp8, TMPQ2, 2, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 3, 2, 16); - simd_address_at(tmp8, TMPQ2, 3, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 4, 2, 16); - simd_address_at(tmp8, TMPQ2, 4, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 5, 2, 16); - simd_address_at(tmp8, TMPQ2, 5, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 6, 2, 16); - simd_address_at(tmp8, TMPQ2, 6, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 7, 2, 16); - simd_address_at(tmp8, TMPQ2, 7, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); + Rd_VPR128.8H[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(Rm_VPR64.8B, 1:1); - local tmpd:16 = SIMD_INT_SUB(tmp1, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_ssubl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.320 SSUBW, SSUBW2 page C7-2114 line 118617 MATCH x0e203000/mask=xbf20fc00 @@ -38838,40 +21382,14 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :ssubw2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x3 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Rm_VPR128 & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rm_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rm_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = sext(* [register]:4 tmp4); + TMPQ2[0,64] = sext(TMPD1[0,32]); + TMPQ2[64,64] = sext(TMPD1[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D - TMPQ2 on lane size 8 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp7, TMPQ2, 0, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp8 = (* 
[register]:8 tmp6) - (* [register]:8 tmp7); - simd_address_at(tmp6, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp7, TMPQ2, 1, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp6) - (* [register]:8 tmp7); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 4:1); - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.2D, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_ssubw2(Rn_VPR128.2D, Rm_VPR128.4S, 4:1); -@endif } # C7.2.320 SSUBW, SSUBW2 page C7-2114 line 118617 MATCH x0e203000/mask=xbf20fc00 @@ -38883,54 +21401,18 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :ssubw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x3 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rm_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rm_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = sext(* [register]:2 tmp4); + TMPQ2[0,32] = sext(TMPD1[0,16]); + TMPQ2[32,32] = sext(TMPD1[16,16]); + TMPQ2[64,32] = sext(TMPD1[32,16]); + TMPQ2[96,32] = sext(TMPD1[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S - TMPQ2 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp7, TMPQ2, 0, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); - simd_address_at(tmp6, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp7, TMPQ2, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); - simd_address_at(tmp6, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp7, TMPQ2, 2, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); - simd_address_at(tmp6, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp7, TMPQ2, 3, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.4S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif 
defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_ssubw2(Rn_VPR128.4S, Rm_VPR128.8H, 2:1); -@endif } # C7.2.320 SSUBW, SSUBW2 page C7-2114 line 118617 MATCH x0e203000/mask=xbf20fc00 @@ -38942,82 +21424,26 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :ssubw2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x3 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rm_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rm_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = sext(* [register]:1 tmp4); + TMPQ2[0,16] = sext(TMPD1[0,8]); + TMPQ2[16,16] = sext(TMPD1[8,8]); + TMPQ2[32,16] = sext(TMPD1[16,8]); + TMPQ2[48,16] = sext(TMPD1[24,8]); + TMPQ2[64,16] = sext(TMPD1[32,8]); + TMPQ2[80,16] = sext(TMPD1[40,8]); + TMPQ2[96,16] = sext(TMPD1[48,8]); + TMPQ2[112,16] = sext(TMPD1[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H - TMPQ2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp7, TMPQ2, 0, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp7, TMPQ2, 1, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp7, TMPQ2, 2, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp7, TMPQ2, 3, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp7, TMPQ2, 4, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp7, TMPQ2, 5, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 6, 2, 16); - 
simd_address_at(tmp7, TMPQ2, 6, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp7, TMPQ2, 7, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_SEXT(tmp1, 1:1); - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.8H, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_ssubw2(Rn_VPR128.8H, Rm_VPR128.16B, 1:1); -@endif } # C7.2.320 SSUBW, SSUBW2 page C7-2114 line 118617 MATCH x0e203000/mask=xbf20fc00 @@ -39029,36 +21455,13 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :ssubw Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x3 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); + TMPQ1[0,64] = sext(Rm_VPR64.2S[0,32]); + TMPQ1[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D - TMPQ1 on lane size 8 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp5, TMPQ1, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp4) - (* [register]:8 tmp5); - simd_address_at(tmp4, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp4) - (* [register]:8 tmp5); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rm_VPR64.2S, 4:1); - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.2D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_ssubw(Rn_VPR128.2D, Rm_VPR64.2S, 4:1); -@endif } # C7.2.320 SSUBW, SSUBW2 page C7-2114 line 118617 MATCH x0e203000/mask=xbf20fc00 @@ -39070,50 +21473,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :ssubw Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x3 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rm_VPR64.4H) (lane size 2 to 
4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + TMPQ1[0,32] = sext(Rm_VPR64.4H[0,16]); + TMPQ1[32,32] = sext(Rm_VPR64.4H[16,16]); + TMPQ1[64,32] = sext(Rm_VPR64.4H[32,16]); + TMPQ1[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S - TMPQ1 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) - (* [register]:4 tmp5); - simd_address_at(tmp4, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) - (* [register]:4 tmp5); - simd_address_at(tmp4, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) - (* [register]:4 tmp5); - simd_address_at(tmp4, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) - (* [register]:4 tmp5); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rm_VPR64.4H, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_ssubw(Rn_VPR128.4S, Rm_VPR64.4H, 2:1); -@endif } # C7.2.320 SSUBW, SSUBW2 page C7-2114 line 118617 MATCH x0e203000/mask=xbf20fc00 @@ -39125,78 +21495,25 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :ssubw Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x3 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = sext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 5, 
1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); + TMPQ1[0,16] = sext(Rm_VPR64.8B[0,8]); + TMPQ1[16,16] = sext(Rm_VPR64.8B[8,8]); + TMPQ1[32,16] = sext(Rm_VPR64.8B[16,8]); + TMPQ1[48,16] = sext(Rm_VPR64.8B[24,8]); + TMPQ1[64,16] = sext(Rm_VPR64.8B[32,8]); + TMPQ1[80,16] = sext(Rm_VPR64.8B[40,8]); + TMPQ1[96,16] = sext(Rm_VPR64.8B[48,8]); + TMPQ1[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H - TMPQ1 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SEXT(Rm_VPR64.8B, 1:1); - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_ssubw(Rn_VPR128.8H, Rm_VPR64.8B, 1:1); -@endif } # C7.2.329 STNP (SIMD&FP) page C7-2145 line 120535 MATCH x2c000000/mask=x3fc00000 @@ -39208,18 +21525,9 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :stnp Rt_FPR32, Rt2_FPR32, 
addrPairIndexed is b_3031=0b00 & b_2229=0b10110000 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 { -@if defined(SEMANTIC_primitive) * addrPairIndexed = Rt_FPR32; local tmp1:8 = addrPairIndexed + 4; * tmp1 = Rt2_FPR32; -@elif defined(SEMANTIC_pcode) - * addrPairIndexed = Rt_FPR32; - local tmp1:8 = addrPairIndexed + 4; - * tmp1 = Rt2_FPR32; -@elif defined(SEMANTIC_pseudo) - NEON_stnp1(Rt_FPR32, addrPairIndexed); - NEON_stnp2(Rt2_FPR32, addrPairIndexed); -@endif } # C7.2.329 STNP (SIMD&FP) page C7-2145 line 120535 MATCH x2c000000/mask=x3fc00000 @@ -39231,18 +21539,9 @@ is b_3031=0b00 & b_2229=0b10110000 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 :stnp Rt_FPR64, Rt2_FPR64, addrPairIndexed is b_3031=0b01 & b_2229=0b10110000 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64 { -@if defined(SEMANTIC_primitive) * addrPairIndexed = Rt_FPR64; local tmp1:8 = addrPairIndexed + 8; * tmp1 = Rt2_FPR64; -@elif defined(SEMANTIC_pcode) - * addrPairIndexed = Rt_FPR64; - local tmp1:8 = addrPairIndexed + 8; - * tmp1 = Rt2_FPR64; -@elif defined(SEMANTIC_pseudo) - NEON_stnp1(Rt_FPR64, addrPairIndexed); - NEON_stnp2(Rt2_FPR64, addrPairIndexed); -@endif } # C7.2.329 STNP (SIMD&FP) page C7-2145 line 120535 MATCH x2c000000/mask=x3fc00000 @@ -39254,18 +21553,9 @@ is b_3031=0b01 & b_2229=0b10110000 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64 :stnp Rt_FPR128, Rt2_FPR128, addrPairIndexed is b_3031=0b10 & b_2229=0b10110000 & Rt2_FPR64 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128 { -@if defined(SEMANTIC_primitive) * addrPairIndexed = Rt_FPR128; local tmp1:8 = addrPairIndexed + 16; * tmp1 = Rt2_FPR128; -@elif defined(SEMANTIC_pcode) - * addrPairIndexed = Rt_FPR128; - local tmp1:8 = addrPairIndexed + 16; - * tmp1 = Rt2_FPR128; -@elif defined(SEMANTIC_pseudo) - NEON_stnp1(Rt_FPR128, addrPairIndexed); - NEON_stnp2(Rt2_FPR128, addrPairIndexed); -@endif } # C7.2.330 STP (SIMD&FP) page C7-2147 line 120656 MATCH x2c800000/mask=x3fc00000 @@ -39281,18 +21571,9 @@ is b_3031=0b10 & b_2229=0b10110000 & Rt2_FPR64 & Rt2_FPR128 & addrPairIndexed & :stp Rt_FPR128, Rt2_FPR128, addrPairIndexed is b_3031=0b10 & b_2529=0b10110 & b_22=0 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128 { -@if defined(SEMANTIC_primitive) * addrPairIndexed = Rt_FPR128; local tmp1:8 = addrPairIndexed + 16; * tmp1 = Rt2_FPR128; -@elif defined(SEMANTIC_pcode) - * addrPairIndexed = Rt_FPR128; - local tmp1:8 = addrPairIndexed + 16; - * tmp1 = Rt2_FPR128; -@elif defined(SEMANTIC_pseudo) - NEON_stp1(Rt_FPR128, addrPairIndexed); - NEON_stp2(Rt2_FPR128, addrPairIndexed); -@endif } # C7.2.330 STP (SIMD&FP) page C7-2147 line 120656 MATCH x2c800000/mask=x3fc00000 @@ -39308,18 +21589,9 @@ is b_3031=0b10 & b_2529=0b10110 & b_22=0 & Rt2_FPR128 & addrPairIndexed & Rt_FPR :stp Rt_FPR32, Rt2_FPR32, addrPairIndexed is b_3031=0b00 & b_2529=0b10110 & b_22=0 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 { -@if defined(SEMANTIC_primitive) * addrPairIndexed = Rt_FPR32; local tmp1:8 = addrPairIndexed + 4; * tmp1 = Rt2_FPR32; -@elif defined(SEMANTIC_pcode) - * addrPairIndexed = Rt_FPR32; - local tmp1:8 = addrPairIndexed + 4; - * tmp1 = Rt2_FPR32; -@elif defined(SEMANTIC_pseudo) - NEON_stp1(Rt_FPR32, addrPairIndexed); - NEON_stp2(Rt2_FPR32, addrPairIndexed); -@endif } # C7.2.330 STP (SIMD&FP) page C7-2147 line 120656 MATCH x2c800000/mask=x3fc00000 @@ -39335,18 +21607,9 @@ is b_3031=0b00 & b_2529=0b10110 & b_22=0 & Rt2_FPR32 & addrPairIndexed & Rt_FPR3 :stp Rt_FPR64, Rt2_FPR64, addrPairIndexed is b_3031=0b01 & b_2529=0b10110 & b_22=0 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64 { -@if defined(SEMANTIC_primitive) * 
addrPairIndexed = Rt_FPR64; local tmp1:8 = addrPairIndexed + 8; * tmp1 = Rt2_FPR64; -@elif defined(SEMANTIC_pcode) - * addrPairIndexed = Rt_FPR64; - local tmp1:8 = addrPairIndexed + 8; - * tmp1 = Rt2_FPR64; -@elif defined(SEMANTIC_pseudo) - NEON_stp1(Rt_FPR64, addrPairIndexed); - NEON_stp2(Rt2_FPR64, addrPairIndexed); -@endif } # C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000400/mask=x3f600c00 @@ -39360,13 +21623,7 @@ is b_3031=0b01 & b_2529=0b10110 & b_22=0 & Rt2_FPR64 & addrPairIndexed & Rt_FPR6 :str Rt_FPR8, addrIndexed is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR8 & addrIndexed & Zt { -@if defined(SEMANTIC_primitive) * addrIndexed = Rt_FPR8; -@elif defined(SEMANTIC_pcode) - * addrIndexed = Rt_FPR8; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR8, addrIndexed); -@endif } # C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000400/mask=x3f600c00 @@ -39380,13 +21637,7 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR8 & add :str Rt_FPR16, addrIndexed is b_3031=0b01 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR16 & addrIndexed & Zt { -@if defined(SEMANTIC_primitive) * addrIndexed = Rt_FPR16; -@elif defined(SEMANTIC_pcode) - * addrIndexed = Rt_FPR16; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR16, addrIndexed); -@endif } # C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000400/mask=x3f600c00 @@ -39400,13 +21651,7 @@ is b_3031=0b01 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR16 & ad :str Rt_FPR32, addrIndexed is b_3031=0b10 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR32 & addrIndexed & Zt { -@if defined(SEMANTIC_primitive) * addrIndexed = Rt_FPR32; -@elif defined(SEMANTIC_pcode) - * addrIndexed = Rt_FPR32; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR32, addrIndexed); -@endif } # C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000400/mask=x3f600c00 @@ -39420,13 +21665,7 @@ is b_3031=0b10 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR32 & ad :str Rt_FPR64, addrIndexed is b_3031=0b11 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR64 & addrIndexed & Zt { -@if defined(SEMANTIC_primitive) * addrIndexed = Rt_FPR64; -@elif defined(SEMANTIC_pcode) - * addrIndexed = Rt_FPR64; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR64, addrIndexed); -@endif } # C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3c000400/mask=x3f600c00 @@ -39440,13 +21679,7 @@ is b_3031=0b11 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR64 & ad :str Rt_FPR128, addrIndexed is b_3031=0b00 & b_2429=0b111100 & b_2223=0b10 & b_21=0 & b_10=1 & Rt_FPR128 & addrIndexed & Zt { -@if defined(SEMANTIC_primitive) * addrIndexed = Rt_FPR128; -@elif defined(SEMANTIC_pcode) - * addrIndexed = Rt_FPR128; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR128, addrIndexed); -@endif } # C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3d000000/mask=x3f400000 @@ -39459,13 +21692,7 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b10 & b_21=0 & b_10=1 & Rt_FPR128 & a :str Rt_FPR8, addrUIMM is b_3031=0b00 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR8 & addrUIMM & Zt { -@if defined(SEMANTIC_primitive) * addrUIMM = Rt_FPR8; -@elif defined(SEMANTIC_pcode) - * addrUIMM = Rt_FPR8; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR8, addrUIMM); -@endif } # C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3d000000/mask=x3f400000 @@ -39478,13 +21705,7 @@ is b_3031=0b00 & b_2429=0b111101 & 
b_2223=0b00 & Rt_FPR8 & addrUIMM & Zt :str Rt_FPR16, addrUIMM is b_3031=0b01 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR16 & addrUIMM & Zt { -@if defined(SEMANTIC_primitive) * addrUIMM = Rt_FPR16; -@elif defined(SEMANTIC_pcode) - * addrUIMM = Rt_FPR16; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR16, addrUIMM); -@endif } # C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3d000000/mask=x3f400000 @@ -39497,13 +21718,7 @@ is b_3031=0b01 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR16 & addrUIMM & Zt :str Rt_FPR32, addrUIMM is b_3031=0b10 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR32 & addrUIMM & Zt { -@if defined(SEMANTIC_primitive) * addrUIMM = Rt_FPR32; -@elif defined(SEMANTIC_pcode) - * addrUIMM = Rt_FPR32; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR32, addrUIMM); -@endif } # C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3d000000/mask=x3f400000 @@ -39516,13 +21731,7 @@ is b_3031=0b10 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR32 & addrUIMM & Zt :str Rt_FPR64, addrUIMM is b_3031=0b11 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR64 & addrUIMM & Zt { -@if defined(SEMANTIC_primitive) * addrUIMM = Rt_FPR64; -@elif defined(SEMANTIC_pcode) - * addrUIMM = Rt_FPR64; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR64, addrUIMM); -@endif } # C7.2.331 STR (immediate, SIMD&FP) page C7-2150 line 120865 MATCH x3d000000/mask=x3f400000 @@ -39535,13 +21744,7 @@ is b_3031=0b11 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR64 & addrUIMM & Zt :str Rt_FPR128, addrUIMM is b_3031=0b00 & b_2429=0b111101 & b_2223=0b10 & Rt_FPR128 & addrUIMM & Zt { -@if defined(SEMANTIC_primitive) * addrUIMM = Rt_FPR128; -@elif defined(SEMANTIC_pcode) - * addrUIMM = Rt_FPR128; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR128, addrUIMM); -@endif } # C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 @@ -39554,17 +21757,9 @@ is b_3031=0b00 & b_2429=0b111101 & b_2223=0b10 & Rt_FPR128 & addrUIMM & Zt :str Rt_FPR8, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR8; -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - * tmp2 = Rt_FPR8; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR8, Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 @@ -39577,17 +21772,9 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & :str Rt_FPR8, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR8; -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - * tmp2 = Rt_FPR8; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR8, Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 @@ -39600,17 +21787,9 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & :str Rt_FPR8, [Rn_GPR64xsp, 
Rm_GPR64^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_1315=0b011 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR8; -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - * tmp2 = Rt_FPR8; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR8, Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 @@ -39623,17 +21802,9 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_1315=0b011 & b_1011= :str Rt_FPR16, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b01 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR16 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR16; -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - * tmp2 = Rt_FPR16; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR16, Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 @@ -39646,17 +21817,9 @@ is b_3031=0b01 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & :str Rt_FPR16, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b01 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR16 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR16; -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - * tmp2 = Rt_FPR16; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR16, Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 @@ -39669,17 +21832,9 @@ is b_3031=0b01 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & :str Rt_FPR32, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b10 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR32 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR32; -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - * tmp2 = Rt_FPR32; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR32, Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 @@ -39692,17 +21847,9 @@ is b_3031=0b10 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & :str Rt_FPR32, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b10 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR32 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR32; -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - 
local tmp2:8 = Rn_GPR64xsp + tmp1; - * tmp2 = Rt_FPR32; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR32, Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 @@ -39715,17 +21862,9 @@ is b_3031=0b10 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & :str Rt_FPR64, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b11 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR64 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR64; -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - * tmp2 = Rt_FPR64; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR64, Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 @@ -39738,17 +21877,9 @@ is b_3031=0b11 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & :str Rt_FPR64, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b11 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR64 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR64; -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - * tmp2 = Rt_FPR64; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR64, Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 @@ -39761,17 +21892,9 @@ is b_3031=0b11 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & :str Rt_FPR128, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b10 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR128 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR128; -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - * tmp2 = Rt_FPR128; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR128, Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.332 STR (register, SIMD&FP) page C7-2154 line 121123 MATCH x3c200800/mask=x3f600c00 @@ -39784,17 +21907,9 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b10 & b_21=1 & b_13=0 & b_1011=0b10 & :str Rt_FPR128, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b10 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR128 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR128; -@elif defined(SEMANTIC_pcode) - local tmp1:8 = extend_spec << extend_amount; - local tmp2:8 = Rn_GPR64xsp + tmp1; - * tmp2 = Rt_FPR128; -@elif defined(SEMANTIC_pseudo) - NEON_str(Rt_FPR128, Rn_GPR64xsp, extend_spec, extend_amount); -@endif } # C7.2.333 STUR (SIMD&FP) page C7-2157 line 121306 MATCH x3c000000/mask=x3f600c00 @@ -39806,13 +21921,7 @@ is b_3031=0b00 & b_2429=0b111100 & b_2223=0b10 & b_21=1 & b_13=1 & b_1011=0b10 & :stur Rt_FPR128, addrIndexed is size.ldstr=0 & 
b_2729=7 & v=1 & b_2425=0 & b_23=1 & b_2222=0 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR128 { -@if defined(SEMANTIC_primitive) * addrIndexed = Rt_FPR128; -@elif defined(SEMANTIC_pcode) - * addrIndexed = Rt_FPR128; -@elif defined(SEMANTIC_pseudo) - NEON_stur(Rt_FPR128, addrIndexed); -@endif } # C7.2.333 STUR (SIMD&FP) page C7-2157 line 121306 MATCH x3c000000/mask=x3f600c00 @@ -39824,13 +21933,7 @@ is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=1 & b_2222=0 & b_2121=0 & b_1 :stur Rt_FPR16, addrIndexed is size.ldstr=1 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_1011=0 & addrIndexed & Rt_FPR16 { -@if defined(SEMANTIC_primitive) * addrIndexed = Rt_FPR16; -@elif defined(SEMANTIC_pcode) - * addrIndexed = Rt_FPR16; -@elif defined(SEMANTIC_pseudo) - NEON_stur(Rt_FPR16, addrIndexed); -@endif } # C7.2.333 STUR (SIMD&FP) page C7-2157 line 121306 MATCH x3c000000/mask=x3f600c00 @@ -39842,13 +21945,7 @@ is size.ldstr=1 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_1011=0 & add :stur Rt_FPR32, addrIndexed is size.ldstr=2 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR32 { -@if defined(SEMANTIC_primitive) * addrIndexed = Rt_FPR32; -@elif defined(SEMANTIC_pcode) - * addrIndexed = Rt_FPR32; -@elif defined(SEMANTIC_pseudo) - NEON_stur(Rt_FPR32, addrIndexed); -@endif } # C7.2.333 STUR (SIMD&FP) page C7-2157 line 121306 MATCH x3c000000/mask=x3f600c00 @@ -39860,13 +21957,7 @@ is size.ldstr=2 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1 :stur Rt_FPR64, addrIndexed is size.ldstr=3 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR64 { -@if defined(SEMANTIC_primitive) * addrIndexed = Rt_FPR64; -@elif defined(SEMANTIC_pcode) - * addrIndexed = Rt_FPR64; -@elif defined(SEMANTIC_pseudo) - NEON_stur(Rt_FPR64, addrIndexed); -@endif } # C7.2.333 STUR (SIMD&FP) page C7-2157 line 121306 MATCH x3c000000/mask=x3f600c00 @@ -39878,13 +21969,7 @@ is size.ldstr=3 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1 :stur Rt_FPR8, addrIndexed is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR8 { -@if defined(SEMANTIC_primitive) * addrIndexed = Rt_FPR8; -@elif defined(SEMANTIC_pcode) - * addrIndexed = Rt_FPR8; -@elif defined(SEMANTIC_pseudo) - NEON_stur(Rt_FPR8, addrIndexed); -@endif } # C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x7e208400/mask=xff20fc00 @@ -39896,15 +21981,8 @@ is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1 :sub Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x10 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = Rn_FPR64 - Rm_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = Rn_FPR64 - Rm_FPR64; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_sub(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 @@ -39916,82 +21994,24 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :sub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x10 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.16B = Rn_VPR128.16B - Rm_VPR128.16B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 
0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rm_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) - 
(* [register]:1 tmp2); + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] - Rm_VPR128.16B[0,8]; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] - Rm_VPR128.16B[8,8]; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] - Rm_VPR128.16B[16,8]; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] - Rm_VPR128.16B[24,8]; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] - Rm_VPR128.16B[32,8]; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] - Rm_VPR128.16B[40,8]; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] - Rm_VPR128.16B[48,8]; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] - Rm_VPR128.16B[56,8]; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] - Rm_VPR128.16B[64,8]; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] - Rm_VPR128.16B[72,8]; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] - Rm_VPR128.16B[80,8]; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] - Rm_VPR128.16B[88,8]; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] - Rm_VPR128.16B[96,8]; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] - Rm_VPR128.16B[104,8]; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] - Rm_VPR128.16B[112,8]; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] - Rm_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_sub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 @@ -40003,26 +22023,10 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :sub Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.2D = Rn_VPR128.2D - Rm_VPR128.2D on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) - (* [register]:8 tmp2); - simd_address_at(tmp1, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) - (* [register]:8 tmp2); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - Rm_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - Rm_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sub(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 @@ -40034,26 +22038,10 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :sub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x10 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.2S = Rn_VPR64.2S - Rm_VPR64.2S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) - (* [register]:4 tmp2); - simd_address_at(tmp1, 
Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) - (* [register]:4 tmp2); + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] - Rm_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] - Rm_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_SUB(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_sub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 @@ -40065,34 +22053,12 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :sub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x10 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.4H = Rn_VPR64.4H - Rm_VPR64.4H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) - (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) - (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) - (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) - (* [register]:2 tmp2); + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] - Rm_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] - Rm_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] - Rm_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] - Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_SUB(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_sub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 @@ -40104,34 +22070,12 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :sub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.4S = Rn_VPR128.4S - Rm_VPR128.4S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) - (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) - (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, 
Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) - (* [register]:4 tmp2); - simd_address_at(tmp1, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) - (* [register]:4 tmp2); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - Rm_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - Rm_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - Rm_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 @@ -40143,50 +22087,16 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :sub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x10 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.8B = Rn_VPR64.8B - Rm_VPR64.8B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); - simd_address_at(tmp1, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp2, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) - (* [register]:1 tmp2); + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] - Rm_VPR64.8B[0,8]; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] - Rm_VPR64.8B[8,8]; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] - Rm_VPR64.8B[16,8]; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] - Rm_VPR64.8B[24,8]; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] - Rm_VPR64.8B[32,8]; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] - Rm_VPR64.8B[40,8]; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] - 
Rm_VPR64.8B[48,8]; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] - Rm_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_SUB(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_sub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.334 SUB (vector) page C7-2159 line 121431 MATCH x2e208400/mask=xbf20fc00 @@ -40198,50 +22108,16 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :sub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.8H = Rn_VPR128.8H - Rm_VPR128.8H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) - (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) - (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) - (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) - (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) - (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) - (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) - (* [register]:2 tmp2); - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) - (* [register]:2 tmp2); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - Rm_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - Rm_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - Rm_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - Rm_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - Rm_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - Rm_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - Rm_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.335 SUBHN, SUBHN2 page C7-2161 line 121565 MATCH x0e206000/mask=xbf20fc00 @@ 
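The rewritten SUB (vector) constructors above drop the simd_address_at pointer arithmetic in favor of direct bit-range assignments, where Vd[off,width] names one lane of the register and the trailing zext_zd/zext_zq macro clears the untouched upper bytes of the backing Zd register. A minimal Python sketch of that lane-wise wrap-around subtraction (illustrative only, not Ghidra or SLEIGH code; get_lane/set_lane/vector_sub are hypothetical helper names):

# Illustrative model of the lane-wise SUB semantics above: a vector register
# is held as a plain integer and each lane is a [bitoffset, bitsize] slice,
# mirroring SLEIGH's reg[off,size] syntax.  Not Ghidra code.
def get_lane(value, off, size):
    return (value >> off) & ((1 << size) - 1)

def set_lane(value, off, size, lane):
    mask = ((1 << size) - 1) << off
    return (value & ~mask) | ((lane & ((1 << size) - 1)) << off)

def vector_sub(rn, rm, lane_bits, total_bits):
    # Per-lane modular subtraction, e.g. SUB Vd.4H, Vn.4H, Vm.4H.
    rd = 0
    for off in range(0, total_bits, lane_bits):
        diff = get_lane(rn, off, lane_bits) - get_lane(rm, off, lane_bits)
        rd = set_lane(rd, off, lane_bits, diff)   # wraps modulo 2**lane_bits
    return rd

# 4H example: each 16-bit lane is subtracted independently.
assert vector_sub(0x0001000200030004, 0x0002000100010005, 16, 64) == 0xFFFF00010002FFFF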
-40253,87 +22129,25 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :subhn2 Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x6 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H - Rm_VPR128.8H on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 7, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); + TMPQ1[0,16] = Rn_VPR128.8H[0,16] - Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] - Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] - Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] - Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] - Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] - Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] - Rm_VPR128.8H[96,16]; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] - Rm_VPR128.8H[112,16]; # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 5, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 
tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR128.16B[64,8] = TMPQ1[8,8]; + Rd_VPR128.16B[72,8] = TMPQ1[24,8]; + Rd_VPR128.16B[80,8] = TMPQ1[40,8]; + Rd_VPR128.16B[88,8] = TMPQ1[56,8]; + Rd_VPR128.16B[96,8] = TMPQ1[72,8]; + Rd_VPR128.16B[104,8] = TMPQ1[88,8]; + Rd_VPR128.16B[112,8] = TMPQ1[104,8]; + Rd_VPR128.16B[120,8] = TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SUB(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - local tmp2:1 = 0; - local tmpd:16 = Rd_VPR128.16B; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 8:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 9:1); - tmp2 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp2, 10:1); - tmp2 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp2, 11:1); - tmp2 = SIMD_PIECE(tmp1, 9:1); tmpd = SIMD_COPY(tmpd, tmp2, 12:1); - tmp2 = SIMD_PIECE(tmp1, 11:1); tmpd = SIMD_COPY(tmpd, tmp2, 13:1); - tmp2 = SIMD_PIECE(tmp1, 13:1); tmpd = SIMD_COPY(tmpd, tmp2, 14:1); - tmp2 = SIMD_PIECE(tmp1, 15:1); tmpd = SIMD_COPY(tmpd, tmp2, 15:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_sub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.335 SUBHN, SUBHN2 page C7-2161 line 121565 MATCH x0e206000/mask=xbf20fc00 @@ -40345,39 +22159,13 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H :subhn2 Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x6 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.2D - Rm_VPR128.2D on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) - (* [register]:8 tmp3); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) - (* [register]:8 tmp3); + TMPQ1[0,64] = Rn_VPR128.2D[0,64] - Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] - Rm_VPR128.2D[64,64]; # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-2@3-3) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR128.4S[64,32] = TMPQ1[32,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SUB(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - local tmp2:4 = 0; - local tmpd:16 = Rd_VPR128.4S; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 2:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sub(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.335 SUBHN, SUBHN2 page C7-2161 line 121565 MATCH 
x0e206000/mask=xbf20fc00 @@ -40389,55 +22177,17 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D :subhn2 Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x6 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S - Rm_VPR128.4S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) - (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) - (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) - (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) - (* [register]:4 tmp3); + TMPQ1[0,32] = Rn_VPR128.4S[0,32] - Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] - Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] - Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] - Rm_VPR128.4S[96,32]; # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-4@3-5@5-6@7-7) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR128.8H[64,16] = TMPQ1[16,16]; + Rd_VPR128.8H[80,16] = TMPQ1[48,16]; + Rd_VPR128.8H[96,16] = TMPQ1[80,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SUB(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - local tmp2:2 = 0; - local tmpd:16 = Rd_VPR128.8H; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 4:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 5:1); - tmp2 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp2, 6:1); - tmp2 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp2, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_subhn2(Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.335 SUBHN, SUBHN2 page C7-2161 line 121565 MATCH x0e206000/mask=xbf20fc00 @@ -40449,39 +22199,13 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S :subhn Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x6 & b_1011=0 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.2D - Rm_VPR128.2D on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, 
Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) - (* [register]:8 tmp3); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) - (* [register]:8 tmp3); + TMPQ1[0,64] = Rn_VPR128.2D[0,64] - Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] - Rm_VPR128.2D[64,64]; # simd shuffle Rd_VPR64.2S = TMPQ1 (@1-0@3-1) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp6 = * [register]:4 tmp5; - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR64.2S[0,32] = TMPQ1[32,32]; + Rd_VPR64.2S[32,32] = TMPQ1[96,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SUB(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); - local tmp2:4 = 0; - local tmpd:8 = Rd_VPR64.2S; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 0:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_subhn(Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.335 SUBHN, SUBHN2 page C7-2161 line 121565 MATCH x0e206000/mask=xbf20fc00 @@ -40493,55 +22217,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D :subhn Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x6 & b_1011=0 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.4S - Rm_VPR128.4S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) - (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) - (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) - (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rm_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp2) - (* [register]:4 tmp3); + TMPQ1[0,32] = Rn_VPR128.4S[0,32] - Rm_VPR128.4S[0,32]; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] - Rm_VPR128.4S[32,32]; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] - Rm_VPR128.4S[64,32]; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] - Rm_VPR128.4S[96,32]; # simd shuffle Rd_VPR64.4H = TMPQ1 (@1-0@3-1@5-2@7-3) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp6 = * [register]:2 
tmp5; - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR64.4H[0,16] = TMPQ1[16,16]; + Rd_VPR64.4H[16,16] = TMPQ1[48,16]; + Rd_VPR64.4H[32,16] = TMPQ1[80,16]; + Rd_VPR64.4H[48,16] = TMPQ1[112,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SUB(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); - local tmp2:2 = 0; - local tmpd:8 = Rd_VPR64.4H; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 0:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 1:1); - tmp2 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp2, 2:1); - tmp2 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp2, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_subhn(Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.335 SUBHN, SUBHN2 page C7-2161 line 121565 MATCH x0e206000/mask=xbf20fc00 @@ -40553,87 +22239,25 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S :subhn Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x6 & b_1011=0 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H - Rm_VPR128.8H on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 0, 2, 16); - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 2, 2, 16); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 4, 2, 16); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 6, 2, 16); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rm_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 7, 2, 16); - * [register]:2 tmp4 = (* [register]:2 tmp2) - (* [register]:2 tmp3); + TMPQ1[0,16] = Rn_VPR128.8H[0,16] - Rm_VPR128.8H[0,16]; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] - Rm_VPR128.8H[16,16]; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] - Rm_VPR128.8H[32,16]; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] - Rm_VPR128.8H[48,16]; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] - Rm_VPR128.8H[64,16]; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] - Rm_VPR128.8H[80,16]; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] - Rm_VPR128.8H[96,16]; + 
TMPQ1[112,16] = Rn_VPR128.8H[112,16] - Rm_VPR128.8H[112,16]; # simd shuffle Rd_VPR64.8B = TMPQ1 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 5, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ1, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR64.8B[0,8] = TMPQ1[8,8]; + Rd_VPR64.8B[8,8] = TMPQ1[24,8]; + Rd_VPR64.8B[16,8] = TMPQ1[40,8]; + Rd_VPR64.8B[24,8] = TMPQ1[56,8]; + Rd_VPR64.8B[32,8] = TMPQ1[72,8]; + Rd_VPR64.8B[40,8] = TMPQ1[88,8]; + Rd_VPR64.8B[48,8] = TMPQ1[104,8]; + Rd_VPR64.8B[56,8] = TMPQ1[120,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_SUB(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); - local tmp2:1 = 0; - local tmpd:8 = Rd_VPR64.8B; - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 0:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 1:1); - tmp2 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp2, 2:1); - tmp2 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp2, 3:1); - tmp2 = SIMD_PIECE(tmp1, 9:1); tmpd = SIMD_COPY(tmpd, tmp2, 4:1); - tmp2 = SIMD_PIECE(tmp1, 11:1); tmpd = SIMD_COPY(tmpd, tmp2, 5:1); - tmp2 = SIMD_PIECE(tmp1, 13:1); tmpd = SIMD_COPY(tmpd, tmp2, 6:1); - tmp2 = SIMD_PIECE(tmp1, 15:1); tmpd = SIMD_COPY(tmpd, tmp2, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_subhn(Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.337 SUQADD page C7-2165 line 121781 MATCH x5e203800/mask=xff3ffc00 @@ -40646,15 +22270,8 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H :suqadd Rd_FPR8, Rn_FPR8 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_FPR8 & Rn_FPR8 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR8 = Rd_FPR8 + Rn_FPR8; zext_zb(Zd); # zero upper 31 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:1 = Rd_FPR8 + Rn_FPR8; - Zd = zext(tmpd); # assigning to Rd_FPR8 -@elif defined(SEMANTIC_pseudo) - Rd_FPR8 = NEON_suqadd(Rd_FPR8, Rn_FPR8); -@endif } # C7.2.337 SUQADD page C7-2165 line 121781 MATCH x5e203800/mask=xff3ffc00 @@ -40667,15 +22284,8 @@ is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_ :suqadd Rd_FPR16, Rn_FPR16 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = Rd_FPR16 + Rn_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:2 = Rd_FPR16 + Rn_FPR16; - Zd = zext(tmpd); # assigning to 
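The SUBHN/SUBHN2 constructors above do the same thing in two steps: a full-width lane subtraction into TMPQ1, then a shuffle that keeps only the high half of each difference, packing the narrowed lanes into the low 64 bits of Vd (subhn, upper half zeroed) or into the high 64 bits while the low half is preserved (subhn2). A rough Python model of that narrowing step (illustrative only, not part of this patch; function and parameter names are hypothetical):

# Illustrative model of SUBHN/SUBHN2: subtract wide lanes, keep the high half
# of each difference, and pack the narrowed lanes into the low (SUBHN) or
# high (SUBHN2) half of the destination.  Not Ghidra code.
def get_lane(v, off, size):
    return (v >> off) & ((1 << size) - 1)

def set_lane(v, off, size, lane):
    mask = ((1 << size) - 1) << off
    return (v & ~mask) | ((lane & ((1 << size) - 1)) << off)

def subhn(rd, rn, rm, wide_bits, high_part):
    # wide_bits is the source lane size (16/32/64); result lanes are half that.
    narrow = wide_bits // 2
    out = rd if high_part else 0        # SUBHN2 preserves the low 64 bits of Vd
    base = 64 if high_part else 0       # where the narrowed lanes land
    for i in range(128 // wide_bits):
        diff = get_lane(rn, i * wide_bits, wide_bits) - get_lane(rm, i * wide_bits, wide_bits)
        hi = (diff >> narrow) & ((1 << narrow) - 1)   # high half of the difference
        out = set_lane(out, base + i * narrow, narrow, hi)
    return out

# subhn2 Vd.4S, Vn.2D, Vm.2D: low 64 bits of Vd kept, high 64 bits filled.
rd = subhn(0x0000000011111111,
           0x00000003_00000000_00000001_00000000,
           0x00000000_00000000_00000000_00000000, 64, high_part=True)
assert rd == 0x00000003_00000001_00000000_11111111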
Rd_FPR16 -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_suqadd(Rd_FPR16, Rn_FPR16); -@endif } # C7.2.337 SUQADD page C7-2165 line 121781 MATCH x5e203800/mask=xff3ffc00 @@ -40688,15 +22298,8 @@ is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_ :suqadd Rd_FPR32, Rn_FPR32 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR32 = Rd_FPR32 + Rn_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:4 = Rd_FPR32 + Rn_FPR32; - Zd = zext(tmpd); # assigning to Rd_FPR32 -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_suqadd(Rd_FPR32, Rn_FPR32); -@endif } # C7.2.337 SUQADD page C7-2165 line 121781 MATCH x5e203800/mask=xff3ffc00 @@ -40709,15 +22312,8 @@ is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_ :suqadd Rd_FPR64, Rn_FPR64 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR64 = Rd_FPR64 + Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = Rd_FPR64 + Rn_FPR64; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_suqadd(Rd_FPR64, Rn_FPR64); -@endif } # C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 @@ -40730,50 +22326,16 @@ is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_ :suqadd Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.8B = Rd_VPR64.8B + Rn_VPR64.8B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rd_VPR64.8B, 0, 1, 8); - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR64.8B, 1, 1, 8); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR64.8B, 2, 1, 8); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR64.8B, 3, 1, 8); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR64.8B, 4, 1, 8); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR64.8B, 5, 1, 8); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR64.8B, 6, 1, 8); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR64.8B, 7, 1, 8); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + 
Rn_VPR64.8B[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + Rn_VPR64.8B[8,8]; + Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + Rn_VPR64.8B[16,8]; + Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + Rn_VPR64.8B[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + Rn_VPR64.8B[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + Rn_VPR64.8B[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + Rn_VPR64.8B[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + Rn_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.8B, Rn_VPR64.8B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_suqadd(Rd_VPR64.8B, Rn_VPR64.8B, 1:1); -@endif } # C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 @@ -40786,82 +22348,24 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_ :suqadd Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.16B = Rd_VPR128.16B + Rn_VPR128.16B on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rd_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 10, 1, 16); - 
simd_address_at(tmp3, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 12, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 14, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); - simd_address_at(tmp1, Rd_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp1) + (* [register]:1 tmp2); + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + Rn_VPR128.16B[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + Rn_VPR128.16B[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + Rn_VPR128.16B[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + Rn_VPR128.16B[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + Rn_VPR128.16B[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + Rn_VPR128.16B[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + Rn_VPR128.16B[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + Rn_VPR128.16B[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + Rn_VPR128.16B[64,8]; + Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + Rn_VPR128.16B[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + Rn_VPR128.16B[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + Rn_VPR128.16B[88,8]; + Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + Rn_VPR128.16B[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + Rn_VPR128.16B[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + Rn_VPR128.16B[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + Rn_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.16B, Rn_VPR128.16B, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_suqadd(Rd_VPR128.16B, Rn_VPR128.16B, 1:1); -@endif } # C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 @@ -40874,34 +22378,12 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_ :suqadd Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.4H = Rd_VPR64.4H + Rn_VPR64.4H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, 
Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + Rn_VPR64.4H[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + Rn_VPR64.4H[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + Rn_VPR64.4H[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + Rn_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.4H, Rn_VPR64.4H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_suqadd(Rd_VPR64.4H, Rn_VPR64.4H, 2:1); -@endif } # C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 @@ -40914,50 +22396,16 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_ :suqadd Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.8H = Rd_VPR128.8H + Rn_VPR128.8H on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); - simd_address_at(tmp1, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp1) + (* [register]:2 tmp2); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + Rn_VPR128.8H[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + Rn_VPR128.8H[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + Rn_VPR128.8H[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + Rn_VPR128.8H[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + Rn_VPR128.8H[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + 
Rn_VPR128.8H[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + Rn_VPR128.8H[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + Rn_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, Rn_VPR128.8H, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_suqadd(Rd_VPR128.8H, Rn_VPR128.8H, 2:1); -@endif } # C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 @@ -40970,26 +22418,10 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_ :suqadd Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.2S = Rd_VPR64.2S + Rn_VPR64.2S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) + (* [register]:4 tmp2); - simd_address_at(tmp1, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp1) + (* [register]:4 tmp2); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + Rn_VPR64.2S[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + Rn_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, Rn_VPR64.2S, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_suqadd(Rd_VPR64.2S, Rn_VPR64.2S, 4:1); -@endif } # C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 @@ -41002,34 +22434,12 @@ is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_ :suqadd Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.4S = Rd_VPR128.4S + Rn_VPR128.4S on lane size 4 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) + (* [register]:4 tmp2); - simd_address_at(tmp1, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) + (* [register]:4 tmp2); - simd_address_at(tmp1, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) + (* [register]:4 tmp2); - simd_address_at(tmp1, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp1) + (* [register]:4 tmp2); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + Rn_VPR128.4S[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + Rn_VPR128.4S[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + Rn_VPR128.4S[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + Rn_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, Rn_VPR128.4S, 4:1); - Zd = zext(tmpd); # 
assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_suqadd(Rd_VPR128.4S, Rn_VPR128.4S, 4:1); -@endif } # C7.2.337 SUQADD page C7-2165 line 121781 MATCH x0e203800/mask=xbf3ffc00 @@ -41042,26 +22452,10 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_ :suqadd Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.2D = Rd_VPR128.2D + Rn_VPR128.2D on lane size 8 - local tmp1:4 = 0; - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp1, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) + (* [register]:8 tmp2); - simd_address_at(tmp1, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp1) + (* [register]:8 tmp2); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + Rn_VPR128.2D[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + Rn_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, Rn_VPR128.2D, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_suqadd(Rd_VPR128.2D, Rn_VPR128.2D, 8:1); -@endif } # C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 @@ -41074,45 +22468,17 @@ is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_ :sxtl2 Rd_VPR128.8H, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize Rd_VPR128.8H = sext(TMPD1) (lane size 1 to 2) - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp4 = sext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 1, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp4 = sext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 2, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp4 = sext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 3, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp4 = sext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 4, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp4 = sext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 5, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp4 = sext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 6, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp4 = sext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 7, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp4 = sext(* [register]:1 tmp3); + Rd_VPR128.8H[0,16] = sext(TMPD1[0,8]); + Rd_VPR128.8H[16,16] = sext(TMPD1[8,8]); + Rd_VPR128.8H[32,16] = sext(TMPD1[16,8]); + Rd_VPR128.8H[48,16] = sext(TMPD1[24,8]); + Rd_VPR128.8H[64,16] = sext(TMPD1[32,8]); + Rd_VPR128.8H[80,16] = sext(TMPD1[40,8]); + Rd_VPR128.8H[96,16] = sext(TMPD1[48,8]); + Rd_VPR128.8H[112,16] = 
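The SUQADD forms above are modeled as a plain in-place lane accumulate (each Rd lane += the matching Rn lane); the architectural instruction additionally saturates each signed result, a step this simplified p-code does not attempt. A small Python sketch of the accumulate as written (illustrative only; names are hypothetical):

# Illustrative model of the SUQADD semantics above: an in-place lane-wise
# accumulate Rd += Rn with wrap-around.  The real instruction also saturates
# to the signed lane range, which is omitted here as in the p-code above.
def get_lane(v, off, size):
    return (v >> off) & ((1 << size) - 1)

def set_lane(v, off, size, lane):
    mask = ((1 << size) - 1) << off
    return (v & ~mask) | ((lane & ((1 << size) - 1)) << off)

def suqadd_model(rd, rn, lane_bits, total_bits):
    for off in range(0, total_bits, lane_bits):
        rd = set_lane(rd, off, lane_bits,
                      get_lane(rd, off, lane_bits) + get_lane(rn, off, lane_bits))
    return rd

# suqadd Vd.2S, Vn.2S with one wrapping lane: 0xFFFFFFFF + 2 -> 1
assert suqadd_model(0x00000001_FFFFFFFF, 0x00000001_00000002, 32, 64) == 0x00000002_00000001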
sext(TMPD1[56,8]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmpd:16 = SIMD_INT_SEXT(tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sxtl2(Rn_VPR128.16B, 1:1); -@endif } # C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 @@ -41125,25 +22491,11 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & :sxtl Rd_VPR128.2D, Rn_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.2S; # simd resize Rd_VPR128.2D = sext(TMPD1) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, TMPD1, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); - simd_address_at(tmp2, TMPD1, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = sext(* [register]:4 tmp2); + Rd_VPR128.2D[0,64] = sext(TMPD1[0,32]); + Rd_VPR128.2D[64,64] = sext(TMPD1[32,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.2S; - local tmpd:16 = SIMD_INT_SEXT(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sxtl(Rn_VPR64.2S, 4:1); -@endif } # C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 @@ -41156,31 +22508,13 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_ :sxtl Rd_VPR128.4S, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.4H; # simd resize Rd_VPR128.4S = sext(TMPD1) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, TMPD1, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, TMPD1, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, TMPD1, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); - simd_address_at(tmp2, TMPD1, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = sext(* [register]:2 tmp2); + Rd_VPR128.4S[0,32] = sext(TMPD1[0,16]); + Rd_VPR128.4S[32,32] = sext(TMPD1[16,16]); + Rd_VPR128.4S[64,32] = sext(TMPD1[32,16]); + Rd_VPR128.4S[96,32] = sext(TMPD1[48,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.4H; - local tmpd:16 = SIMD_INT_SEXT(tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sxtl(Rn_VPR64.4H, 2:1); -@endif } # C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 @@ -41193,27 +22527,11 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & :sxtl2 Rd_VPR128.2D, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize Rd_VPR128.2D = sext(TMPD1) (lane size 4 to 8) - 
local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 4, 8); - simd_address_at(tmp4, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp4 = sext(* [register]:4 tmp3); - simd_address_at(tmp3, TMPD1, 1, 4, 8); - simd_address_at(tmp4, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp4 = sext(* [register]:4 tmp3); + Rd_VPR128.2D[0,64] = sext(TMPD1[0,32]); + Rd_VPR128.2D[64,64] = sext(TMPD1[32,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmpd:16 = SIMD_INT_SEXT(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_sxtl2(Rn_VPR128.4S, 4:1); -@endif } # C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 @@ -41226,43 +22544,17 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_ :sxtl Rd_VPR128.8H, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR128.8H & Rn_VPR128 & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.8B; # simd resize Rd_VPR128.8H = sext(TMPD1) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, TMPD1, 0, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 1, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 2, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 3, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 4, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 5, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 6, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 7, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = sext(* [register]:1 tmp2); + Rd_VPR128.8H[0,16] = sext(TMPD1[0,8]); + Rd_VPR128.8H[16,16] = sext(TMPD1[8,8]); + Rd_VPR128.8H[32,16] = sext(TMPD1[16,8]); + Rd_VPR128.8H[48,16] = sext(TMPD1[24,8]); + Rd_VPR128.8H[64,16] = sext(TMPD1[32,8]); + Rd_VPR128.8H[80,16] = sext(TMPD1[40,8]); + Rd_VPR128.8H[96,16] = sext(TMPD1[48,8]); + Rd_VPR128.8H[112,16] = sext(TMPD1[56,8]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.8B; - local tmpd:16 = SIMD_INT_SEXT(tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_sxtl(Rn_VPR64.8B, 1:1); -@endif } # C7.2.338 SXTL, SXTL2 page C7-2167 line 121903 MATCH x0f00a400/mask=xbf87fc00 @@ -41275,33 +22567,13 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & :sxtl2 Rd_VPR128.4S, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize Rd_VPR128.4S = sext(TMPD1) (lane size 2 to 4) - local tmp3:4 = 
0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 2, 8); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp4 = sext(* [register]:2 tmp3); - simd_address_at(tmp3, TMPD1, 1, 2, 8); - simd_address_at(tmp4, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp4 = sext(* [register]:2 tmp3); - simd_address_at(tmp3, TMPD1, 2, 2, 8); - simd_address_at(tmp4, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp4 = sext(* [register]:2 tmp3); - simd_address_at(tmp3, TMPD1, 3, 2, 8); - simd_address_at(tmp4, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp4 = sext(* [register]:2 tmp3); + Rd_VPR128.4S[0,32] = sext(TMPD1[0,16]); + Rd_VPR128.4S[32,32] = sext(TMPD1[16,16]); + Rd_VPR128.4S[64,32] = sext(TMPD1[32,16]); + Rd_VPR128.4S[96,32] = sext(TMPD1[48,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmpd:16 = SIMD_INT_SEXT(tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_sxtl2(Rn_VPR128.8H, 2:1); -@endif } # C7.2.322 TBL page C7-1717 line 99409 KEEPWITH @@ -41320,15 +22592,8 @@ tblx: "tbx" is b_12=1 & Rd_VPR128 { export Rd_VPR128; } :^tblx Rd_VPR64.8B, "{"^Rn_VPR128.16B^"}", Rm_VPR64.8B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b00 & Rm_VPR64.8B & Rn_VPR128.16B & Rd_VPR64.8B & tblx & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR64.8B = a64_TBL(tblx, Rn_VPR128.16B, Rm_VPR64.8B); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = a64_TBL(tblx, Rn_VPR128.16B, Rm_VPR64.8B); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_tblx(tblx, Rn_VPR128.16B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 @@ -41342,15 +22607,8 @@ is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b00 & R :^tblx Rd_VPR128.16B, "{"^Rn_VPR128.16B^"}", Rm_VPR128.16B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b00 & Rm_VPR128.16B & Rn_VPR128.16B & Rd_VPR128.16B & tblx & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR128.16B = a64_TBL(tblx, Rn_VPR128.16B, Rm_VPR128.16B); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = a64_TBL(tblx, Rn_VPR128.16B, Rm_VPR128.16B); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_tblx(tblx, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 @@ -41364,15 +22622,8 @@ is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b00 & R :^tblx Rd_VPR64.8B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^"}", Rm_VPR64.8B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b01 & Rm_VPR64.8B & Rn_VPR128.16B & Rnn_VPR128.16B & Rd_VPR64.8B & tblx & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR64.8B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rm_VPR64.8B); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rm_VPR64.8B); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_tblx(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rm_VPR64.8B); -@endif } # C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 @@ -41386,15 +22637,8 @@ is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b01 & R :^tblx Rd_VPR128.16B, 
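The SXTL/SXTL2 constructors above first copy the low (sxtl) or high (sxtl2) 64 bits of Vn into TMPD1, then sign-extend each narrow lane into a lane of twice the width in Vd. A compact Python model of that widening (illustrative only, not Ghidra code; helper names are hypothetical):

# Illustrative model of SXTL/SXTL2: pick one 64-bit half of the source and
# sign-extend each narrow lane into a double-width lane of the destination.
def get_lane_signed(v, off, size):
    lane = (v >> off) & ((1 << size) - 1)
    return lane - (1 << size) if lane & (1 << (size - 1)) else lane

def set_lane(v, off, size, lane):
    mask = ((1 << size) - 1) << off
    return (v & ~mask) | ((lane & ((1 << size) - 1)) << off)

def sxtl(rn, narrow_bits, high_part=False):
    src = (rn >> 64) if high_part else (rn & ((1 << 64) - 1))   # TMPD1 above
    wide = 2 * narrow_bits
    rd = 0
    for i in range(64 // narrow_bits):
        rd = set_lane(rd, i * wide, wide,
                      get_lane_signed(src, i * narrow_bits, narrow_bits))
    return rd

# sxtl Vd.4S, Vn.4H: 0x8000 sign-extends to 0xFFFF8000
assert sxtl(0x0001_7FFF_8000_0002, 16) == 0x00000001_00007FFF_FFFF8000_00000002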
"{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^"}", Rm_VPR128.16B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b01 & Rm_VPR128.16B & Rn_VPR128.16B & Rnn_VPR128.16B & Rd_VPR128.16B & tblx & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR128.16B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rm_VPR128.16B); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rm_VPR128.16B); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_tblx(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rm_VPR128.16B); -@endif } # C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 @@ -41408,15 +22652,8 @@ is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b01 & R :^tblx Rd_VPR64.8B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^", "^Rnnn_VPR128.16B^"}", Rm_VPR64.8B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b10 & Rm_VPR64.8B & Rn_VPR128.16B & Rnn_VPR128.16B & Rnnn_VPR128.16B & Rd_VPR64.8B & tblx & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR64.8B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rm_VPR64.8B); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rm_VPR64.8B); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_tblx(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rm_VPR64.8B); -@endif } # C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 @@ -41430,15 +22667,8 @@ is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b10 & R :^tblx Rd_VPR128.16B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^", "^Rnnn_VPR128.16B^"}", Rm_VPR128.16B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b10 & Rm_VPR128.16B & Rn_VPR128.16B & Rnn_VPR128.16B & Rnnn_VPR128.16B & Rd_VPR128.16B & tblx & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR128.16B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rm_VPR128.16B); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rm_VPR128.16B); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_tblx(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rm_VPR128.16B); -@endif } # C7.2.339 TBL page C7-2169 line 122002 MATCH x0e000000/mask=xbfe09c00 @@ -41452,15 +22682,8 @@ is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b10 & R :^tblx Rd_VPR64.8B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^", "^Rnnn_VPR128.16B^", "^Rnnnn_VPR128.16B^"}", Rm_VPR64.8B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b11 & Rm_VPR64.8B & Rn_VPR128.16B & Rnn_VPR128.16B & Rnnn_VPR128.16B & Rnnnn_VPR128.16B & Rd_VPR64.8B & tblx & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR64.8B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rnnnn_VPR128.16B, Rm_VPR64.8B); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rnnnn_VPR128.16B, Rm_VPR64.8B); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_tblx(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rnnnn_VPR128.16B, Rm_VPR64.8B); -@endif } # C7.2.339 TBL page C7-2169 line 122002 MATCH 
x0e000000/mask=xbfe09c00 @@ -41474,15 +22697,8 @@ is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b11 & R :^tblx Rd_VPR128.16B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^", "^Rnnn_VPR128.16B^", "^Rnnnn_VPR128.16B^"}", Rm_VPR128.16B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b11 & Rm_VPR128.16B & Rn_VPR128.16B & Rnn_VPR128.16B & Rnnn_VPR128.16B & Rnnnn_VPR128.16B & Rd_VPR128.16B & tblx & Zd { -@if defined(SEMANTIC_primitive) Rd_VPR128.16B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rnnnn_VPR128.16B, Rm_VPR128.16B); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rnnnn_VPR128.16B, Rm_VPR128.16B); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_tblx(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rnnnn_VPR128.16B, Rm_VPR128.16B); -@endif } # C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 @@ -41494,90 +22710,27 @@ is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b11 & R :trn1 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.16B; TMPQ1 = Rn_VPR128.16B; # simd shuffle Rd_VPR128.16B = TMPQ1 (@0-0@2-2@4-4@6-6@8-8@10-10@12-12@14-14) lane size 1 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 0, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 2, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 4, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 6, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 8, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 10, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 12, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 14, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; + Rd_VPR128.16B[0,8] = TMPQ1[0,8]; + Rd_VPR128.16B[16,8] = TMPQ1[16,8]; + Rd_VPR128.16B[32,8] = TMPQ1[32,8]; + Rd_VPR128.16B[48,8] = TMPQ1[48,8]; + Rd_VPR128.16B[64,8] = TMPQ1[64,8]; + Rd_VPR128.16B[80,8] = TMPQ1[80,8]; + Rd_VPR128.16B[96,8] = TMPQ1[96,8]; + Rd_VPR128.16B[112,8] = TMPQ1[112,8]; # simd shuffle Rd_VPR128.16B = TMPQ2 (@0-1@2-3@4-5@6-7@8-9@10-11@12-13@14-15) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 0, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 2, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 4, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 6, 1, 16); - 
simd_address_at(tmp6, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 8, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 10, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 12, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 14, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR128.16B[8,8] = TMPQ2[0,8]; + Rd_VPR128.16B[24,8] = TMPQ2[16,8]; + Rd_VPR128.16B[40,8] = TMPQ2[32,8]; + Rd_VPR128.16B[56,8] = TMPQ2[48,8]; + Rd_VPR128.16B[72,8] = TMPQ2[64,8]; + Rd_VPR128.16B[88,8] = TMPQ2[80,8]; + Rd_VPR128.16B[104,8] = TMPQ2[96,8]; + Rd_VPR128.16B[120,8] = TMPQ2[112,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.16B; - local tmp2:16 = Rn_VPR128.16B; - local tmp3:1 = 0; - local tmpd:16 = Rd_VPR128.16B; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 4:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 6:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - tmp3 = SIMD_PIECE(tmp2, 8:1); tmpd = SIMD_COPY(tmpd, tmp3, 8:1); - tmp3 = SIMD_PIECE(tmp2, 10:1); tmpd = SIMD_COPY(tmpd, tmp3, 10:1); - tmp3 = SIMD_PIECE(tmp2, 12:1); tmpd = SIMD_COPY(tmpd, tmp3, 12:1); - tmp3 = SIMD_PIECE(tmp2, 14:1); tmpd = SIMD_COPY(tmpd, tmp3, 14:1); - local tmp4:1 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - tmp4 = SIMD_PIECE(tmp1, 4:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 6:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - tmp4 = SIMD_PIECE(tmp1, 8:1); tmpd = SIMD_COPY(tmpd, tmp4, 9:1); - tmp4 = SIMD_PIECE(tmp1, 10:1); tmpd = SIMD_COPY(tmpd, tmp4, 11:1); - tmp4 = SIMD_PIECE(tmp1, 12:1); tmpd = SIMD_COPY(tmpd, tmp4, 13:1); - tmp4 = SIMD_PIECE(tmp1, 14:1); tmpd = SIMD_COPY(tmpd, tmp4, 15:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_trn1(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 @@ -41589,34 +22742,13 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_ :trn1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.2D; TMPQ1 = Rn_VPR128.2D; # simd shuffle Rd_VPR128.2D = TMPQ1 (@0-0) lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - simd_address_at(tmp4, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp4 = * [register]:8 tmp3; + Rd_VPR128.2D[0,64] = TMPQ1[0,64]; # simd shuffle Rd_VPR128.2D = TMPQ2 (@0-1) lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = * [register]:8 tmp5; + Rd_VPR128.2D[64,64] = TMPQ2[0,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.2D; - local tmp2:16 = Rn_VPR128.2D; - local tmp3:8 = 0; - local 
tmpd:16 = Rd_VPR128.2D; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - local tmp4:8 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_trn1(Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 @@ -41628,34 +22760,13 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1 :trn1 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.2S; TMPD1 = Rn_VPR64.2S; # simd shuffle Rd_VPR64.2S = TMPD1 (@0-0) lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 4, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp4 = * [register]:4 tmp3; + Rd_VPR64.2S[0,32] = TMPD1[0,32]; # simd shuffle Rd_VPR64.2S = TMPD2 (@0-1) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 0, 4, 8); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR64.2S[32,32] = TMPD2[0,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.2S; - local tmp2:8 = Rn_VPR64.2S; - local tmp3:4 = 0; - local tmpd:8 = Rd_VPR64.2S; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - local tmp4:4 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_trn1(Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 @@ -41667,42 +22778,15 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_15 :trn1 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.4H; TMPD1 = Rn_VPR64.4H; # simd shuffle Rd_VPR64.4H = TMPD1 (@0-0@2-2) lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPD1, 2, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp4 = * [register]:2 tmp3; + Rd_VPR64.4H[0,16] = TMPD1[0,16]; + Rd_VPR64.4H[32,16] = TMPD1[32,16]; # simd shuffle Rd_VPR64.4H = TMPD2 (@0-1@2-3) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 0, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPD2, 2, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR64.4H[16,16] = TMPD2[0,16]; + Rd_VPR64.4H[48,16] = TMPD2[32,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.4H; - local tmp2:8 = Rn_VPR64.4H; - local tmp3:2 = 0; - local tmpd:8 = Rd_VPR64.4H; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - local tmp4:2 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd 
= SIMD_COPY(tmpd, tmp4, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_trn1(Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 @@ -41714,42 +22798,15 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_15 :trn1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.4S; TMPQ1 = Rn_VPR128.4S; # simd shuffle Rd_VPR128.4S = TMPQ1 (@0-0@2-2) lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp4 = * [register]:4 tmp3; - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp4 = * [register]:4 tmp3; + Rd_VPR128.4S[0,32] = TMPQ1[0,32]; + Rd_VPR128.4S[64,32] = TMPQ1[64,32]; # simd shuffle Rd_VPR128.4S = TMPQ2 (@0-1@2-3) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR128.4S[32,32] = TMPQ2[0,32]; + Rd_VPR128.4S[96,32] = TMPQ2[64,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.4S; - local tmp2:16 = Rn_VPR128.4S; - local tmp3:4 = 0; - local tmpd:16 = Rd_VPR128.4S; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - local tmp4:4 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_trn1(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 @@ -41761,58 +22818,19 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1 :trn1 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.8B; TMPD1 = Rn_VPR64.8B; # simd shuffle Rd_VPR64.8B = TMPD1 (@0-0@2-2@4-4@6-6) lane size 1 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 2, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 4, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 6, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; + Rd_VPR64.8B[0,8] = TMPD1[0,8]; + Rd_VPR64.8B[16,8] = TMPD1[16,8]; + Rd_VPR64.8B[32,8] = TMPD1[32,8]; + Rd_VPR64.8B[48,8] = TMPD1[48,8]; # simd shuffle Rd_VPR64.8B = TMPD2 (@0-1@2-3@4-5@6-7) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 0, 1, 8); - simd_address_at(tmp6, 
Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 2, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 4, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 6, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR64.8B[8,8] = TMPD2[0,8]; + Rd_VPR64.8B[24,8] = TMPD2[16,8]; + Rd_VPR64.8B[40,8] = TMPD2[32,8]; + Rd_VPR64.8B[56,8] = TMPD2[48,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.8B; - local tmp2:8 = Rn_VPR64.8B; - local tmp3:1 = 0; - local tmpd:8 = Rd_VPR64.8B; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 4:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 6:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - local tmp4:1 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - tmp4 = SIMD_PIECE(tmp1, 4:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 6:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_trn1(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.341 TRN1 page C7-2173 line 122256 MATCH x0e002800/mask=xbf20fc00 @@ -41824,58 +22842,19 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_15 :trn1 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.8H; TMPQ1 = Rn_VPR128.8H; # simd shuffle Rd_VPR128.8H = TMPQ1 (@0-0@2-2@4-4@6-6) lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; + Rd_VPR128.8H[0,16] = TMPQ1[0,16]; + Rd_VPR128.8H[32,16] = TMPQ1[32,16]; + Rd_VPR128.8H[64,16] = TMPQ1[64,16]; + Rd_VPR128.8H[96,16] = TMPQ1[96,16]; # simd shuffle Rd_VPR128.8H = TMPQ2 (@0-1@2-3@4-5@6-7) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR128.8H[16,16] = TMPQ2[0,16]; + Rd_VPR128.8H[48,16] = TMPQ2[32,16]; + Rd_VPR128.8H[80,16] = TMPQ2[64,16]; + Rd_VPR128.8H[112,16] = TMPQ2[96,16]; 
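# Note on the refactored notation: in SLEIGH, X[off,width] is a bit range, i.e. `width`
# bits of X starting at bit `off`, so lane i of a vector with s-byte lanes sits at bit
# offset 8*s*i. For the .8H form above (s = 2) lane 6 is [96,16], which is why the TRN1
# semantics read the even lanes of TMPQ1/TMPQ2 and interleave them into the even and odd
# lanes of Rd without any simd_address_at indirection.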
zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.8H; - local tmp2:16 = Rn_VPR128.8H; - local tmp3:2 = 0; - local tmpd:16 = Rd_VPR128.8H; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 4:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 6:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - local tmp4:2 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - tmp4 = SIMD_PIECE(tmp1, 4:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 6:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_trn1(Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 @@ -41887,90 +22866,27 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1 :trn2 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.16B; TMPQ1 = Rn_VPR128.16B; # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-0@3-2@5-4@7-6@9-8@11-10@13-12@15-14) lane size 1 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 1, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 3, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 5, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 7, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 9, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 11, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 13, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 15, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; + Rd_VPR128.16B[0,8] = TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = TMPQ1[120,8]; # simd shuffle Rd_VPR128.16B = TMPQ2 (@1-1@3-3@5-5@7-7@9-9@11-11@13-13@15-15) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 5, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp6 = * 
[register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR128.16B[8,8] = TMPQ2[8,8]; + Rd_VPR128.16B[24,8] = TMPQ2[24,8]; + Rd_VPR128.16B[40,8] = TMPQ2[40,8]; + Rd_VPR128.16B[56,8] = TMPQ2[56,8]; + Rd_VPR128.16B[72,8] = TMPQ2[72,8]; + Rd_VPR128.16B[88,8] = TMPQ2[88,8]; + Rd_VPR128.16B[104,8] = TMPQ2[104,8]; + Rd_VPR128.16B[120,8] = TMPQ2[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.16B; - local tmp2:16 = Rn_VPR128.16B; - local tmp3:1 = 0; - local tmpd:16 = Rd_VPR128.16B; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 5:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 7:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - tmp3 = SIMD_PIECE(tmp2, 9:1); tmpd = SIMD_COPY(tmpd, tmp3, 8:1); - tmp3 = SIMD_PIECE(tmp2, 11:1); tmpd = SIMD_COPY(tmpd, tmp3, 10:1); - tmp3 = SIMD_PIECE(tmp2, 13:1); tmpd = SIMD_COPY(tmpd, tmp3, 12:1); - tmp3 = SIMD_PIECE(tmp2, 15:1); tmpd = SIMD_COPY(tmpd, tmp3, 14:1); - local tmp4:1 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - tmp4 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - tmp4 = SIMD_PIECE(tmp1, 9:1); tmpd = SIMD_COPY(tmpd, tmp4, 9:1); - tmp4 = SIMD_PIECE(tmp1, 11:1); tmpd = SIMD_COPY(tmpd, tmp4, 11:1); - tmp4 = SIMD_PIECE(tmp1, 13:1); tmpd = SIMD_COPY(tmpd, tmp4, 13:1); - tmp4 = SIMD_PIECE(tmp1, 15:1); tmpd = SIMD_COPY(tmpd, tmp4, 15:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_trn2(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 @@ -41982,34 +22898,13 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_ :trn2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.2D; TMPQ1 = Rn_VPR128.2D; # simd shuffle Rd_VPR128.2D = TMPQ1 (@1-0) lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - simd_address_at(tmp4, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp4 = * [register]:8 tmp3; + Rd_VPR128.2D[0,64] = TMPQ1[64,64]; # simd shuffle Rd_VPR128.2D = TMPQ2 (@1-1) lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = * [register]:8 tmp5; + Rd_VPR128.2D[64,64] = TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.2D; - local tmp2:16 = Rn_VPR128.2D; - local tmp3:8 = 0; - local tmpd:16 = Rd_VPR128.2D; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = 
SIMD_COPY(tmpd, tmp3, 0:1); - local tmp4:8 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_trn2(Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 @@ -42021,34 +22916,13 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1 :trn2 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.2S; TMPD1 = Rn_VPR64.2S; # simd shuffle Rd_VPR64.2S = TMPD1 (@1-0) lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 1, 4, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp4 = * [register]:4 tmp3; + Rd_VPR64.2S[0,32] = TMPD1[32,32]; # simd shuffle Rd_VPR64.2S = TMPD2 (@1-1) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 1, 4, 8); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR64.2S[32,32] = TMPD2[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.2S; - local tmp2:8 = Rn_VPR64.2S; - local tmp3:4 = 0; - local tmpd:8 = Rd_VPR64.2S; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - local tmp4:4 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_trn2(Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 @@ -42060,42 +22934,15 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_15 :trn2 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.4H; TMPD1 = Rn_VPR64.4H; # simd shuffle Rd_VPR64.4H = TMPD1 (@1-0@3-2) lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 1, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPD1, 3, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp4 = * [register]:2 tmp3; + Rd_VPR64.4H[0,16] = TMPD1[16,16]; + Rd_VPR64.4H[32,16] = TMPD1[48,16]; # simd shuffle Rd_VPR64.4H = TMPD2 (@1-1@3-3) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 1, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPD2, 3, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR64.4H[16,16] = TMPD2[16,16]; + Rd_VPR64.4H[48,16] = TMPD2[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.4H; - local tmp2:8 = Rn_VPR64.4H; - local tmp3:2 = 0; - local tmpd:8 = Rd_VPR64.4H; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - local tmp4:2 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - Zd = zext(tmpd); # 
assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_trn2(Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 @@ -42107,42 +22954,15 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_15 :trn2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.4S; TMPQ1 = Rn_VPR128.4S; # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-0@3-2) lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp4 = * [register]:4 tmp3; - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp4 = * [register]:4 tmp3; + Rd_VPR128.4S[0,32] = TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = TMPQ1[96,32]; # simd shuffle Rd_VPR128.4S = TMPQ2 (@1-1@3-3) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR128.4S[32,32] = TMPQ2[32,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.4S; - local tmp2:16 = Rn_VPR128.4S; - local tmp3:4 = 0; - local tmpd:16 = Rd_VPR128.4S; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - local tmp4:4 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_trn2(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 @@ -42154,58 +22974,19 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1 :trn2 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.8B; TMPD1 = Rn_VPR64.8B; # simd shuffle Rd_VPR64.8B = TMPD1 (@1-0@3-2@5-4@7-6) lane size 1 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 1, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 3, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 5, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 7, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; + Rd_VPR64.8B[0,8] = TMPD1[8,8]; + Rd_VPR64.8B[16,8] = TMPD1[24,8]; + Rd_VPR64.8B[32,8] = TMPD1[40,8]; + Rd_VPR64.8B[48,8] = TMPD1[56,8]; # simd shuffle Rd_VPR64.8B = TMPD2 (@1-1@3-3@5-5@7-7) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 1, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp6 = * 
[register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 3, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 5, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 7, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR64.8B[8,8] = TMPD2[8,8]; + Rd_VPR64.8B[24,8] = TMPD2[24,8]; + Rd_VPR64.8B[40,8] = TMPD2[40,8]; + Rd_VPR64.8B[56,8] = TMPD2[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.8B; - local tmp2:8 = Rn_VPR64.8B; - local tmp3:1 = 0; - local tmpd:8 = Rd_VPR64.8B; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 5:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 7:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - local tmp4:1 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - tmp4 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_trn2(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.342 TRN2 page C7-2175 line 122373 MATCH x0e006800/mask=xbf20fc00 @@ -42217,58 +22998,19 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_15 :trn2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.8H; TMPQ1 = Rn_VPR128.8H; # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-0@3-2@5-4@7-6) lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; + Rd_VPR128.8H[0,16] = TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = TMPQ1[112,16]; # simd shuffle Rd_VPR128.8H = TMPQ2 (@1-1@3-3@5-5@7-7) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR128.8H[16,16] = TMPQ2[16,16]; + Rd_VPR128.8H[48,16] = TMPQ2[48,16]; + Rd_VPR128.8H[80,16] = TMPQ2[80,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd 
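# TRN2 mirrors TRN1 but draws on the odd-numbered source lanes: Rd lane 2i comes from
# Rn lane 2i+1 and Rd lane 2i+1 from Rm lane 2i+1, so every TMPQ1/TMPQ2 slice above
# starts one lane (16 bits for .8H) higher than its TRN1 counterpart while the
# destination slices stay the same.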
-@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.8H; - local tmp2:16 = Rn_VPR128.8H; - local tmp3:2 = 0; - local tmpd:16 = Rd_VPR128.8H; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 5:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 7:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - local tmp4:2 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - tmp4 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_trn2(Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.343 UABA page C7-2177 line 122490 MATCH x2e207c00/mask=xbf20fc00 @@ -42279,9 +23021,7 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1 :uaba Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xf & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_uaba(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.343 UABA page C7-2177 line 122490 MATCH x2e207c00/mask=xbf20fc00 @@ -42292,9 +23032,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :uaba Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xf & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_uaba(Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.343 UABA page C7-2177 line 122490 MATCH x2e207c00/mask=xbf20fc00 @@ -42305,9 +23043,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :uaba Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xf & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_uaba(Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.343 UABA page C7-2177 line 122490 MATCH x2e207c00/mask=xbf20fc00 @@ -42318,9 +23054,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :uaba Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xf & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_uaba(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.343 UABA page C7-2177 line 122490 MATCH x2e207c00/mask=xbf20fc00 @@ -42331,9 +23065,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :uaba Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xf & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_uaba(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.343 UABA page 
C7-2177 line 122490 MATCH x2e207c00/mask=xbf20fc00 @@ -42344,9 +23076,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :uaba Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xf & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_uaba(Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.344 UABAL, UABAL2 page C7-2179 line 122590 MATCH x2e205000/mask=xbf20fc00 @@ -42358,77 +23088,24 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :uabal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x5 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - simd_address_at(tmp14, TMPQ5, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) - (* [register]:8 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - simd_address_at(tmp14, TMPQ5, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) - (* [register]:8 tmp13); + TMPQ5[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 8 - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp16, TMPQ5, 0, 8, 16); - simd_address_at(tmp17, TMPQ6, 0, 8, 16); - * [register]:8 tmp17 = MP_INT_ABS(* [register]:8 tmp16); - simd_address_at(tmp16, TMPQ5, 1, 8, 16); - simd_address_at(tmp17, TMPQ6, 1, 8, 16); - * [register]:8 tmp17 = MP_INT_ABS(* [register]:8 tmp16); + TMPQ6[0,64] = MP_INT_ABS(TMPQ5[0,64]); + TMPQ6[64,64] = MP_INT_ABS(TMPQ5[64,64]); # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ6 on lane size 8 - local tmp18:4 = 0; - local tmp19:4 = 0; - local tmp20:4 = 0; - simd_address_at(tmp18, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp19, TMPQ6, 0, 8, 16); - simd_address_at(tmp20, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp20 = (* [register]:8 tmp18) + (* [register]:8 tmp19); - simd_address_at(tmp18, Rd_VPR128.2D, 1, 8, 16); - 
simd_address_at(tmp19, TMPQ6, 1, 8, 16); - simd_address_at(tmp20, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp20 = (* [register]:8 tmp18) + (* [register]:8 tmp19); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ6[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ6[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 4:1); - local tmp5:16 = SIMD_INT_SUB(tmp2, tmp4, 8:1); - local tmp6:16 = SIMD_INT_ABS(tmp5, 8:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp6, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uabal2(Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.344 UABAL, UABAL2 page C7-2179 line 122590 MATCH x2e205000/mask=xbf20fc00 @@ -42440,111 +23117,34 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :uabal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x5 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, TMPQ5, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, TMPQ5, 1, 4, 16); - * [register]:4 tmp14 
= (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, TMPQ5, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, TMPQ5, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); + TMPQ5[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 4 - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp16, TMPQ5, 0, 4, 16); - simd_address_at(tmp17, TMPQ6, 0, 4, 16); - * [register]:4 tmp17 = MP_INT_ABS(* [register]:4 tmp16); - simd_address_at(tmp16, TMPQ5, 1, 4, 16); - simd_address_at(tmp17, TMPQ6, 1, 4, 16); - * [register]:4 tmp17 = MP_INT_ABS(* [register]:4 tmp16); - simd_address_at(tmp16, TMPQ5, 2, 4, 16); - simd_address_at(tmp17, TMPQ6, 2, 4, 16); - * [register]:4 tmp17 = MP_INT_ABS(* [register]:4 tmp16); - simd_address_at(tmp16, TMPQ5, 3, 4, 16); - simd_address_at(tmp17, TMPQ6, 3, 4, 16); - * [register]:4 tmp17 = MP_INT_ABS(* [register]:4 tmp16); + TMPQ6[0,32] = MP_INT_ABS(TMPQ5[0,32]); + TMPQ6[32,32] = MP_INT_ABS(TMPQ5[32,32]); + TMPQ6[64,32] = MP_INT_ABS(TMPQ5[64,32]); + TMPQ6[96,32] = MP_INT_ABS(TMPQ5[96,32]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ6 on lane size 4 - local tmp18:4 = 0; - local tmp19:4 = 0; - local tmp20:4 = 0; - simd_address_at(tmp18, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp19, TMPQ6, 0, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); - simd_address_at(tmp18, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp19, TMPQ6, 1, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); - simd_address_at(tmp18, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp19, TMPQ6, 2, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); - simd_address_at(tmp18, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp19, TMPQ6, 3, 4, 16); - simd_address_at(tmp20, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ6[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ6[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ6[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ6[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 2:1); - local tmp5:16 = SIMD_INT_SUB(tmp2, tmp4, 4:1); - local tmp6:16 = SIMD_INT_ABS(tmp5, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp6, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uabal2(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.344 UABAL, UABAL2 page C7-2179 line 122590 MATCH x2e205000/mask=xbf20fc00 @@ -42556,179 +23156,54 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :uabal2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & 
b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x5 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + TMPQ4[32,16] = zext(TMPD3[16,8]); + TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 2 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 2, 16); - simd_address_at(tmp13, TMPQ4, 0, 2, 16); - 
simd_address_at(tmp14, TMPQ5, 0, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 2, 16); - simd_address_at(tmp13, TMPQ4, 1, 2, 16); - simd_address_at(tmp14, TMPQ5, 1, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 2, 16); - simd_address_at(tmp13, TMPQ4, 2, 2, 16); - simd_address_at(tmp14, TMPQ5, 2, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 2, 16); - simd_address_at(tmp13, TMPQ4, 3, 2, 16); - simd_address_at(tmp14, TMPQ5, 3, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 4, 2, 16); - simd_address_at(tmp13, TMPQ4, 4, 2, 16); - simd_address_at(tmp14, TMPQ5, 4, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 5, 2, 16); - simd_address_at(tmp13, TMPQ4, 5, 2, 16); - simd_address_at(tmp14, TMPQ5, 5, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 6, 2, 16); - simd_address_at(tmp13, TMPQ4, 6, 2, 16); - simd_address_at(tmp14, TMPQ5, 6, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 7, 2, 16); - simd_address_at(tmp13, TMPQ4, 7, 2, 16); - simd_address_at(tmp14, TMPQ5, 7, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); + TMPQ5[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 2 - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp16, TMPQ5, 0, 2, 16); - simd_address_at(tmp17, TMPQ6, 0, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 1, 2, 16); - simd_address_at(tmp17, TMPQ6, 1, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 2, 2, 16); - simd_address_at(tmp17, TMPQ6, 2, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 3, 2, 16); - simd_address_at(tmp17, TMPQ6, 3, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 4, 2, 16); - simd_address_at(tmp17, TMPQ6, 4, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 5, 2, 16); - simd_address_at(tmp17, TMPQ6, 5, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 6, 2, 16); - simd_address_at(tmp17, TMPQ6, 6, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); - simd_address_at(tmp16, TMPQ5, 7, 2, 16); - simd_address_at(tmp17, TMPQ6, 7, 2, 16); - * [register]:2 tmp17 = MP_INT_ABS(* [register]:2 tmp16); + TMPQ6[0,16] = MP_INT_ABS(TMPQ5[0,16]); + TMPQ6[16,16] = MP_INT_ABS(TMPQ5[16,16]); + TMPQ6[32,16] = MP_INT_ABS(TMPQ5[32,16]); + TMPQ6[48,16] = MP_INT_ABS(TMPQ5[48,16]); + TMPQ6[64,16] = MP_INT_ABS(TMPQ5[64,16]); + TMPQ6[80,16] = MP_INT_ABS(TMPQ5[80,16]); + TMPQ6[96,16] = MP_INT_ABS(TMPQ5[96,16]); + TMPQ6[112,16] = MP_INT_ABS(TMPQ5[112,16]); # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ6 
on lane size 2 - local tmp18:4 = 0; - local tmp19:4 = 0; - local tmp20:4 = 0; - simd_address_at(tmp18, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp19, TMPQ6, 0, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp19, TMPQ6, 1, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp19, TMPQ6, 2, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp19, TMPQ6, 3, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp19, TMPQ6, 4, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp19, TMPQ6, 5, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp19, TMPQ6, 6, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); - simd_address_at(tmp18, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp19, TMPQ6, 7, 2, 16); - simd_address_at(tmp20, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp20 = (* [register]:2 tmp18) + (* [register]:2 tmp19); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ6[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ6[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ6[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ6[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ6[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ6[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ6[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ6[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 1:1); - local tmp5:16 = SIMD_INT_SUB(tmp2, tmp4, 2:1); - local tmp6:16 = SIMD_INT_ABS(tmp5, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp6, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uabal2(Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.344 UABAL, UABAL2 page C7-2179 line 122590 MATCH x2e205000/mask=xbf20fc00 @@ -42740,69 +23215,22 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :uabal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x5 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 
4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = zext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = zext(* [register]:4 tmp5); + TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - simd_address_at(tmp10, TMPQ3, 0, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) - (* [register]:8 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - simd_address_at(tmp10, TMPQ3, 1, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) - (* [register]:8 tmp9); + TMPQ3[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - * [register]:8 tmp13 = MP_INT_ABS(* [register]:8 tmp12); - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - * [register]:8 tmp13 = MP_INT_ABS(* [register]:8 tmp12); + TMPQ4[0,64] = MP_INT_ABS(TMPQ3[0,64]); + TMPQ4[64,64] = MP_INT_ABS(TMPQ3[64,64]); # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8 - local tmp14:4 = 0; - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp14, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp15, TMPQ4, 0, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp16 = (* [register]:8 tmp14) + (* [register]:8 tmp15); - simd_address_at(tmp14, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp15, TMPQ4, 1, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp16 = (* [register]:8 tmp14) + (* [register]:8 tmp15); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.2S, 4:1); - local tmp3:16 = SIMD_INT_SUB(tmp1, tmp2, 8:1); - local tmp4:16 = SIMD_INT_ABS(tmp3, 8:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uabal(Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.344 UABAL, UABAL2 page C7-2179 line 122590 MATCH x2e205000/mask=xbf20fc00 @@ -42814,103 +23242,32 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :uabal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x5 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, 
Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); + TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); + TMPQ3[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - * [register]:4 tmp13 = MP_INT_ABS(* [register]:4 tmp12); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - * [register]:4 tmp13 = MP_INT_ABS(* [register]:4 tmp12); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - * [register]:4 tmp13 = MP_INT_ABS(* [register]:4 tmp12); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - * [register]:4 tmp13 = MP_INT_ABS(* [register]:4 tmp12); + TMPQ4[0,32] = MP_INT_ABS(TMPQ3[0,32]); + TMPQ4[32,32] = MP_INT_ABS(TMPQ3[32,32]); + TMPQ4[64,32] = MP_INT_ABS(TMPQ3[64,32]); + TMPQ4[96,32] = MP_INT_ABS(TMPQ3[96,32]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 - local tmp14:4 = 0; - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp15, TMPQ4, 0, 4, 16); - 
simd_address_at(tmp16, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) + (* [register]:4 tmp15); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp15, TMPQ4, 1, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) + (* [register]:4 tmp15); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp15, TMPQ4, 2, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) + (* [register]:4 tmp15); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp15, TMPQ4, 3, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp16 = (* [register]:4 tmp14) + (* [register]:4 tmp15); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.4H, 2:1); - local tmp3:16 = SIMD_INT_SUB(tmp1, tmp2, 4:1); - local tmp4:16 = SIMD_INT_ABS(tmp3, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uabal(Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.344 UABAL, UABAL2 page C7-2179 line 122590 MATCH x2e205000/mask=xbf20fc00 @@ -42922,171 +23279,52 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :uabal Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x5 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; 
- simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); + TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 2 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 2, 16); - simd_address_at(tmp9, TMPQ2, 0, 2, 16); - simd_address_at(tmp10, TMPQ3, 0, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 2, 16); - simd_address_at(tmp9, TMPQ2, 1, 2, 16); - simd_address_at(tmp10, TMPQ3, 1, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 2, 16); - simd_address_at(tmp9, TMPQ2, 2, 2, 16); - simd_address_at(tmp10, TMPQ3, 2, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 2, 16); - simd_address_at(tmp9, TMPQ2, 3, 2, 16); - simd_address_at(tmp10, TMPQ3, 3, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 4, 2, 16); - simd_address_at(tmp9, TMPQ2, 4, 2, 16); - simd_address_at(tmp10, TMPQ3, 4, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 5, 2, 16); - simd_address_at(tmp9, TMPQ2, 5, 2, 16); - simd_address_at(tmp10, TMPQ3, 5, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 6, 2, 16); - simd_address_at(tmp9, TMPQ2, 6, 2, 16); - simd_address_at(tmp10, TMPQ3, 6, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 7, 2, 16); - simd_address_at(tmp9, TMPQ2, 7, 2, 16); - simd_address_at(tmp10, TMPQ3, 7, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); + TMPQ3[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; # simd unary TMPQ4 = 
MP_INT_ABS(TMPQ3) on lane size 2 - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp12, TMPQ3, 0, 2, 16); - simd_address_at(tmp13, TMPQ4, 0, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 1, 2, 16); - simd_address_at(tmp13, TMPQ4, 1, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 2, 2, 16); - simd_address_at(tmp13, TMPQ4, 2, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 3, 2, 16); - simd_address_at(tmp13, TMPQ4, 3, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 4, 2, 16); - simd_address_at(tmp13, TMPQ4, 4, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 5, 2, 16); - simd_address_at(tmp13, TMPQ4, 5, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 6, 2, 16); - simd_address_at(tmp13, TMPQ4, 6, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); - simd_address_at(tmp12, TMPQ3, 7, 2, 16); - simd_address_at(tmp13, TMPQ4, 7, 2, 16); - * [register]:2 tmp13 = MP_INT_ABS(* [register]:2 tmp12); + TMPQ4[0,16] = MP_INT_ABS(TMPQ3[0,16]); + TMPQ4[16,16] = MP_INT_ABS(TMPQ3[16,16]); + TMPQ4[32,16] = MP_INT_ABS(TMPQ3[32,16]); + TMPQ4[48,16] = MP_INT_ABS(TMPQ3[48,16]); + TMPQ4[64,16] = MP_INT_ABS(TMPQ3[64,16]); + TMPQ4[80,16] = MP_INT_ABS(TMPQ3[80,16]); + TMPQ4[96,16] = MP_INT_ABS(TMPQ3[96,16]); + TMPQ4[112,16] = MP_INT_ABS(TMPQ3[112,16]); # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ4 on lane size 2 - local tmp14:4 = 0; - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp14, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp15, TMPQ4, 0, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp15, TMPQ4, 1, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp15, TMPQ4, 2, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp15, TMPQ4, 3, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp15, TMPQ4, 4, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp15, TMPQ4, 5, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp15, TMPQ4, 6, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); - simd_address_at(tmp14, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp15, TMPQ4, 7, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp16 = (* [register]:2 tmp14) + (* [register]:2 tmp15); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + 
TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.8B, 1:1); - local tmp3:16 = SIMD_INT_SUB(tmp1, tmp2, 2:1); - local tmp4:16 = SIMD_INT_ABS(tmp3, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp4, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uabal(Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.345 UABD page C7-2181 line 122708 MATCH x2e207400/mask=xbf20fc00 @@ -43097,9 +23335,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :uabd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xe & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_uabd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.345 UABD page C7-2181 line 122708 MATCH x2e207400/mask=xbf20fc00 @@ -43113,88 +23349,25 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :uabd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xe & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.2S - Rm_VPR64.2S on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) - (* [register]:4 tmp3); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp2) - (* [register]:4 tmp3); + TMPD1[0,32] = Rn_VPR64.2S[0,32] - Rm_VPR64.2S[0,32]; + TMPD1[32,32] = Rn_VPR64.2S[32,32] - Rm_VPR64.2S[32,32]; # simd infix TMPD2 = Rm_VPR64.2S - Rn_VPR64.2S on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp7, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp8, TMPD2, 0, 4, 8); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); - simd_address_at(tmp6, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp7, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp8, TMPD2, 1, 4, 8); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); + TMPD2[0,32] = Rm_VPR64.2S[0,32] - Rn_VPR64.2S[0,32]; + TMPD2[32,32] = Rm_VPR64.2S[32,32] - Rn_VPR64.2S[32,32]; # simd infix TMPD2 = TMPD2 * 2:4 on lane size 4 - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD2, 0, 4, 8); - simd_address_at(tmp10, TMPD2, 0, 4, 8); - * [register]:4 tmp10 = (* [register]:4 tmp9) * 2:4; - simd_address_at(tmp9, TMPD2, 1, 4, 8); - simd_address_at(tmp10, TMPD2, 1, 4, 8); - * [register]:4 tmp10 = (* [register]:4 tmp9) * 2:4; + TMPD2[0,32] = TMPD2[0,32] * 2:4; + TMPD2[32,32] = TMPD2[32,32] * 2:4; # simd infix TMPD3 = Rn_VPR64.2S < 
Rm_VPR64.2S on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp13, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp14, TMPD3, 0, 4, 8); - * [register]:4 tmp14 = zext((* [register]:4 tmp12) < (* [register]:4 tmp13)); - simd_address_at(tmp12, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp13, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp14, TMPD3, 1, 4, 8); - * [register]:4 tmp14 = zext((* [register]:4 tmp12) < (* [register]:4 tmp13)); + TMPD3[0,32] = zext(Rn_VPR64.2S[0,32] < Rm_VPR64.2S[0,32]); + TMPD3[32,32] = zext(Rn_VPR64.2S[32,32] < Rm_VPR64.2S[32,32]); # simd infix TMPD2 = TMPD2 * TMPD3 on lane size 4 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, TMPD2, 0, 4, 8); - simd_address_at(tmp16, TMPD3, 0, 4, 8); - simd_address_at(tmp17, TMPD2, 0, 4, 8); - * [register]:4 tmp17 = (* [register]:4 tmp15) * (* [register]:4 tmp16); - simd_address_at(tmp15, TMPD2, 1, 4, 8); - simd_address_at(tmp16, TMPD3, 1, 4, 8); - simd_address_at(tmp17, TMPD2, 1, 4, 8); - * [register]:4 tmp17 = (* [register]:4 tmp15) * (* [register]:4 tmp16); + TMPD2[0,32] = TMPD2[0,32] * TMPD3[0,32]; + TMPD2[32,32] = TMPD2[32,32] * TMPD3[32,32]; # simd infix Rd_VPR64.2S = TMPD1 + TMPD2 on lane size 4 - local tmp18:4 = 0; - local tmp19:4 = 0; - local tmp20:4 = 0; - simd_address_at(tmp18, TMPD1, 0, 4, 8); - simd_address_at(tmp19, TMPD2, 0, 4, 8); - simd_address_at(tmp20, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); - simd_address_at(tmp18, TMPD1, 1, 4, 8); - simd_address_at(tmp19, TMPD2, 1, 4, 8); - simd_address_at(tmp20, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp20 = (* [register]:4 tmp18) + (* [register]:4 tmp19); + Rd_VPR64.2S[0,32] = TMPD1[0,32] + TMPD2[0,32]; + Rd_VPR64.2S[32,32] = TMPD1[32,32] + TMPD2[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_SUB(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - local tmp2:8 = SIMD_INT_SUB(Rm_VPR64.2S, Rn_VPR64.2S, 4:1); - tmp2 = SIMD_INT_MULT(tmp2, 2:4); - local tmp3:8 = SIMD_INT_LESS(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); - tmp2 = SIMD_INT_MULT(tmp2, tmp3, 4:1); - local tmpd:8 = SIMD_INT_ADD(tmp1, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_uabd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.345 UABD page C7-2181 line 122708 MATCH x2e207400/mask=xbf20fc00 @@ -43205,9 +23378,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :uabd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xe & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_uabd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.345 UABD page C7-2181 line 122708 MATCH x2e207400/mask=xbf20fc00 @@ -43218,9 +23389,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :uabd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xe & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_uabd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.345 UABD page C7-2181 line 122708 MATCH x2e207400/mask=xbf20fc00 @@ -43231,9 +23400,7 @@ is 
b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :uabd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xe & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_uabd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.345 UABD page C7-2181 line 122708 MATCH x2e207400/mask=xbf20fc00 @@ -43244,9 +23411,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :uabd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xe & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_uabd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.346 UABDL, UABDL2 page C7-2183 line 122808 MATCH x2e207000/mask=xbf20fc00 @@ -43258,64 +23423,21 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :uabdl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x7 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - simd_address_at(tmp14, TMPQ5, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) - (* [register]:8 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - simd_address_at(tmp14, TMPQ5, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) - (* [register]:8 tmp13); + TMPQ5[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; # simd unary Rd_VPR128.2D = MP_INT_ABS(TMPQ5) on lane size 8 - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp15, TMPQ5, 0, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp16 = MP_INT_ABS(* [register]:8 tmp15); - simd_address_at(tmp15, TMPQ5, 1, 8, 16); - simd_address_at(tmp16, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp16 = MP_INT_ABS(* [register]:8 tmp15); + Rd_VPR128.2D[0,64] = MP_INT_ABS(TMPQ5[0,64]); + 
Rd_VPR128.2D[64,64] = MP_INT_ABS(TMPQ5[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 4:1); - local tmp5:16 = SIMD_INT_SUB(tmp2, tmp4, 8:1); - local tmpd:16 = SIMD_INT_ABS(tmp5, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uabdl2(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.346 UABDL, UABDL2 page C7-2183 line 122808 MATCH x2e207000/mask=xbf20fc00 @@ -43327,90 +23449,29 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :uabdl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x7 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, TMPQ5, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, TMPQ5, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, TMPQ5, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 4, 16); - 
simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, TMPQ5, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); + TMPQ5[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; # simd unary Rd_VPR128.4S = MP_INT_ABS(TMPQ5) on lane size 4 - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp15, TMPQ5, 0, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp16 = MP_INT_ABS(* [register]:4 tmp15); - simd_address_at(tmp15, TMPQ5, 1, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp16 = MP_INT_ABS(* [register]:4 tmp15); - simd_address_at(tmp15, TMPQ5, 2, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp16 = MP_INT_ABS(* [register]:4 tmp15); - simd_address_at(tmp15, TMPQ5, 3, 4, 16); - simd_address_at(tmp16, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp16 = MP_INT_ABS(* [register]:4 tmp15); + Rd_VPR128.4S[0,32] = MP_INT_ABS(TMPQ5[0,32]); + Rd_VPR128.4S[32,32] = MP_INT_ABS(TMPQ5[32,32]); + Rd_VPR128.4S[64,32] = MP_INT_ABS(TMPQ5[64,32]); + Rd_VPR128.4S[96,32] = MP_INT_ABS(TMPQ5[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 2:1); - local tmp5:16 = SIMD_INT_SUB(tmp2, tmp4, 4:1); - local tmpd:16 = SIMD_INT_ABS(tmp5, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uabdl2(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.346 UABDL, UABDL2 page C7-2183 line 122808 MATCH x2e207000/mask=xbf20fc00 @@ -43422,142 +23483,45 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :uabdl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x7 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * 
[register]:8 tmp6; + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + TMPQ4[32,16] = zext(TMPD3[16,8]); + TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 2 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 2, 16); - simd_address_at(tmp13, TMPQ4, 0, 2, 16); - simd_address_at(tmp14, TMPQ5, 0, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 2, 16); - simd_address_at(tmp13, TMPQ4, 1, 2, 16); - simd_address_at(tmp14, TMPQ5, 1, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 2, 16); - simd_address_at(tmp13, TMPQ4, 2, 2, 16); - simd_address_at(tmp14, TMPQ5, 2, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 2, 16); - simd_address_at(tmp13, TMPQ4, 3, 2, 16); - simd_address_at(tmp14, TMPQ5, 3, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 4, 2, 16); - simd_address_at(tmp13, TMPQ4, 4, 2, 16); - simd_address_at(tmp14, TMPQ5, 4, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 5, 2, 16); - simd_address_at(tmp13, TMPQ4, 5, 2, 16); - simd_address_at(tmp14, TMPQ5, 5, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 6, 2, 16); - simd_address_at(tmp13, TMPQ4, 6, 2, 16); - simd_address_at(tmp14, TMPQ5, 6, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 7, 2, 16); - simd_address_at(tmp13, TMPQ4, 7, 2, 16); - simd_address_at(tmp14, TMPQ5, 7, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) - (* [register]:2 tmp13); + 
TMPQ5[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; # simd unary Rd_VPR128.8H = MP_INT_ABS(TMPQ5) on lane size 2 - local tmp15:4 = 0; - local tmp16:4 = 0; - simd_address_at(tmp15, TMPQ5, 0, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 1, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 2, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 3, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 4, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 5, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 6, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); - simd_address_at(tmp15, TMPQ5, 7, 2, 16); - simd_address_at(tmp16, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp16 = MP_INT_ABS(* [register]:2 tmp15); + Rd_VPR128.8H[0,16] = MP_INT_ABS(TMPQ5[0,16]); + Rd_VPR128.8H[16,16] = MP_INT_ABS(TMPQ5[16,16]); + Rd_VPR128.8H[32,16] = MP_INT_ABS(TMPQ5[32,16]); + Rd_VPR128.8H[48,16] = MP_INT_ABS(TMPQ5[48,16]); + Rd_VPR128.8H[64,16] = MP_INT_ABS(TMPQ5[64,16]); + Rd_VPR128.8H[80,16] = MP_INT_ABS(TMPQ5[80,16]); + Rd_VPR128.8H[96,16] = MP_INT_ABS(TMPQ5[96,16]); + Rd_VPR128.8H[112,16] = MP_INT_ABS(TMPQ5[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 1:1); - local tmp5:16 = SIMD_INT_SUB(tmp2, tmp4, 2:1); - local tmpd:16 = SIMD_INT_ABS(tmp5, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uabdl2(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.346 UABDL, UABDL2 page C7-2183 line 122808 MATCH x2e207000/mask=xbf20fc00 @@ -43569,56 +23533,19 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :uabdl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x7 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 
0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = zext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = zext(* [register]:4 tmp5); + TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - simd_address_at(tmp10, TMPQ3, 0, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) - (* [register]:8 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - simd_address_at(tmp10, TMPQ3, 1, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) - (* [register]:8 tmp9); + TMPQ3[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; # simd unary Rd_VPR128.2D = MP_INT_ABS(TMPQ3) on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ3, 0, 8, 16); - simd_address_at(tmp12, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp12 = MP_INT_ABS(* [register]:8 tmp11); - simd_address_at(tmp11, TMPQ3, 1, 8, 16); - simd_address_at(tmp12, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp12 = MP_INT_ABS(* [register]:8 tmp11); + Rd_VPR128.2D[0,64] = MP_INT_ABS(TMPQ3[0,64]); + Rd_VPR128.2D[64,64] = MP_INT_ABS(TMPQ3[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.2S, 4:1); - local tmp3:16 = SIMD_INT_SUB(tmp1, tmp2, 8:1); - local tmpd:16 = SIMD_INT_ABS(tmp3, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uabdl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.346 UABDL, UABDL2 page C7-2183 line 122808 MATCH x2e207000/mask=xbf20fc00 @@ -43630,82 +23557,27 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :uabdl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x7 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Rn_VPR128 & Rm_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, 
Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); + TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) - (* [register]:4 tmp9); + TMPQ3[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; # simd unary Rd_VPR128.4S = MP_INT_ABS(TMPQ3) on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ3, 0, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp12 = MP_INT_ABS(* [register]:4 tmp11); - simd_address_at(tmp11, TMPQ3, 1, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp12 = MP_INT_ABS(* [register]:4 tmp11); - simd_address_at(tmp11, TMPQ3, 2, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp12 = MP_INT_ABS(* [register]:4 tmp11); - simd_address_at(tmp11, TMPQ3, 3, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp12 = MP_INT_ABS(* [register]:4 tmp11); + Rd_VPR128.4S[0,32] = MP_INT_ABS(TMPQ3[0,32]); + Rd_VPR128.4S[32,32] = MP_INT_ABS(TMPQ3[32,32]); + Rd_VPR128.4S[64,32] = MP_INT_ABS(TMPQ3[64,32]); + Rd_VPR128.4S[96,32] = MP_INT_ABS(TMPQ3[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.4H, 2:1); - local tmp3:16 = SIMD_INT_SUB(tmp1, tmp2, 4:1); - local tmpd:16 = SIMD_INT_ABS(tmp3, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uabdl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.346 UABDL, UABDL2 page C7-2183 line 122808 MATCH x2e207000/mask=xbf20fc00 @@ -43717,134 +23589,43 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :uabdl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x7 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Rn_VPR128 & Rm_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); 
- simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); + TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 2 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 2, 16); - simd_address_at(tmp9, TMPQ2, 0, 2, 16); - simd_address_at(tmp10, TMPQ3, 0, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 2, 16); - simd_address_at(tmp9, TMPQ2, 1, 2, 16); - simd_address_at(tmp10, TMPQ3, 1, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 2, 16); - simd_address_at(tmp9, TMPQ2, 2, 2, 16); - simd_address_at(tmp10, TMPQ3, 2, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, 
TMPQ1, 3, 2, 16); - simd_address_at(tmp9, TMPQ2, 3, 2, 16); - simd_address_at(tmp10, TMPQ3, 3, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 4, 2, 16); - simd_address_at(tmp9, TMPQ2, 4, 2, 16); - simd_address_at(tmp10, TMPQ3, 4, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 5, 2, 16); - simd_address_at(tmp9, TMPQ2, 5, 2, 16); - simd_address_at(tmp10, TMPQ3, 5, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 6, 2, 16); - simd_address_at(tmp9, TMPQ2, 6, 2, 16); - simd_address_at(tmp10, TMPQ3, 6, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 7, 2, 16); - simd_address_at(tmp9, TMPQ2, 7, 2, 16); - simd_address_at(tmp10, TMPQ3, 7, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) - (* [register]:2 tmp9); + TMPQ3[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; # simd unary Rd_VPR128.8H = MP_INT_ABS(TMPQ3) on lane size 2 - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp11, TMPQ3, 0, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 1, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 2, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 3, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 4, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 5, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 6, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); - simd_address_at(tmp11, TMPQ3, 7, 2, 16); - simd_address_at(tmp12, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp12 = MP_INT_ABS(* [register]:2 tmp11); + Rd_VPR128.8H[0,16] = MP_INT_ABS(TMPQ3[0,16]); + Rd_VPR128.8H[16,16] = MP_INT_ABS(TMPQ3[16,16]); + Rd_VPR128.8H[32,16] = MP_INT_ABS(TMPQ3[32,16]); + Rd_VPR128.8H[48,16] = MP_INT_ABS(TMPQ3[48,16]); + Rd_VPR128.8H[64,16] = MP_INT_ABS(TMPQ3[64,16]); + Rd_VPR128.8H[80,16] = MP_INT_ABS(TMPQ3[80,16]); + Rd_VPR128.8H[96,16] = MP_INT_ABS(TMPQ3[96,16]); + Rd_VPR128.8H[112,16] = MP_INT_ABS(TMPQ3[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.8B, 1:1); - local tmp3:16 = SIMD_INT_SUB(tmp1, tmp2, 2:1); - local tmpd:16 = SIMD_INT_ABS(tmp3, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uabdl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.347 UADALP page C7-2185 line 122926 MATCH x2e206800/mask=xbf3ffc00 @@ 
-43856,125 +23637,58 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :uadalp Rd_VPR128.8H, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.16B) on pairs lane size (1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:1 = 0; - local tmp6:1 = 0; - local tmp7:2 = 0; - local tmp8:2 = 0; - simd_address_at(tmp2, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp4, TMPQ1, 0, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp4, TMPQ1, 1, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp4, TMPQ1, 2, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp4, TMPQ1, 3, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp4, TMPQ1, 4, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp4, TMPQ1, 5, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp4, TMPQ1, 6, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp4, TMPQ1, 7, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; + tmp2 = Rn_VPR128.16B[0,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[8,8]; + tmp5 = zext(tmp3); + TMPQ1[0,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[16,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[24,8]; + tmp5 = zext(tmp3); + TMPQ1[16,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[32,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[40,8]; + tmp5 = zext(tmp3); + TMPQ1[32,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[48,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[56,8]; + tmp5 = zext(tmp3); + TMPQ1[48,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[64,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[72,8]; + tmp5 = zext(tmp3); + TMPQ1[64,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[80,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[88,8]; + tmp5 = zext(tmp3); + TMPQ1[80,16] = 
tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[96,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[104,8]; + tmp5 = zext(tmp3); + TMPQ1[96,16] = tmp4 + tmp5; + tmp2 = Rn_VPR128.16B[112,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.16B[120,8]; + tmp5 = zext(tmp3); + TMPQ1[112,16] = tmp4 + tmp5; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 - local tmp9:4 = 0; - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp9, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp10, TMPQ1, 0, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp10, TMPQ1, 1, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp10, TMPQ1, 2, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp10, TMPQ1, 3, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp10, TMPQ1, 4, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp10, TMPQ1, 5, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp10, TMPQ1, 6, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp10, TMPQ1, 7, 2, 16); - simd_address_at(tmp11, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR128.16B); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uadalp(Rd_VPR128.8H, Rn_VPR128.16B, 1:1); -@endif } # C7.2.347 UADALP page C7-2185 line 122926 MATCH x2e206800/mask=xbf3ffc00 @@ -43986,41 +23700,15 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :uadalp Rd_VPR64.1D, Rn_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.1D & Zd { -@if defined(SEMANTIC_primitive) - TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.2S) on pairs lane size (4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:8 = 0; - local tmp8:8 = 0; - 
simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 8, 8); - tmp5 = * [register]:4 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:4 tmp3; - tmp8 = zext(tmp6); - * [register]:8 tmp4 = tmp7 + tmp8; + tmp2 = Rn_VPR64.2S[0,32]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR64.2S[32,32]; + tmp5 = zext(tmp3); + tmpd1 = tmp4 + tmp5; # simd infix Rd_VPR64.1D = Rd_VPR64.1D + TMPD1 on lane size 8 - local tmp9:4 = 0; - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp9, Rd_VPR64.1D, 0, 8, 8); - simd_address_at(tmp10, TMPD1, 0, 8, 8); - simd_address_at(tmp11, Rd_VPR64.1D, 0, 8, 8); - * [register]:8 tmp11 = (* [register]:8 tmp9) + (* [register]:8 tmp10); + Rd_VPR64.1D[0,64] = Rd_VPR64.1D[0,64] + tmpd1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR64.2S); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.1D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.1D -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.1D = NEON_uadalp(Rd_VPR64.1D, Rn_VPR64.2S, 4:1); -@endif } # C7.2.347 UADALP page C7-2185 line 122926 MATCH x2e206800/mask=xbf3ffc00 @@ -44032,53 +23720,22 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :uadalp Rd_VPR64.2S, Rn_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.4H) on pairs lane size (2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:2 = 0; - local tmp6:2 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - tmp5 = * [register]:2 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = zext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - tmp5 = * [register]:2 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = zext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; + tmp2 = Rn_VPR64.4H[0,16]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR64.4H[16,16]; + tmp5 = zext(tmp3); + TMPD1[0,32] = tmp4 + tmp5; + tmp2 = Rn_VPR64.4H[32,16]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR64.4H[48,16]; + tmp5 = zext(tmp3); + TMPD1[32,32] = tmp4 + tmp5; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 - local tmp9:4 = 0; - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp9, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp10, TMPD1, 0, 4, 8); - simd_address_at(tmp11, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp11 = (* [register]:4 tmp9) + (* [register]:4 tmp10); - simd_address_at(tmp9, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp10, TMPD1, 1, 4, 8); - simd_address_at(tmp11, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp11 = (* [register]:4 tmp9) + (* [register]:4 tmp10); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR64.4H); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_uadalp(Rd_VPR64.2S, Rn_VPR64.4H, 2:1); -@endif } # C7.2.347 UADALP page C7-2185 line 122926 MATCH 
x2e206800/mask=xbf3ffc00 @@ -44090,53 +23747,22 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :uadalp Rd_VPR128.2D, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.4S) on pairs lane size (4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:8 = 0; - local tmp8:8 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - tmp5 = * [register]:4 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:4 tmp3; - tmp8 = zext(tmp6); - * [register]:8 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - tmp5 = * [register]:4 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:4 tmp3; - tmp8 = zext(tmp6); - * [register]:8 tmp4 = tmp7 + tmp8; + tmp2 = Rn_VPR128.4S[0,32]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.4S[32,32]; + tmp5 = zext(tmp3); + TMPQ1[0,64] = tmp4 + tmp5; + tmp2 = Rn_VPR128.4S[64,32]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.4S[96,32]; + tmp5 = zext(tmp3); + TMPQ1[64,64] = tmp4 + tmp5; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 - local tmp9:4 = 0; - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp9, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp10, TMPQ1, 0, 8, 16); - simd_address_at(tmp11, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp9) + (* [register]:8 tmp10); - simd_address_at(tmp9, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp10, TMPQ1, 1, 8, 16); - simd_address_at(tmp11, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp9) + (* [register]:8 tmp10); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR128.4S); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uadalp(Rd_VPR128.2D, Rn_VPR128.4S, 4:1); -@endif } # C7.2.347 UADALP page C7-2185 line 122926 MATCH x2e206800/mask=xbf3ffc00 @@ -44148,77 +23774,34 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :uadalp Rd_VPR64.4H, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.8B) on pairs lane size (1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:1 = 0; - local tmp6:1 = 0; - local tmp7:2 = 0; - local tmp8:2 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp4, TMPD1, 0, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR64.8B, 
4, 1, 8); - simd_address_at(tmp3, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; + tmp2 = Rn_VPR64.8B[0,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR64.8B[8,8]; + tmp5 = zext(tmp3); + TMPD1[0,16] = tmp4 + tmp5; + tmp2 = Rn_VPR64.8B[16,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR64.8B[24,8]; + tmp5 = zext(tmp3); + TMPD1[16,16] = tmp4 + tmp5; + tmp2 = Rn_VPR64.8B[32,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR64.8B[40,8]; + tmp5 = zext(tmp3); + TMPD1[32,16] = tmp4 + tmp5; + tmp2 = Rn_VPR64.8B[48,8]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR64.8B[56,8]; + tmp5 = zext(tmp3); + TMPD1[48,16] = tmp4 + tmp5; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 - local tmp9:4 = 0; - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp9, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp10, TMPD1, 0, 2, 8); - simd_address_at(tmp11, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp10, TMPD1, 1, 2, 8); - simd_address_at(tmp11, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp10, TMPD1, 2, 2, 8); - simd_address_at(tmp11, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); - simd_address_at(tmp9, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp10, TMPD1, 3, 2, 8); - simd_address_at(tmp11, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp11 = (* [register]:2 tmp9) + (* [register]:2 tmp10); + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR64.8B); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.4H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_uadalp(Rd_VPR64.4H, Rn_VPR64.8B, 1:1); -@endif } # C7.2.347 UADALP page C7-2185 line 122926 MATCH x2e206800/mask=xbf3ffc00 @@ -44230,77 +23813,34 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :uadalp Rd_VPR128.4S, Rn_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.8H) on pairs lane size (2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:2 = 0; - local tmp6:2 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = zext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 
16); - tmp5 = * [register]:2 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = zext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = zext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = zext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; + tmp2 = Rn_VPR128.8H[0,16]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.8H[16,16]; + tmp5 = zext(tmp3); + TMPQ1[0,32] = tmp4 + tmp5; + tmp2 = Rn_VPR128.8H[32,16]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.8H[48,16]; + tmp5 = zext(tmp3); + TMPQ1[32,32] = tmp4 + tmp5; + tmp2 = Rn_VPR128.8H[64,16]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.8H[80,16]; + tmp5 = zext(tmp3); + TMPQ1[64,32] = tmp4 + tmp5; + tmp2 = Rn_VPR128.8H[96,16]; + tmp4 = zext(tmp2); + tmp3 = Rn_VPR128.8H[112,16]; + tmp5 = zext(tmp3); + TMPQ1[96,32] = tmp4 + tmp5; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 - local tmp9:4 = 0; - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp9, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp10, TMPQ1, 0, 4, 16); - simd_address_at(tmp11, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp9) + (* [register]:4 tmp10); - simd_address_at(tmp9, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp10, TMPQ1, 1, 4, 16); - simd_address_at(tmp11, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp9) + (* [register]:4 tmp10); - simd_address_at(tmp9, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp10, TMPQ1, 2, 4, 16); - simd_address_at(tmp11, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp9) + (* [register]:4 tmp10); - simd_address_at(tmp9, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp10, TMPQ1, 3, 4, 16); - simd_address_at(tmp11, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp9) + (* [register]:4 tmp10); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = 0; - tmp1 = SIMD_INT_ADD(Rn_VPR128.8H); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uadalp(Rd_VPR128.4S, Rn_VPR128.8H, 2:1); -@endif } # C7.2.348 UADDL, UADDL2 page C7-2187 line 123035 MATCH x2e200000/mask=xbf20fc00 @@ -44312,54 +23852,18 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :uaddl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x0 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 
tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix Rd_VPR128.2D = TMPQ2 + TMPQ4 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, TMPQ4, 0, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) + (* [register]:8 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, TMPQ4, 1, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) + (* [register]:8 tmp12); + Rd_VPR128.2D[0,64] = TMPQ2[0,64] + TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] + TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 4:1); - local tmpd:16 = SIMD_INT_ADD(tmp2, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uaddl2(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.348 UADDL, UADDL2 page C7-2187 line 123035 MATCH x2e200000/mask=xbf20fc00 @@ -44371,74 +23875,24 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :uaddl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x0 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 
tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 + TMPQ4 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, TMPQ4, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, TMPQ4, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, TMPQ4, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, TMPQ4, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = TMPQ2[0,32] + TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] + TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] + TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] + TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_ADD(tmp2, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uaddl2(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.348 UADDL, UADDL2 page C7-2187 line 123035 MATCH x2e200000/mask=xbf20fc00 @@ -44450,114 +23904,36 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :uaddl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x0 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - 
simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + TMPQ4[32,16] = zext(TMPD3[16,8]); + TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 + TMPQ4 on lane size 2 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 2, 16); - simd_address_at(tmp12, TMPQ4, 0, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 2, 16); - simd_address_at(tmp12, TMPQ4, 1, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 2, 2, 16); - simd_address_at(tmp12, TMPQ4, 2, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 3, 2, 16); - simd_address_at(tmp12, TMPQ4, 3, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 4, 2, 16); - simd_address_at(tmp12, TMPQ4, 4, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 5, 2, 16); - simd_address_at(tmp12, TMPQ4, 5, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 5, 2, 16); - 
* [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 6, 2, 16); - simd_address_at(tmp12, TMPQ4, 6, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 7, 2, 16); - simd_address_at(tmp12, TMPQ4, 7, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); + Rd_VPR128.8H[0,16] = TMPQ2[0,16] + TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] + TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] + TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] + TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] + TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] + TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] + TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] + TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 1:1); - local tmpd:16 = SIMD_INT_ADD(tmp2, tmp4, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uaddl2(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.348 UADDL, UADDL2 page C7-2187 line 123035 MATCH x2e200000/mask=xbf20fc00 @@ -44569,46 +23945,16 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :uaddl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x0 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = zext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = zext(* [register]:4 tmp5); + TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = TMPQ1 + TMPQ2 on lane size 8 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 8, 16); - simd_address_at(tmp8, TMPQ2, 0, 8, 16); - simd_address_at(tmp9, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp7) + (* [register]:8 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 8, 16); - simd_address_at(tmp8, TMPQ2, 1, 8, 16); - simd_address_at(tmp9, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp7) + (* [register]:8 tmp8); + Rd_VPR128.2D[0,64] = TMPQ1[0,64] + TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] + TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.2S, 4:1); - local tmpd:16 = SIMD_INT_ADD(tmp1, tmp2, 
8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uaddl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.348 UADDL, UADDL2 page C7-2187 line 123035 MATCH x2e200000/mask=xbf20fc00 @@ -44620,66 +23966,22 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :uaddl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x0 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); + TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = TMPQ1 + TMPQ2 on lane size 4 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 4, 16); - simd_address_at(tmp8, TMPQ2, 0, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) + (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 4, 16); - simd_address_at(tmp8, TMPQ2, 1, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) + (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 2, 4, 16); - simd_address_at(tmp8, TMPQ2, 2, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) + (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 3, 4, 16); - simd_address_at(tmp8, TMPQ2, 3, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) + (* [register]:4 tmp8); + Rd_VPR128.4S[0,32] = TMPQ1[0,32] + TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] + TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] + TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] + TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.4H, 2:1); - local tmpd:16 = 
SIMD_INT_ADD(tmp1, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uaddl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.348 UADDL, UADDL2 page C7-2187 line 123035 MATCH x2e200000/mask=xbf20fc00 @@ -44691,106 +23993,34 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :uaddl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x0 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); + TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = 
zext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = TMPQ1 + TMPQ2 on lane size 2 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 2, 16); - simd_address_at(tmp8, TMPQ2, 0, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 2, 16); - simd_address_at(tmp8, TMPQ2, 1, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 2, 2, 16); - simd_address_at(tmp8, TMPQ2, 2, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 3, 2, 16); - simd_address_at(tmp8, TMPQ2, 3, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 4, 2, 16); - simd_address_at(tmp8, TMPQ2, 4, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 5, 2, 16); - simd_address_at(tmp8, TMPQ2, 5, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 6, 2, 16); - simd_address_at(tmp8, TMPQ2, 6, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 7, 2, 16); - simd_address_at(tmp8, TMPQ2, 7, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) + (* [register]:2 tmp8); + Rd_VPR128.8H[0,16] = TMPQ1[0,16] + TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] + TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] + TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] + TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] + TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] + TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] + TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] + TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.8B, 1:1); - local tmpd:16 = SIMD_INT_ADD(tmp1, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uaddl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.349 UADDLP page C7-2189 line 123155 MATCH x2e202800/mask=xbf3ffc00 @@ -44803,56 +24033,29 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :uaddlp Rd_VPR64.4H, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001010 & Rd_VPR64.4H & Rn_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.8B; # sipd infix Rd_VPR64.4H = +(TMPD1) on pairs lane size (1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:1 = 0; - local tmp6:1 = 0; - local tmp7:2 = 0; - local tmp8:2 = 0; - simd_address_at(tmp2, TMPD1, 0, 1, 8); - simd_address_at(tmp3, TMPD1, 1, 1, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - 
simd_address_at(tmp2, TMPD1, 2, 1, 8); - simd_address_at(tmp3, TMPD1, 3, 1, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 1, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPD1, 4, 1, 8); - simd_address_at(tmp3, TMPD1, 5, 1, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 2, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPD1, 6, 1, 8); - simd_address_at(tmp3, TMPD1, 7, 1, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 3, 2, 8); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; + tmp2 = TMPD1[0,8]; + tmp4 = zext(tmp2); + tmp3 = TMPD1[8,8]; + tmp5 = zext(tmp3); + Rd_VPR64.4H[0,16] = tmp4 + tmp5; + tmp2 = TMPD1[16,8]; + tmp4 = zext(tmp2); + tmp3 = TMPD1[24,8]; + tmp5 = zext(tmp3); + Rd_VPR64.4H[16,16] = tmp4 + tmp5; + tmp2 = TMPD1[32,8]; + tmp4 = zext(tmp2); + tmp3 = TMPD1[40,8]; + tmp5 = zext(tmp3); + Rd_VPR64.4H[32,16] = tmp4 + tmp5; + tmp2 = TMPD1[48,8]; + tmp4 = zext(tmp2); + tmp3 = TMPD1[56,8]; + tmp5 = zext(tmp3); + Rd_VPR64.4H[48,16] = tmp4 + tmp5; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.8B; - local tmpd:8 = SIMD_INT_ADD(tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_uaddlp(Rn_VPR64.8B, 1:1); -@endif } # C7.2.349 UADDLP page C7-2189 line 123155 MATCH x2e202800/mask=xbf3ffc00 @@ -44865,88 +24068,49 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001010 & Rd_ :uaddlp Rd_VPR128.8H, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001010 & Rd_VPR128.8H & Rn_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = Rn_VPR128.16B; # sipd infix Rd_VPR128.8H = +(TMPQ1) on pairs lane size (1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:1 = 0; - local tmp6:1 = 0; - local tmp7:2 = 0; - local tmp8:2 = 0; - simd_address_at(tmp2, TMPQ1, 0, 1, 16); - simd_address_at(tmp3, TMPQ1, 1, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 2, 1, 16); - simd_address_at(tmp3, TMPQ1, 3, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 4, 1, 16); - simd_address_at(tmp3, TMPQ1, 5, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 6, 1, 16); - simd_address_at(tmp3, TMPQ1, 7, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 8, 1, 16); - simd_address_at(tmp3, TMPQ1, 9, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 10, 1, 
16); - simd_address_at(tmp3, TMPQ1, 11, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 5, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 12, 1, 16); - simd_address_at(tmp3, TMPQ1, 13, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 14, 1, 16); - simd_address_at(tmp3, TMPQ1, 15, 1, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 7, 2, 16); - tmp5 = * [register]:1 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:1 tmp3; - tmp8 = zext(tmp6); - * [register]:2 tmp4 = tmp7 + tmp8; + tmp2 = TMPQ1[0,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[8,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[0,16] = tmp4 + tmp5; + tmp2 = TMPQ1[16,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[24,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[16,16] = tmp4 + tmp5; + tmp2 = TMPQ1[32,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[40,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[32,16] = tmp4 + tmp5; + tmp2 = TMPQ1[48,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[56,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[48,16] = tmp4 + tmp5; + tmp2 = TMPQ1[64,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[72,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[64,16] = tmp4 + tmp5; + tmp2 = TMPQ1[80,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[88,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[80,16] = tmp4 + tmp5; + tmp2 = TMPQ1[96,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[104,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[96,16] = tmp4 + tmp5; + tmp2 = TMPQ1[112,8]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[120,8]; + tmp5 = zext(tmp3); + Rd_VPR128.8H[112,16] = tmp4 + tmp5; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rn_VPR128.16B; - local tmpd:16 = SIMD_INT_ADD(tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uaddlp(Rn_VPR128.16B, 1:1); -@endif } # C7.2.349 UADDLP page C7-2189 line 123155 MATCH x2e202800/mask=xbf3ffc00 @@ -44959,40 +24123,19 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001010 & Rd_ :uaddlp Rd_VPR64.2S, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001010 & Rd_VPR64.2S & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.4H; # sipd infix Rd_VPR64.2S = +(TMPD1) on pairs lane size (2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:2 = 0; - local tmp6:2 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp2, TMPD1, 0, 2, 8); - simd_address_at(tmp3, TMPD1, 1, 2, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 0, 4, 8); - tmp5 = * [register]:2 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = zext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPD1, 2, 2, 8); - simd_address_at(tmp3, TMPD1, 3, 2, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 1, 4, 8); - tmp5 = * [register]:2 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = zext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; + tmp2 = TMPD1[0,16]; + tmp4 = zext(tmp2); + tmp3 = TMPD1[16,16]; + tmp5 = zext(tmp3); + Rd_VPR64.2S[0,32] = tmp4 + tmp5; + tmp2 = TMPD1[32,16]; + tmp4 = zext(tmp2); + tmp3 = TMPD1[48,16]; + tmp5 = zext(tmp3); + Rd_VPR64.2S[32,32] = tmp4 + tmp5; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.4H; - local tmpd:8 = 
SIMD_INT_ADD(tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_uaddlp(Rn_VPR64.4H, 2:1); -@endif } # C7.2.349 UADDLP page C7-2189 line 123155 MATCH x2e202800/mask=xbf3ffc00 @@ -45005,56 +24148,29 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001010 & Rd_ :uaddlp Rd_VPR128.4S, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001010 & Rd_VPR128.4S & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = Rn_VPR128.8H; # sipd infix Rd_VPR128.4S = +(TMPQ1) on pairs lane size (2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:2 = 0; - local tmp6:2 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp2, TMPQ1, 0, 2, 16); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = zext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 2, 2, 16); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 1, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = zext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 4, 2, 16); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 2, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = zext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 6, 2, 16); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 3, 4, 16); - tmp5 = * [register]:2 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:2 tmp3; - tmp8 = zext(tmp6); - * [register]:4 tmp4 = tmp7 + tmp8; + tmp2 = TMPQ1[0,16]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[16,16]; + tmp5 = zext(tmp3); + Rd_VPR128.4S[0,32] = tmp4 + tmp5; + tmp2 = TMPQ1[32,16]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[48,16]; + tmp5 = zext(tmp3); + Rd_VPR128.4S[32,32] = tmp4 + tmp5; + tmp2 = TMPQ1[64,16]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[80,16]; + tmp5 = zext(tmp3); + Rd_VPR128.4S[64,32] = tmp4 + tmp5; + tmp2 = TMPQ1[96,16]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[112,16]; + tmp5 = zext(tmp3); + Rd_VPR128.4S[96,32] = tmp4 + tmp5; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rn_VPR128.8H; - local tmpd:16 = SIMD_INT_ADD(tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uaddlp(Rn_VPR128.8H, 2:1); -@endif } # C7.2.349 UADDLP page C7-2189 line 123155 MATCH x2e202800/mask=xbf3ffc00 @@ -45067,32 +24183,14 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001010 & Rd_ :uaddlp Rd_VPR64.1D, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001010 & Rd_VPR64.1D & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.2S; # sipd infix Rd_VPR64.1D = +(TMPD1) on pairs lane size (4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:8 = 0; - local tmp8:8 = 0; - simd_address_at(tmp2, TMPD1, 0, 4, 8); - simd_address_at(tmp3, TMPD1, 1, 4, 8); - simd_address_at(tmp4, Rd_VPR64.1D, 0, 8, 8); - tmp5 = * [register]:4 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:4 tmp3; - tmp8 = zext(tmp6); - * [register]:8 tmp4 = tmp7 + tmp8; + tmp2 = TMPD1[0,32]; + tmp4 = zext(tmp2); + tmp3 = TMPD1[32,32]; + tmp5 = 
zext(tmp3); + Rd_VPR64.1D[0,64] = tmp4 + tmp5; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.2S; - local tmpd:8 = SIMD_INT_ADD(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.1D -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.1D = NEON_uaddlp(Rn_VPR64.2S, 4:1); -@endif } # C7.2.349 UADDLP page C7-2189 line 123155 MATCH x2e202800/mask=xbf3ffc00 @@ -45105,40 +24203,19 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001010 & Rd_ :uaddlp Rd_VPR128.2D, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001010 & Rd_VPR128.2D & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPQ1 = Rn_VPR128.4S; # sipd infix Rd_VPR128.2D = +(TMPQ1) on pairs lane size (4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:8 = 0; - local tmp8:8 = 0; - simd_address_at(tmp2, TMPQ1, 0, 4, 16); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - simd_address_at(tmp4, Rd_VPR128.2D, 0, 8, 16); - tmp5 = * [register]:4 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:4 tmp3; - tmp8 = zext(tmp6); - * [register]:8 tmp4 = tmp7 + tmp8; - simd_address_at(tmp2, TMPQ1, 2, 4, 16); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - simd_address_at(tmp4, Rd_VPR128.2D, 1, 8, 16); - tmp5 = * [register]:4 tmp2; - tmp7 = zext(tmp5); - tmp6 = * [register]:4 tmp3; - tmp8 = zext(tmp6); - * [register]:8 tmp4 = tmp7 + tmp8; + tmp2 = TMPQ1[0,32]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[32,32]; + tmp5 = zext(tmp3); + Rd_VPR128.2D[0,64] = tmp4 + tmp5; + tmp2 = TMPQ1[64,32]; + tmp4 = zext(tmp2); + tmp3 = TMPQ1[96,32]; + tmp5 = zext(tmp3); + Rd_VPR128.2D[64,64] = tmp4 + tmp5; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rn_VPR128.4S; - local tmpd:16 = SIMD_INT_ADD(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uaddlp(Rn_VPR128.4S, 4:1); -@endif } # C7.2.350 UADDLV page C7-2191 line 123264 MATCH x2e303800/mask=xbf3ffc00 @@ -45149,9 +24226,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001010 & Rd_ :uaddlv Rd_FPR64, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.4S & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_uaddlv(Rn_VPR128.4S, 4:1); -@endif } # C7.2.350 UADDLV page C7-2191 line 123264 MATCH x2e303800/mask=xbf3ffc00 @@ -45162,9 +24237,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x :uaddlv Rd_FPR16, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.16B & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_uaddlv(Rn_VPR128.16B, 1:1); -@endif } # C7.2.350 UADDLV page C7-2191 line 123264 MATCH x2e303800/mask=xbf3ffc00 @@ -45175,9 +24248,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :uaddlv Rd_FPR16, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR64.8B & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_uaddlv(Rn_VPR64.8B, 1:1); -@endif } # C7.2.350 UADDLV page C7-2191 line 123264 MATCH x2e303800/mask=xbf3ffc00 @@ -45188,9 +24259,7 @@ is b_3131=0 & q=0 & u=1 & 
b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :uaddlv Rd_FPR32, Rn_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR64.4H & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_uaddlv(Rn_VPR64.4H, 2:1); -@endif } # C7.2.350 UADDLV page C7-2191 line 123264 MATCH x2e303800/mask=xbf3ffc00 @@ -45201,9 +24270,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :uaddlv Rd_FPR32, Rn_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.8H & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_uaddlv(Rn_VPR128.8H, 2:1); -@endif } # C7.2.351 UADDW, UADDW2 page C7-2193 line 123362 MATCH x2e201000/mask=xbf20fc00 @@ -45215,40 +24282,14 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :uaddw2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x1 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rm_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rm_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D + TMPQ2 on lane size 8 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp7, TMPQ2, 0, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp6) + (* [register]:8 tmp7); - simd_address_at(tmp6, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp7, TMPQ2, 1, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp6) + (* [register]:8 tmp7); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.2D, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uaddw2(Rn_VPR128.2D, Rm_VPR128.4S, 4:1); -@endif } # C7.2.351 UADDW, UADDW2 page C7-2193 line 123362 MATCH x2e201000/mask=xbf20fc00 @@ -45260,54 +24301,18 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :uaddw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x1 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rm_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rm_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 
16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S + TMPQ2 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp7, TMPQ2, 0, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); - simd_address_at(tmp6, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp7, TMPQ2, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); - simd_address_at(tmp6, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp7, TMPQ2, 2, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); - simd_address_at(tmp6, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp7, TMPQ2, 3, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) + (* [register]:4 tmp7); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.4S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uaddw2(Rn_VPR128.4S, Rm_VPR128.8H, 2:1); -@endif } # C7.2.351 UADDW, UADDW2 page C7-2193 line 123362 MATCH x2e201000/mask=xbf20fc00 @@ -45319,82 +24324,26 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :uaddw2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x1 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rm_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rm_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - 
simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H + TMPQ2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp7, TMPQ2, 0, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp7, TMPQ2, 1, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp7, TMPQ2, 2, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp7, TMPQ2, 3, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp7, TMPQ2, 4, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp7, TMPQ2, 5, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp7, TMPQ2, 6, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp7, TMPQ2, 7, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) + (* [register]:2 tmp7); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 1:1); - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.8H, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uaddw2(Rn_VPR128.8H, Rm_VPR128.16B, 1:1); -@endif } # C7.2.351 UADDW, UADDW2 page C7-2193 line 123362 MATCH x2e201000/mask=xbf20fc00 @@ -45406,36 +24355,13 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :uaddw Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR64.2S is 
b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x1 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - simd_address_at(tmp2, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); + TMPQ1[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D + TMPQ1 on lane size 8 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp5, TMPQ1, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp4) + (* [register]:8 tmp5); - simd_address_at(tmp4, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp4) + (* [register]:8 tmp5); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rm_VPR64.2S, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.2D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uaddw(Rn_VPR128.2D, Rm_VPR64.2S, 4:1); -@endif } # C7.2.351 UADDW, UADDW2 page C7-2193 line 123362 MATCH x2e201000/mask=xbf20fc00 @@ -45447,50 +24373,17 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :uaddw Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x1 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); + TMPQ1[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S + TMPQ1 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); - simd_address_at(tmp4, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); - simd_address_at(tmp4, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - * 
[register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); - simd_address_at(tmp4, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rm_VPR64.4H, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uaddw(Rn_VPR128.4S, Rm_VPR64.4H, 2:1); -@endif } # C7.2.351 UADDW, UADDW2 page C7-2193 line 123362 MATCH x2e201000/mask=xbf20fc00 @@ -45502,78 +24395,25 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :uaddw Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x1 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); + TMPQ1[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H + TMPQ1 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* 
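The uaddw/uaddw2 rewrites all follow the same shape: zero-extend each lane of the narrow source (the upper half of the register for the 2 variants), then add lane-wise into the wide destination. A Python sketch of that widening add with the wrap at the wide lane width made explicit (names are illustrative):

    def uaddw(wide, narrow, wide_bits):
        # UADDW: zext each narrow lane and add it to the matching wide
        # lane, truncating to the wide lane width.
        mask = (1 << wide_bits) - 1
        return [(w + n) & mask for w, n in zip(wide, narrow)]

    # 2D + 2S example
    print([hex(x) for x in uaddw([2**64 - 1, 5], [1, 7], 64)])   # ['0x0', '0xc']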
[register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rm_VPR64.8B, 1:1); - local tmpd:16 = SIMD_INT_ADD(Rn_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uaddw(Rn_VPR128.8H, Rm_VPR64.8B, 1:1); -@endif } # C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x7f00e400/mask=xff80fc00 @@ -45584,9 +24424,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :ucvtf Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_3031=1 & u=1 & b_2428=0x1f & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_ucvtf(Rn_FPR64, Imm_shr_imm64:1); -@endif } # C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x7f00e400/mask=xff80fc00 @@ -45597,9 +24435,7 @@ is b_3031=1 & u=1 & b_2428=0x1f & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_ :ucvtf Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_3031=1 & u=1 & b_2428=0x1f & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_ucvtf(Rn_FPR32, Imm_shr_imm32:1); -@endif } # C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x7f00e400/mask=xff80fc00 @@ -45610,9 +24446,7 @@ is b_3031=1 & u=1 & b_2428=0x1f & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_101 :ucvtf Rd_FPR16, Rn_FPR16, Imm_shr_imm16 is b_3031=1 & u=1 & b_2428=0x1f & b_2023=1 & Imm_shr_imm16 & b_1115=0x1c & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_ucvtf(Rn_FPR16, Imm_shr_imm16:1); -@endif } # C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x2f00e400/mask=xbf80fc00 @@ -45623,9 +24457,7 @@ is b_3031=1 & u=1 & b_2428=0x1f & b_2023=1 
& Imm_shr_imm16 & b_1115=0x1c & b_101 :ucvtf Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_ucvtf(Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); -@endif } # C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x2f00e400/mask=xbf80fc00 @@ -45636,9 +24468,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c :ucvtf Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_ucvtf(Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x2f00e400/mask=xbf80fc00 @@ -45649,9 +24479,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & :ucvtf Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_ucvtf(Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x2f00e400/mask=xbf80fc00 @@ -45662,9 +24490,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & :ucvtf Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_ucvtf(Rn_VPR64.4H, Imm_shr_imm32:1, 2:1); -@endif } # C7.2.352 UCVTF (vector, fixed-point) page C7-2195 line 123484 MATCH x2f00e400/mask=xbf80fc00 @@ -45675,9 +24501,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & :ucvtf Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_ucvtf(Rn_VPR128.8H, Imm_shr_imm32:1, 2:1); -@endif } # C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x7e21d800/mask=xffbffc00 @@ -45688,9 +24512,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & :ucvtf Rd_FPR32, Rn_FPR32 is b_3031=1 & u=1 & b_2428=0x1e & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_ucvtf(Rn_FPR32); -@endif } # C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x7e21d800/mask=xffbffc00 @@ -45701,9 +24523,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_10 :ucvtf Rd_FPR64, Rn_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & size_high=0 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_ucvtf(Rn_FPR64); -@endif } # C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x2e21d800/mask=xbfbffc00 @@ -45714,9 +24534,7 @@ 
is b_3031=1 & u=1 & b_2428=0x1e & size_high=0 & b_1722=0x30 & b_1216=0x1d & b_10 :ucvtf Rd_VPR64.2S, Rn_VPR64.2S is sf=0 & q=0 & b_2929=1 & b_2428=0x0e & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x1d & b_1011=2 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_ucvtf(Rn_VPR64.2S, 4:1); -@endif } # C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x2e21d800/mask=xbfbffc00 @@ -45727,9 +24545,7 @@ is sf=0 & q=0 & b_2929=1 & b_2428=0x0e & advSIMD3.size=0 & b_1721=0x10 & b_1216= :ucvtf Rd_VPR128.4S, Rn_VPR128.4S is sf=0 & q=1 & b_2929=1 & b_2428=0x0e & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x1d & b_1011=2 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_ucvtf(Rn_VPR128.4S, 4:1); -@endif } # C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x2e21d800/mask=xbfbffc00 @@ -45740,9 +24556,7 @@ is sf=0 & q=1 & b_2929=1 & b_2428=0x0e & advSIMD3.size=0 & b_1721=0x10 & b_1216= :ucvtf Rd_VPR128.2D, Rn_VPR128.2D is sf=0 & q=1 & b_2929=1 & b_2428=0x0e & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x1d & b_1011=2 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_ucvtf(Rn_VPR128.2D, 8:1); -@endif } # C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x7e79d800/mask=xfffffc00 @@ -45754,9 +24568,7 @@ is sf=0 & q=1 & b_2929=1 & b_2428=0x0e & advSIMD3.size=1 & b_1721=0x10 & b_1216= :ucvtf Rd_FPR16, Rn_FPR16 is b_1031=0b0111111001111001110110 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_ucvtf(Rn_FPR16); -@endif } # C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x2e79d800/mask=xbffffc00 @@ -45768,9 +24580,7 @@ is b_1031=0b0111111001111001110110 & Rd_FPR16 & Rn_FPR16 & Zd :ucvtf Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b10111001111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_ucvtf(Rn_VPR64.4H, 2:1); -@endif } # C7.2.353 UCVTF (vector, integer) page C7-2198 line 123634 MATCH x2e79d800/mask=xbffffc00 @@ -45782,9 +24592,7 @@ is b_31=0 & b_30=0 & b_1029=0b10111001111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & :ucvtf Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b10111001111001110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_ucvtf(Rn_VPR128.8H, 2:1); -@endif } # C7.2.354 UCVTF (scalar, fixed-point) page C7-2201 line 123812 MATCH x1e030000/mask=x7f3f0000 @@ -45797,19 +24605,10 @@ is b_31=0 & b_30=1 & b_1029=0b10111001111001110110 & Rd_VPR128.8H & Rn_VPR128.8H :ucvtf Rd_FPR16, Rn_GPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode=3 & b_15=1 & FBitsOp & FBits16 & Rn_GPR32 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Rn_GPR32); local tmp2:2 = int2float(tmp1); Rd_FPR16 = tmp2 f/ FBits16; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Rn_GPR32); - local tmp2:2 = int2float(tmp1); - Rd_FPR16 = tmp2 f/ FBits16; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_ucvtf(Rn_GPR32, FBits16); -@endif } # C7.2.354 UCVTF (scalar, fixed-point) 
page C7-2201 line 123812 MATCH x1e030000/mask=x7f3f0000 @@ -45821,19 +24620,10 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode= :ucvtf Rd_FPR16, Rn_GPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode=3 & FBitsOp & FBits16 & Rn_GPR64 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:9 = zext(Rn_GPR64); local tmp2:2 = int2float(tmp1); Rd_FPR16 = tmp2 f/ FBits16; zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:9 = zext(Rn_GPR64); - local tmp2:2 = int2float(tmp1); - Rd_FPR16 = tmp2 f/ FBits16; - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_ucvtf(Rn_GPR64, FBits16); -@endif } # C7.2.354 UCVTF (scalar, fixed-point) page C7-2201 line 123812 MATCH x1e030000/mask=x7f3f0000 @@ -45846,19 +24636,10 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode= :ucvtf Rd_FPR64, Rn_GPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode=3 & b_15=1 & FBitsOp & FBits64 & Rn_GPR32 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Rn_GPR32); local tmp2:8 = int2float(tmp1); Rd_FPR64 = tmp2 f/ FBits64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Rn_GPR32); - local tmp2:8 = int2float(tmp1); - Rd_FPR64 = tmp2 f/ FBits64; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_ucvtf(Rn_GPR32, FBits64); -@endif } # C7.2.354 UCVTF (scalar, fixed-point) page C7-2201 line 123812 MATCH x1e030000/mask=x7f3f0000 @@ -45871,19 +24652,10 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode= :ucvtf Rd_FPR64, Rn_GPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode=3 & FBitsOp & FBits64 & Rn_GPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:9 = zext(Rn_GPR64); local tmp2:8 = int2float(tmp1); Rd_FPR64 = tmp2 f/ FBits64; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:9 = zext(Rn_GPR64); - local tmp2:8 = int2float(tmp1); - Rd_FPR64 = tmp2 f/ FBits64; - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_ucvtf(Rn_GPR64, FBits64); -@endif } # C7.2.354 UCVTF (scalar, fixed-point) page C7-2201 line 123812 MATCH x1e030000/mask=x7f3f0000 @@ -45896,19 +24668,10 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode= :ucvtf Rd_FPR32, Rn_GPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode=3 & b_15=1 & FBitsOp & FBits32 & Rn_GPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Rn_GPR32); local tmp2:4 = int2float(tmp1); Rd_FPR32 = tmp2 f/ FBits32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Rn_GPR32); - local tmp2:4 = int2float(tmp1); - Rd_FPR32 = tmp2 f/ FBits32; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_ucvtf(Rn_GPR32, FBits32); -@endif } # C7.2.354 UCVTF (scalar, fixed-point) page C7-2201 line 123812 MATCH x1e030000/mask=x7f3f0000 @@ -45920,19 +24683,10 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode= :ucvtf Rd_FPR32, Rn_GPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode=3 & FBitsOp & FBits32 & Rn_GPR64 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:9 = 
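The scalar fixed-point ucvtf bodies keep a single form: zero-extend the source, convert with int2float, and divide by the FBits* operand, which plays the role of the 2^fbits scale. The same arithmetic in Python, as a sanity check (values illustrative):

    def ucvtf_fixed(u, fbits):
        # UCVTF (scalar, fixed-point): treat u as an unsigned fixed-point
        # value with `fbits` fractional bits.
        return float(u) / (2.0 ** fbits)

    print(ucvtf_fixed(0x180, 8))   # 384 / 256 = 1.5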
zext(Rn_GPR64); local tmp2:4 = int2float(tmp1); Rd_FPR32 = tmp2 f/ FBits32; zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:9 = zext(Rn_GPR64); - local tmp2:4 = int2float(tmp1); - Rd_FPR32 = tmp2 f/ FBits32; - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_ucvtf(Rn_GPR64, FBits32); -@endif } # C7.2.355 UCVTF (scalar, integer) page C7-2203 line 123942 MATCH x1e230000/mask=x7f3ffc00 @@ -45944,17 +24698,9 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode= :ucvtf Rd_FPR16, Rn_GPR32 is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR32 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Rn_GPR32); Rd_FPR16 = int2float(tmp1); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Rn_GPR32); - Rd_FPR16 = int2float(tmp1); - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_ucvtf(Rn_GPR32); -@endif } # C7.2.355 UCVTF (scalar, integer) page C7-2203 line 123942 MATCH x1e230000/mask=x7f3ffc00 @@ -45966,17 +24712,9 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode :ucvtf Rd_FPR16, Rn_GPR64 is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR64 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:9 = zext(Rn_GPR64); Rd_FPR16 = int2float(tmp1); zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:9 = zext(Rn_GPR64); - Rd_FPR16 = int2float(tmp1); - zext_zh(Zd); # zero upper 30 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_ucvtf(Rn_GPR64); -@endif } # C7.2.355 UCVTF (scalar, integer) page C7-2203 line 123942 MATCH x1e230000/mask=x7f3ffc00 @@ -45988,17 +24726,9 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode :ucvtf Rd_FPR64, Rn_GPR32 is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR32 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Rn_GPR32); Rd_FPR64 = int2float(tmp1); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Rn_GPR32); - Rd_FPR64 = int2float(tmp1); - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_ucvtf(Rn_GPR32); -@endif } # C7.2.355 UCVTF (scalar, integer) page C7-2203 line 123942 MATCH x1e230000/mask=x7f3ffc00 @@ -46010,17 +24740,9 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode :ucvtf Rd_FPR64, Rn_GPR64 is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:9 = zext(Rn_GPR64); Rd_FPR64 = int2float(tmp1); zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:9 = zext(Rn_GPR64); - Rd_FPR64 = int2float(tmp1); - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_ucvtf(Rn_GPR64); -@endif } # C7.2.355 UCVTF (scalar, integer) page C7-2203 line 123942 MATCH x1e230000/mask=x7f3ffc00 @@ -46032,17 +24754,9 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode :ucvtf Rd_FPR32, Rn_GPR32 is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = 
zext(Rn_GPR32); Rd_FPR32 = int2float(tmp1); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Rn_GPR32); - Rd_FPR32 = int2float(tmp1); - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_ucvtf(Rn_GPR32); -@endif } # C7.2.355 UCVTF (scalar, integer) page C7-2203 line 123942 MATCH x1e230000/mask=x7f3ffc00 @@ -46054,17 +24768,9 @@ is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode :ucvtf Rd_FPR32, Rn_GPR64 is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR64 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:9 = zext(Rn_GPR64); Rd_FPR32 = int2float(tmp1); zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:9 = zext(Rn_GPR64); - Rd_FPR32 = int2float(tmp1); - zext_zs(Zd); # zero upper 28 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR32 = NEON_ucvtf(Rn_GPR64); -@endif } # C7.2.356 UDOT (by element) page C7-2205 line 124065 MATCH x2f00e000/mask=xbf00f400 @@ -46076,10 +24782,8 @@ is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode :udot Rd_VPR64.2S, Rn_VPR64.8B, Re_VPR128.B.vIndex is b_31=0 & b_30=0 & b_2429=0b101111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd_VPR64.2S & Rn_VPR64.8B & Re_VPR128.B.vIndex & Re_VPR128.S & vIndex & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); Rd_VPR64.2S = NEON_udot(Rn_VPR64.8B, tmp1, 1:1); -@endif } # C7.2.356 UDOT (by element) page C7-2205 line 124065 MATCH x2f00e000/mask=xbf00f400 @@ -46091,10 +24795,8 @@ is b_31=0 & b_30=0 & b_2429=0b101111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd :udot Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.B.vIndex is b_31=0 & b_30=1 & b_2429=0b101111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd_VPR128.4S & Rn_VPR128.16B & Re_VPR128.B.vIndex & Re_VPR128.S & vIndex & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); Rd_VPR128.4S = NEON_udot(Rn_VPR128.16B, tmp1, 1:1); -@endif } # C7.2.357 UDOT (vector) page C7-2207 line 124164 MATCH x2e009400/mask=xbf20fc00 @@ -46106,9 +24808,7 @@ is b_31=0 & b_30=1 & b_2429=0b101111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd :udot Rd_VPR64.2S, Rn_VPR64.8B, Rm_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & Rd_VPR64.2S & Rn_VPR64.8B & Rm_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_udot(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.357 UDOT (vector) page C7-2207 line 124164 MATCH x2e009400/mask=xbf20fc00 @@ -46120,9 +24820,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & :udot Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & Rd_VPR128.4S & Rn_VPR128.16B & Rm_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_udot(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.358 UHADD page C7-2209 line 124262 MATCH x2e200400/mask=xbf20fc00 @@ -46133,9 +24831,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & :uhadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x0 & 
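The udot forms stay as NEON_udot pseudo-ops, with the by-element variants picking the selected 32-bit group via SIMD_PIECE. Each destination lane accumulates a dot product of four unsigned bytes; a Python model of one lane (helper name illustrative):

    def udot_lane(acc, a4, b4):
        # One UDOT lane: accumulate the dot product of four unsigned
        # bytes from each source into a 32-bit accumulator.
        return (acc + sum(x * y for x, y in zip(a4, b4))) & 0xFFFFFFFF

    print(udot_lane(10, [1, 2, 3, 4], [5, 6, 7, 8]))   # 10 + 70 = 80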
b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_uhadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.358 UHADD page C7-2209 line 124262 MATCH x2e200400/mask=xbf20fc00 @@ -46146,9 +24842,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :uhadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x0 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_uhadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.358 UHADD page C7-2209 line 124262 MATCH x2e200400/mask=xbf20fc00 @@ -46159,9 +24853,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :uhadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x0 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_uhadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.358 UHADD page C7-2209 line 124262 MATCH x2e200400/mask=xbf20fc00 @@ -46172,9 +24864,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :uhadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x0 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_uhadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.358 UHADD page C7-2209 line 124262 MATCH x2e200400/mask=xbf20fc00 @@ -46185,9 +24875,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :uhadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x0 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_uhadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.358 UHADD page C7-2209 line 124262 MATCH x2e200400/mask=xbf20fc00 @@ -46198,9 +24886,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :uhadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x0 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_uhadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.359 UHSUB page C7-2211 line 124362 MATCH x2e202400/mask=xbf20fc00 @@ -46211,9 +24897,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :uhsub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x4 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_uhsub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.359 UHSUB page C7-2211 line 124362 MATCH x2e202400/mask=xbf20fc00 @@ -46224,9 +24908,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :uhsub Rd_VPR64.2S, Rn_VPR64.2S, 
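uhadd and uhsub likewise remain NEON_* pseudo-ops here. Their per-lane behaviour is a halving add or subtract computed without intermediate overflow; a Python model with the lane width passed explicitly (names illustrative):

    def uhadd(a, b, bits):
        # Unsigned halving add: (a + b) >> 1, no intermediate overflow.
        return ((a + b) >> 1) & ((1 << bits) - 1)

    def uhsub(a, b, bits):
        # Unsigned halving subtract: (a - b) >> 1, truncated to the lane.
        return ((a - b) >> 1) & ((1 << bits) - 1)

    print(hex(uhadd(0xFF, 0xFF, 8)), hex(uhsub(1, 2, 8)))   # 0xff 0xff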
Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x4 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_uhsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.359 UHSUB page C7-2211 line 124362 MATCH x2e202400/mask=xbf20fc00 @@ -46237,9 +24919,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :uhsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x4 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_uhsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.359 UHSUB page C7-2211 line 124362 MATCH x2e202400/mask=xbf20fc00 @@ -46250,9 +24930,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :uhsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x4 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_uhsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.359 UHSUB page C7-2211 line 124362 MATCH x2e202400/mask=xbf20fc00 @@ -46263,9 +24941,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :uhsub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x4 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_uhsub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.359 UHSUB page C7-2211 line 124362 MATCH x2e202400/mask=xbf20fc00 @@ -46276,9 +24952,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :uhsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x4 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_uhsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.360 UMAX page C7-2213 line 124460 MATCH x2e206400/mask=xbf20fc00 @@ -46289,9 +24963,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :umax Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xc & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_umax(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.360 UMAX page C7-2213 line 124460 MATCH x2e206400/mask=xbf20fc00 @@ -46302,9 +24974,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :umax Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xc & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_umax(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.360 UMAX page C7-2213 line 124460 MATCH x2e206400/mask=xbf20fc00 @@ -46315,9 +24985,7 @@ is b_3131=0 & q=0 & u=1 
& b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :umax Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xc & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_umax(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.360 UMAX page C7-2213 line 124460 MATCH x2e206400/mask=xbf20fc00 @@ -46328,9 +24996,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :umax Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xc & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_umax(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.360 UMAX page C7-2213 line 124460 MATCH x2e206400/mask=xbf20fc00 @@ -46341,9 +25007,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :umax Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xc & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_umax(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.360 UMAX page C7-2213 line 124460 MATCH x2e206400/mask=xbf20fc00 @@ -46354,9 +25018,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :umax Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xc & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_umax(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.361 UMAXP page C7-2215 line 124560 MATCH x2e20a400/mask=xbf20fc00 @@ -46367,9 +25029,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :umaxp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_umaxp(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.361 UMAXP page C7-2215 line 124560 MATCH x2e20a400/mask=xbf20fc00 @@ -46380,9 +25040,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :umaxp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_umaxp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.361 UMAXP page C7-2215 line 124560 MATCH x2e20a400/mask=xbf20fc00 @@ -46393,9 +25051,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :umaxp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_umaxp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.361 UMAXP page C7-2215 
line 124560 MATCH x2e20a400/mask=xbf20fc00 @@ -46406,9 +25062,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :umaxp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_umaxp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.361 UMAXP page C7-2215 line 124560 MATCH x2e20a400/mask=xbf20fc00 @@ -46419,9 +25073,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :umaxp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_umaxp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.361 UMAXP page C7-2215 line 124560 MATCH x2e20a400/mask=xbf20fc00 @@ -46432,9 +25084,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :umaxp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_umaxp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.362 UMAXV page C7-2217 line 124662 MATCH x2e30a800/mask=xbf3ffc00 @@ -46445,9 +25095,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :umaxv Rd_FPR8, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_umaxv(Rn_VPR128.16B, 1:1); -@endif } # C7.2.362 UMAXV page C7-2217 line 124662 MATCH x2e30a800/mask=xbf3ffc00 @@ -46458,9 +25106,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :umaxv Rd_FPR8, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_umaxv(Rn_VPR64.8B, 1:1); -@endif } # C7.2.362 UMAXV page C7-2217 line 124662 MATCH x2e30a800/mask=xbf3ffc00 @@ -46471,9 +25117,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :umaxv Rd_FPR16, Rn_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_umaxv(Rn_VPR64.4H, 2:1); -@endif } # C7.2.362 UMAXV page C7-2217 line 124662 MATCH x2e30a800/mask=xbf3ffc00 @@ -46484,9 +25128,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :umaxv Rd_FPR16, Rn_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_umaxv(Rn_VPR128.8H, 2:1); -@endif } # C7.2.362 UMAXV page C7-2217 line 124662 MATCH x2e30a800/mask=xbf3ffc00 @@ -46497,9 +25139,7 @@ is b_3131=0 & q=1 & u=1 & 
b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :umaxv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_umaxv(Rn_VPR128.4S, 4:1); -@endif } # C7.2.363 UMIN page C7-2219 line 124763 MATCH x2e206c00/mask=xbf20fc00 @@ -46510,9 +25150,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x :umin Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xd & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_umin(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.363 UMIN page C7-2219 line 124763 MATCH x2e206c00/mask=xbf20fc00 @@ -46523,9 +25161,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :umin Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xd & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_umin(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.363 UMIN page C7-2219 line 124763 MATCH x2e206c00/mask=xbf20fc00 @@ -46536,9 +25172,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :umin Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xd & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_umin(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.363 UMIN page C7-2219 line 124763 MATCH x2e206c00/mask=xbf20fc00 @@ -46549,9 +25183,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :umin Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xd & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_umin(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.363 UMIN page C7-2219 line 124763 MATCH x2e206c00/mask=xbf20fc00 @@ -46562,9 +25194,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :umin Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xd & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_umin(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.363 UMIN page C7-2219 line 124763 MATCH x2e206c00/mask=xbf20fc00 @@ -46575,9 +25205,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :umin Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xd & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_umin(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.364 UMINP page C7-2221 line 124863 MATCH x2e20ac00/mask=xbf20fc00 @@ -46588,9 
+25216,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :uminp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x15 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_uminp(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.364 UMINP page C7-2221 line 124863 MATCH x2e20ac00/mask=xbf20fc00 @@ -46601,9 +25227,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :uminp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x15 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_uminp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.364 UMINP page C7-2221 line 124863 MATCH x2e20ac00/mask=xbf20fc00 @@ -46614,9 +25238,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :uminp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x15 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_uminp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.364 UMINP page C7-2221 line 124863 MATCH x2e20ac00/mask=xbf20fc00 @@ -46627,9 +25249,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :uminp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x15 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_uminp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.364 UMINP page C7-2221 line 124863 MATCH x2e20ac00/mask=xbf20fc00 @@ -46640,9 +25260,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :uminp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x15 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_uminp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.364 UMINP page C7-2221 line 124863 MATCH x2e20ac00/mask=xbf20fc00 @@ -46653,9 +25271,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :uminp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x15 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_uminp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.365 UMINV page C7-2223 line 124965 MATCH x2e31a800/mask=xbf3ffc00 @@ -46666,9 +25282,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :uminv Rd_FPR8, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_uminv(Rn_VPR128.16B, 1:1); -@endif } # C7.2.365 UMINV page 
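The across-lane and pairwise reductions (umaxv/uminv and umaxp/uminp) also stay as NEON_* pseudo-ops. For reference, the reduction itself is an unsigned max or min over the source lanes, written to the low element of the scalar destination, while the pairwise forms operate on adjacent pairs of the concatenated sources; in Python terms (illustrative):

    def umaxv(lanes):
        # Across-lane unsigned maximum; result goes to the low element.
        return max(lanes)

    def umaxp(a, b):
        # Pairwise max over the concatenation a ++ b.
        src = a + b
        return [max(src[i], src[i + 1]) for i in range(0, len(src), 2)]

    print(umaxv([3, 0x80, 7, 1]), umaxp([3, 0x80], [7, 1]))   # 128 [128, 7]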
C7-2223 line 124965 MATCH x2e31a800/mask=xbf3ffc00 @@ -46679,9 +25293,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :uminv Rd_FPR8, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_uminv(Rn_VPR64.8B, 1:1); -@endif } # C7.2.365 UMINV page C7-2223 line 124965 MATCH x2e31a800/mask=xbf3ffc00 @@ -46692,9 +25304,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x :uminv Rd_FPR16, Rn_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_uminv(Rn_VPR64.4H, 2:1); -@endif } # C7.2.365 UMINV page C7-2223 line 124965 MATCH x2e31a800/mask=xbf3ffc00 @@ -46705,9 +25315,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :uminv Rd_FPR16, Rn_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_uminv(Rn_VPR128.8H, 2:1); -@endif } # C7.2.365 UMINV page C7-2223 line 124965 MATCH x2e31a800/mask=xbf3ffc00 @@ -46718,9 +25326,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x :uminv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_uminv(Rn_VPR128.4S, 4:1); -@endif } # C7.2.366 UMLAL, UMLAL2 (by element) page C7-2225 line 125066 MATCH x2f002000/mask=xbf00f400 @@ -46732,54 +25338,19 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x :umlal Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x2 & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = zext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ2 on lane size 8 - local tmp10:4 = 0; - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp10, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp11, TMPQ2, 0, 8, 
16); - simd_address_at(tmp12, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp10) + (* [register]:8 tmp11); - simd_address_at(tmp10, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp10) + (* [register]:8 tmp11); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.2S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = zext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_umlal(Rd_VPR128.2D, Rn_VPR64.2S, tmp1, 4:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.366 UMLAL, UMLAL2 (by element) page C7-2225 line 125066 MATCH x2f002000/mask=xbf00f400 @@ -46791,58 +25362,20 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :umlal2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x2 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - local tmp6:4 = 0; + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp6, Re_VPR128.S, vIndex:4, 4, 16); - local tmp7:4 = * [register]:4 tmp6; - local tmp8:8 = zext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 8 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 8, 16); - simd_address_at(tmp11, TMPQ3, 0, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 8, 16); - simd_address_at(tmp11, TMPQ3, 1, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp13, TMPQ3, 0, 8, 16); - simd_address_at(tmp14, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) + (* [register]:8 tmp13); - simd_address_at(tmp12, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp13, TMPQ3, 1, 8, 16); - simd_address_at(tmp14, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) + (* [register]:8 tmp13); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 
4:1); - local tmp3:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = zext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp5, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_umlal(Rd_VPR128.2D, Rn_VPR128.4S, tmp1, 4:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.366 UMLAL, UMLAL2 (by element) page C7-2225 line 125066 MATCH x2f002000/mask=xbf00f400 @@ -46854,74 +25387,25 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :umlal Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x2 & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = zext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ2 on lane size 4 - local tmp10:4 = 0; - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp10, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) + (* [register]:4 tmp11); - simd_address_at(tmp10, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) + (* [register]:4 tmp11); - simd_address_at(tmp10, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp11, 
TMPQ2, 2, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) + (* [register]:4 tmp11); - simd_address_at(tmp10, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) + (* [register]:4 tmp11); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.4H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = zext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_umlal(Rd_VPR128.4S, Rn_VPR64.4H, tmp1, 2:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.366 UMLAL, UMLAL2 (by element) page C7-2225 line 125066 MATCH x2f002000/mask=xbf00f400 @@ -46933,78 +25417,26 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :umlal2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x2 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - local tmp6:4 = 0; + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp6, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp7:2 = * [register]:2 tmp6; - local tmp8:4 = zext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 4 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 4, 16); - simd_address_at(tmp11, TMPQ3, 0, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 4, 16); - simd_address_at(tmp11, TMPQ3, 1, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 2, 4, 16); - simd_address_at(tmp11, TMPQ3, 2, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * 
tmp8; - simd_address_at(tmp10, TMPQ2, 3, 4, 16); - simd_address_at(tmp11, TMPQ3, 3, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp13, TMPQ3, 0, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp13, TMPQ3, 1, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp13, TMPQ3, 2, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp13, TMPQ3, 3, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) + (* [register]:4 tmp13); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); - local tmp3:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = zext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp5, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_umlal2(Rd_VPR128.4S, Rn_VPR128.8H, tmp1, 2:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.367 UMLAL, UMLAL2 (vector) page C7-2228 line 125227 MATCH x2e208000/mask=xbf20fc00 @@ -47016,67 +25448,21 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :umlal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x8 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - 
simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - simd_address_at(tmp14, TMPQ5, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - simd_address_at(tmp14, TMPQ5, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ5 on lane size 8 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp16, TMPQ5, 0, 8, 16); - simd_address_at(tmp17, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp15) + (* [register]:8 tmp16); - simd_address_at(tmp15, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp16, TMPQ5, 1, 8, 16); - simd_address_at(tmp17, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp15) + (* [register]:8 tmp16); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ5[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ5[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 4:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 8:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp5, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_umlal2(Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.367 UMLAL, UMLAL2 (vector) page C7-2228 line 125227 MATCH x2e208000/mask=xbf20fc00 @@ -47088,95 +25474,29 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :umlal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x8 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = 
Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, TMPQ5, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, TMPQ5, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, TMPQ5, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, TMPQ5, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ5 on lane size 4 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp16, TMPQ5, 0, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) + (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp16, TMPQ5, 1, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) + (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp16, TMPQ5, 2, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) + (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp16, TMPQ5, 3, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) + (* [register]:4 tmp16); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ5[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ5[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ5[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ5[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 2:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp5, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S 
-@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_umlal2(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.367 UMLAL, UMLAL2 (vector) page C7-2228 line 125227 MATCH x2e208000/mask=xbf20fc00 @@ -47188,151 +25508,45 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :umlal2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x8 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + TMPQ4[32,16] = zext(TMPD3[16,8]); + 
TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 2 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 2, 16); - simd_address_at(tmp13, TMPQ4, 0, 2, 16); - simd_address_at(tmp14, TMPQ5, 0, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 2, 16); - simd_address_at(tmp13, TMPQ4, 1, 2, 16); - simd_address_at(tmp14, TMPQ5, 1, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 2, 16); - simd_address_at(tmp13, TMPQ4, 2, 2, 16); - simd_address_at(tmp14, TMPQ5, 2, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 2, 16); - simd_address_at(tmp13, TMPQ4, 3, 2, 16); - simd_address_at(tmp14, TMPQ5, 3, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 4, 2, 16); - simd_address_at(tmp13, TMPQ4, 4, 2, 16); - simd_address_at(tmp14, TMPQ5, 4, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 5, 2, 16); - simd_address_at(tmp13, TMPQ4, 5, 2, 16); - simd_address_at(tmp14, TMPQ5, 5, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 6, 2, 16); - simd_address_at(tmp13, TMPQ4, 6, 2, 16); - simd_address_at(tmp14, TMPQ5, 6, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 7, 2, 16); - simd_address_at(tmp13, TMPQ4, 7, 2, 16); - simd_address_at(tmp14, TMPQ5, 7, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); + TMPQ5[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ5 on lane size 2 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp16, TMPQ5, 0, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp16, TMPQ5, 1, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp16, TMPQ5, 2, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp16, TMPQ5, 3, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp16, TMPQ5, 4, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 5, 2, 16); - 
simd_address_at(tmp16, TMPQ5, 5, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp16, TMPQ5, 6, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp16, TMPQ5, 7, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) + (* [register]:2 tmp16); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ5[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ5[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ5[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ5[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ5[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ5[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ5[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ5[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 1:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp5, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_umlal2(Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.367 UMLAL, UMLAL2 (vector) page C7-2228 line 125227 MATCH x2e208000/mask=xbf20fc00 @@ -47344,59 +25558,19 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :umlal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x8 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = zext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = zext(* [register]:4 tmp5); + TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - simd_address_at(tmp10, TMPQ3, 0, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) * (* [register]:8 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - simd_address_at(tmp10, TMPQ3, 1, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) * (* [register]:8 tmp9); + TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; # 
simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) + (* [register]:8 tmp12); - simd_address_at(tmp11, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) + (* [register]:8 tmp12); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.2S, 4:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 8:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp3, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_umlal(Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.367 UMLAL, UMLAL2 (vector) page C7-2228 line 125227 MATCH x2e208000/mask=xbf20fc00 @@ -47408,87 +25582,27 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :umlal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x8 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); + TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - 
simd_address_at(tmp9, TMPQ2, 1, 4, 16); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); + TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) + (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.4H, 2:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp3, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_umlal(Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.367 UMLAL, UMLAL2 (vector) page C7-2228 line 125227 MATCH x2e208000/mask=xbf20fc00 @@ -47500,143 +25614,43 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :umlal Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x8 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - 
simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); + TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 2 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 2, 16); - simd_address_at(tmp9, TMPQ2, 0, 2, 16); - simd_address_at(tmp10, TMPQ3, 0, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 2, 16); - simd_address_at(tmp9, TMPQ2, 1, 2, 16); - simd_address_at(tmp10, TMPQ3, 1, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 2, 16); - simd_address_at(tmp9, TMPQ2, 2, 2, 16); - simd_address_at(tmp10, TMPQ3, 2, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 2, 16); - simd_address_at(tmp9, TMPQ2, 3, 2, 16); - simd_address_at(tmp10, TMPQ3, 3, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 4, 2, 16); - simd_address_at(tmp9, TMPQ2, 4, 2, 16); - simd_address_at(tmp10, TMPQ3, 4, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 5, 2, 16); - 
simd_address_at(tmp9, TMPQ2, 5, 2, 16); - simd_address_at(tmp10, TMPQ3, 5, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 6, 2, 16); - simd_address_at(tmp9, TMPQ2, 6, 2, 16); - simd_address_at(tmp10, TMPQ3, 6, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 7, 2, 16); - simd_address_at(tmp9, TMPQ2, 7, 2, 16); - simd_address_at(tmp10, TMPQ3, 7, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); + TMPQ3[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ3 on lane size 2 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp12, TMPQ3, 0, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp12, TMPQ3, 1, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp12, TMPQ3, 2, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp12, TMPQ3, 3, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp12, TMPQ3, 4, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp12, TMPQ3, 5, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp12, TMPQ3, 6, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp12, TMPQ3, 7, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) + (* [register]:2 tmp12); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ3[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ3[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ3[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ3[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ3[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ3[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ3[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ3[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.8B, 1:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp3, 2:1); - Zd = 
zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_umlal(Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.368 UMLSL, UMLSL2 (by element) page C7-2230 line 125350 MATCH x2f006000/mask=xbf00f400 @@ -47648,54 +25662,19 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :umlsl Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x6 & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = zext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp8) * tmp6; - # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ2 on lane size 8 - local tmp10:4 = 0; - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp10, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp10) - (* [register]:8 tmp11); - simd_address_at(tmp10, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp12 = (* [register]:8 tmp10) - (* [register]:8 tmp11); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.2S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = zext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3, 8:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.2D, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_umlsl(Rd_VPR128.2D, Rn_VPR64.2S, tmp1, 4:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 + TMPQ2[0,64] = TMPQ1[0,64] * tmp3; + TMPQ2[64,64] = TMPQ1[64,64] * tmp3; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ2 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ2[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.368 UMLSL, UMLSL2 (by element) page C7-2230 line 125350 MATCH x2f006000/mask=xbf00f400 @@ -47707,58 +25686,20 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :umlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x6 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 
= 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - local tmp6:4 = 0; + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp6, Re_VPR128.S, vIndex:4, 4, 16); - local tmp7:4 = * [register]:4 tmp6; - local tmp8:8 = zext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 8 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 8, 16); - simd_address_at(tmp11, TMPQ3, 0, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 8, 16); - simd_address_at(tmp11, TMPQ3, 1, 8, 16); - * [register]:8 tmp11 = (* [register]:8 tmp10) * tmp8; - # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp13, TMPQ3, 0, 8, 16); - simd_address_at(tmp14, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) - (* [register]:8 tmp13); - simd_address_at(tmp12, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp13, TMPQ3, 1, 8, 16); - simd_address_at(tmp14, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) - (* [register]:8 tmp13); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 4:1); - local tmp3:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = zext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 8:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.2D, tmp5, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_umlsl2(Rd_VPR128.2D, Rn_VPR128.4S, tmp1, 4:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 + TMPQ3[0,64] = TMPQ2[0,64] * tmp4; + TMPQ3[64,64] = TMPQ2[64,64] * tmp4; + # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.368 UMLSL, UMLSL2 (by element) page C7-2230 line 125350 MATCH x2f006000/mask=xbf00f400 @@ -47770,74 +25711,25 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :umlsl Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x6 & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 
16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = zext(tmp5); - # simd infix TMPQ2 = TMPQ1 * tmp6 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp8) * tmp6; - # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ2 on lane size 4 - local tmp10:4 = 0; - local tmp11:4 = 0; - local tmp12:4 = 0; - simd_address_at(tmp10, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) - (* [register]:4 tmp11); - simd_address_at(tmp10, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) - (* [register]:4 tmp11); - simd_address_at(tmp10, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) - (* [register]:4 tmp11); - simd_address_at(tmp10, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp12 = (* [register]:4 tmp10) - (* [register]:4 tmp11); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.4H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = zext(tmp2); - local tmp4:16 = SIMD_INT_MULT(tmp1, tmp3, 4:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_umlsl(Rd_VPR128.4S, Rn_VPR64.4H, tmp1, 2:1); -@endif + # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 + TMPQ2[0,32] = TMPQ1[0,32] * tmp3; + TMPQ2[32,32] = TMPQ1[32,32] * tmp3; + TMPQ2[64,32] = TMPQ1[64,32] * tmp3; + TMPQ2[96,32] = TMPQ1[96,32] * tmp3; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ2 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ2[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.368 UMLSL, UMLSL2 (by element) page C7-2230 line 125350 MATCH x2f006000/mask=xbf00f400 @@ -47849,78 +25741,26 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :umlsl2 Rd_VPR128.4S, 
Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x6 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - local tmp6:4 = 0; + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp6, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp7:2 = * [register]:2 tmp6; - local tmp8:4 = zext(tmp7); - # simd infix TMPQ3 = TMPQ2 * tmp8 on lane size 4 - local tmp10:4 = 0; - local tmp11:4 = 0; - simd_address_at(tmp10, TMPQ2, 0, 4, 16); - simd_address_at(tmp11, TMPQ3, 0, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 1, 4, 16); - simd_address_at(tmp11, TMPQ3, 1, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 2, 4, 16); - simd_address_at(tmp11, TMPQ3, 2, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - simd_address_at(tmp10, TMPQ2, 3, 4, 16); - simd_address_at(tmp11, TMPQ3, 3, 4, 16); - * [register]:4 tmp11 = (* [register]:4 tmp10) * tmp8; - # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp13, TMPQ3, 0, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp13, TMPQ3, 1, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp13, TMPQ3, 2, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - simd_address_at(tmp12, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp13, TMPQ3, 3, 4, 16); - simd_address_at(tmp14, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) - (* [register]:4 tmp13); - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); - local tmp3:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = zext(tmp3); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 4:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp5, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S 
= NEON_umlsl2(Rd_VPR128.4S, Rn_VPR128.8H, tmp1, 2:1); -@endif + # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 + TMPQ3[0,32] = TMPQ2[0,32] * tmp4; + TMPQ3[32,32] = TMPQ2[32,32] * tmp4; + TMPQ3[64,32] = TMPQ2[64,32] * tmp4; + TMPQ3[96,32] = TMPQ2[96,32] * tmp4; + # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.369 UMLSL, UMLSL2 (vector) page C7-2233 line 125511 MATCH x2e20a000/mask=xbf20fc00 @@ -47932,67 +25772,21 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :umlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xa & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 8, 16); - simd_address_at(tmp13, TMPQ4, 0, 8, 16); - simd_address_at(tmp14, TMPQ5, 0, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 8, 16); - simd_address_at(tmp13, TMPQ4, 1, 8, 16); - simd_address_at(tmp14, TMPQ5, 1, 8, 16); - * [register]:8 tmp14 = (* [register]:8 tmp12) * (* [register]:8 tmp13); + TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ5 on lane size 8 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp16, TMPQ5, 0, 8, 16); - simd_address_at(tmp17, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp15) - (* [register]:8 tmp16); - simd_address_at(tmp15, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp16, TMPQ5, 1, 8, 16); - simd_address_at(tmp17, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp17 = (* [register]:8 tmp15) - (* [register]:8 tmp16); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ5[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ5[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = 
SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 4:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 8:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.2D, tmp5, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_umlsl2(Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.369 UMLSL, UMLSL2 (vector) page C7-2233 line 125511 MATCH x2e20a000/mask=xbf20fc00 @@ -48004,95 +25798,29 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :umlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xa & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 4, 16); - simd_address_at(tmp13, TMPQ4, 0, 4, 16); - simd_address_at(tmp14, TMPQ5, 0, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 4, 16); - simd_address_at(tmp13, TMPQ4, 1, 4, 16); - simd_address_at(tmp14, TMPQ5, 1, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 4, 16); - simd_address_at(tmp13, TMPQ4, 2, 4, 16); - simd_address_at(tmp14, TMPQ5, 2, 4, 16); - * [register]:4 tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 4, 16); - simd_address_at(tmp13, TMPQ4, 3, 4, 16); - simd_address_at(tmp14, TMPQ5, 3, 4, 16); - * [register]:4 
tmp14 = (* [register]:4 tmp12) * (* [register]:4 tmp13); + TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ5 on lane size 4 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp16, TMPQ5, 0, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) - (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp16, TMPQ5, 1, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) - (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp16, TMPQ5, 2, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) - (* [register]:4 tmp16); - simd_address_at(tmp15, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp16, TMPQ5, 3, 4, 16); - simd_address_at(tmp17, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp17 = (* [register]:4 tmp15) - (* [register]:4 tmp16); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ5[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ5[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ5[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ5[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 2:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 4:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp5, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_umlsl2(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.369 UMLSL, UMLSL2 (vector) page C7-2233 line 125511 MATCH x2e20a000/mask=xbf20fc00 @@ -48104,151 +25832,45 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :umlsl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xa & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 
tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + TMPQ4[32,16] = zext(TMPD3[16,8]); + TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 2 - local tmp12:4 = 0; - local tmp13:4 = 0; - local tmp14:4 = 0; - simd_address_at(tmp12, TMPQ2, 0, 2, 16); - simd_address_at(tmp13, TMPQ4, 0, 2, 16); - simd_address_at(tmp14, TMPQ5, 0, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 1, 2, 16); - simd_address_at(tmp13, TMPQ4, 1, 2, 16); - simd_address_at(tmp14, TMPQ5, 1, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 2, 2, 16); - simd_address_at(tmp13, TMPQ4, 2, 2, 16); - simd_address_at(tmp14, TMPQ5, 2, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 3, 2, 16); - simd_address_at(tmp13, TMPQ4, 3, 2, 16); - simd_address_at(tmp14, TMPQ5, 3, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 4, 2, 16); - simd_address_at(tmp13, TMPQ4, 4, 2, 16); - simd_address_at(tmp14, TMPQ5, 4, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 5, 2, 16); - simd_address_at(tmp13, TMPQ4, 5, 2, 16); - simd_address_at(tmp14, TMPQ5, 5, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 6, 2, 16); - simd_address_at(tmp13, TMPQ4, 6, 2, 16); - simd_address_at(tmp14, TMPQ5, 6, 2, 16); - * [register]:2 tmp14 = (* 
[register]:2 tmp12) * (* [register]:2 tmp13); - simd_address_at(tmp12, TMPQ2, 7, 2, 16); - simd_address_at(tmp13, TMPQ4, 7, 2, 16); - simd_address_at(tmp14, TMPQ5, 7, 2, 16); - * [register]:2 tmp14 = (* [register]:2 tmp12) * (* [register]:2 tmp13); + TMPQ5[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; + TMPQ5[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; + TMPQ5[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; + TMPQ5[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; + TMPQ5[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; + TMPQ5[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; + TMPQ5[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; + TMPQ5[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ5 on lane size 2 - local tmp15:4 = 0; - local tmp16:4 = 0; - local tmp17:4 = 0; - simd_address_at(tmp15, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp16, TMPQ5, 0, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp16, TMPQ5, 1, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp16, TMPQ5, 2, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp16, TMPQ5, 3, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp16, TMPQ5, 4, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp16, TMPQ5, 5, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp16, TMPQ5, 6, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); - simd_address_at(tmp15, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp16, TMPQ5, 7, 2, 16); - simd_address_at(tmp17, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp17 = (* [register]:2 tmp15) - (* [register]:2 tmp16); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ5[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ5[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ5[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ5[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ5[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ5[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ5[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ5[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 1:1); - local tmp5:16 = SIMD_INT_MULT(tmp2, tmp4, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.8H, tmp5, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_umlsl2(Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.369 UMLSL, UMLSL2 (vector) page C7-2233 line 125511 
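# Editor's note (illustrative comment only, not generated output): the rewritten
# UMLSL/UMLSL2 bodies above all follow one pattern -- take the high 64 bits of the
# source with a [64,64] slice, widen each lane into a TMPQ temporary through explicit
# [offset,width] slices, multiply lane-wise, then subtract lane-wise from the
# accumulator. A condensed two-lane sketch of that pattern, assuming hypothetical
# 64-bit operands SRC and MUL and a 128-bit accumulator ACC, would read:
#   TMPQ1[0,64]  = zext(SRC[0,32]);   TMPQ1[64,64] = zext(SRC[32,32]);
#   TMPQ2[0,64]  = zext(MUL[0,32]);   TMPQ2[64,64] = zext(MUL[32,32]);
#   ACC[0,64]  = ACC[0,64]  - TMPQ1[0,64]  * TMPQ2[0,64];
#   ACC[64,64] = ACC[64,64] - TMPQ1[64,64] * TMPQ2[64,64];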
MATCH x2e20a000/mask=xbf20fc00 @@ -48260,59 +25882,19 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :umlsl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xa & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = zext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = zext(* [register]:4 tmp5); + TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 8, 16); - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - simd_address_at(tmp10, TMPQ3, 0, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) * (* [register]:8 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 8, 16); - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - simd_address_at(tmp10, TMPQ3, 1, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp8) * (* [register]:8 tmp9); + TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; + TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp12, TMPQ3, 0, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) - (* [register]:8 tmp12); - simd_address_at(tmp11, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp12, TMPQ3, 1, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) - (* [register]:8 tmp12); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.2S, 4:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 8:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.2D, tmp3, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_umlsl(Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.369 UMLSL, UMLSL2 (vector) page C7-2233 line 125511 MATCH x2e20a000/mask=xbf20fc00 @@ -48324,87 +25906,27 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :umlsl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xa & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, 
Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); + TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 4, 16); - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - simd_address_at(tmp10, TMPQ3, 0, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 4, 16); - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - simd_address_at(tmp10, TMPQ3, 1, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 4, 16); - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - simd_address_at(tmp10, TMPQ3, 2, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 4, 16); - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - simd_address_at(tmp10, TMPQ3, 3, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp8) * (* [register]:4 tmp9); + TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; + TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; + TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; + TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp12, TMPQ3, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp12, TMPQ3, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp12, TMPQ3, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp12, TMPQ3, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * 
[register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.4H, 2:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 4:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.4S, tmp3, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_umlsl(Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.369 UMLSL, UMLSL2 (vector) page C7-2233 line 125511 MATCH x2e20a000/mask=xbf20fc00 @@ -48416,143 +25938,43 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :umlsl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xa & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Rd_VPR128 & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, 
Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); + TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 2 - local tmp8:4 = 0; - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp8, TMPQ1, 0, 2, 16); - simd_address_at(tmp9, TMPQ2, 0, 2, 16); - simd_address_at(tmp10, TMPQ3, 0, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 1, 2, 16); - simd_address_at(tmp9, TMPQ2, 1, 2, 16); - simd_address_at(tmp10, TMPQ3, 1, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 2, 2, 16); - simd_address_at(tmp9, TMPQ2, 2, 2, 16); - simd_address_at(tmp10, TMPQ3, 2, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 3, 2, 16); - simd_address_at(tmp9, TMPQ2, 3, 2, 16); - simd_address_at(tmp10, TMPQ3, 3, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 4, 2, 16); - simd_address_at(tmp9, TMPQ2, 4, 2, 16); - simd_address_at(tmp10, TMPQ3, 4, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 5, 2, 16); - simd_address_at(tmp9, TMPQ2, 5, 2, 16); - simd_address_at(tmp10, TMPQ3, 5, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 6, 2, 16); - simd_address_at(tmp9, TMPQ2, 6, 2, 16); - simd_address_at(tmp10, TMPQ3, 6, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); - simd_address_at(tmp8, TMPQ1, 7, 2, 16); - simd_address_at(tmp9, TMPQ2, 7, 2, 16); - simd_address_at(tmp10, TMPQ3, 7, 2, 16); - * [register]:2 tmp10 = (* [register]:2 tmp8) * (* [register]:2 tmp9); + TMPQ3[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; + TMPQ3[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; + TMPQ3[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; + TMPQ3[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; + TMPQ3[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; + TMPQ3[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; + TMPQ3[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; + TMPQ3[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ3 on lane size 2 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp12, TMPQ3, 0, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp12, TMPQ3, 1, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp12, TMPQ3, 2, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - 
(* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp12, TMPQ3, 3, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp12, TMPQ3, 4, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp12, TMPQ3, 5, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp12, TMPQ3, 6, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp12, TMPQ3, 7, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ3[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ3[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ3[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ3[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ3[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ3[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ3[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ3[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.8B, 1:1); - local tmp3:16 = SIMD_INT_MULT(tmp1, tmp2, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rd_VPR128.8H, tmp3, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_umlsl(Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.371 UMOV page C7-2236 line 125692 MATCH x0e003c00/mask=xbfe0fc00 @@ -48562,23 +25984,12 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B # AUNIT --inst x0e013c00/mask=xffe1fc00 --status pass :umov Rd_GPR32, Rn_VPR128.B.imm_neon_uimm4 -is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm4:4, 1, 16); - local tmp2:1 = * [register]:1 tmp1; - Rd_GPR32 = zext(tmp2); + local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; + Rd_GPR32 = zext(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm4:1); - local tmpd:4 = zext(tmp1); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:1 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm4:1); - Rd_GPR32 = NEON_umov(tmp1); -@endif } # C7.2.371 UMOV page C7-2236 line 125692 MATCH x0e003c00/mask=xbfe0fc00 @@ -48588,23 +25999,12 @@ is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 # AUNIT --inst x0e023c00/mask=xffe3fc00 --status pass :umov Rd_GPR32, 
Rn_VPR128.H.imm_neon_uimm3 -is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 & Rd_VPR128 +is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 & Rd_VPR128 { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 - simd_address_at(tmp1, Rn_VPR128, imm_neon_uimm3:4, 2, 16); - local tmp2:2 = * [register]:2 tmp1; - Rd_GPR32 = zext(tmp2); + local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; + Rd_GPR32 = zext(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 -@elif defined(SEMANTIC_pcode) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm3:1); - local tmpd:4 = zext(tmp1); - Rd_GPR64 = zext(tmpd); # assigning to Rd_GPR32 -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Rn_VPR128, imm_neon_uimm3:1); - Rd_GPR32 = NEON_umov(tmp1); -@endif } # C7.2.372 UMULL, UMULL2 (by element) page C7-2238 line 125820 MATCH x2f00a000/mask=xbf00f400 @@ -48616,45 +26016,17 @@ is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 :umull2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xa & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - local tmp6:4 = 0; + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp6, Re_VPR128.S, vIndex:4, 4, 16); - local tmp7:4 = * [register]:4 tmp6; - local tmp8:8 = zext(tmp7); - # simd infix Rd_VPR128.2D = TMPQ2 * tmp8 on lane size 8 - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPQ2, 0, 8, 16); - simd_address_at(tmp10, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp9) * tmp8; - simd_address_at(tmp9, TMPQ2, 1, 8, 16); - simd_address_at(tmp10, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp10 = (* [register]:8 tmp9) * tmp8; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 4:1); - local tmp3:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = zext(tmp3); - local tmpd:16 = SIMD_INT_MULT(tmp2, tmp4); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_umull2(Rn_VPR128.4S, tmp1, 4:1); -@endif + # simd infix Rd_VPR128.2D = TMPQ2 * tmp4 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] * tmp4; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] * tmp4; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.372 UMULL, UMULL2 (by element) page C7-2238 line 125820 MATCH x2f00a000/mask=xbf00f400 @@ -48666,57 +26038,21 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :umull2 
Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xa & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - local tmp6:4 = 0; + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp6, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp7:2 = * [register]:2 tmp6; - local tmp8:4 = zext(tmp7); - # simd infix Rd_VPR128.4S = TMPQ2 * tmp8 on lane size 4 - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPQ2, 0, 4, 16); - simd_address_at(tmp10, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp9) * tmp8; - simd_address_at(tmp9, TMPQ2, 1, 4, 16); - simd_address_at(tmp10, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp9) * tmp8; - simd_address_at(tmp9, TMPQ2, 2, 4, 16); - simd_address_at(tmp10, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp9) * tmp8; - simd_address_at(tmp9, TMPQ2, 3, 4, 16); - simd_address_at(tmp10, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp10 = (* [register]:4 tmp9) * tmp8; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); - local tmp3:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = zext(tmp3); - local tmpd:16 = SIMD_INT_MULT(tmp2, tmp4); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_umull2(Rn_VPR128.8H, tmp1, 2:1); -@endif + # simd infix Rd_VPR128.4S = TMPQ2 * tmp4 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] * tmp4; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] * tmp4; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] * tmp4; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] * tmp4; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.372 UMULL, UMULL2 (by element) page C7-2238 line 125820 MATCH x2f00a000/mask=xbf00f400 @@ -48728,41 +26064,16 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :umull Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xa & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - 
simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - local tmp4:4 = 0; + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 - simd_address_at(tmp4, Re_VPR128.S, vIndex:4, 4, 16); - local tmp5:4 = * [register]:4 tmp4; - local tmp6:8 = zext(tmp5); - # simd infix Rd_VPR128.2D = TMPQ1 * tmp6 on lane size 8 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp7) * tmp6; - simd_address_at(tmp7, TMPQ1, 1, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp7) * tmp6; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.2S, 4:1); - local tmp2:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); + local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = zext(tmp2); - local tmpd:16 = SIMD_INT_MULT(tmp1, tmp3); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); - Rd_VPR128.2D = NEON_umull(Rn_VPR64.2S, tmp1, 4:1); -@endif + # simd infix Rd_VPR128.2D = TMPQ1 * tmp3 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] * tmp3; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] * tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.372 UMULL, UMULL2 (by element) page C7-2238 line 125820 MATCH x2f00a000/mask=xbf00f400 @@ -48774,53 +26085,20 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re :umull Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xa & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - local tmp4:4 = 0; + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 - simd_address_at(tmp4, Re_VPR128Lo.H, vIndexHLM:4, 2, 16); - local tmp5:2 = * [register]:2 tmp4; - local tmp6:4 = zext(tmp5); - # simd infix Rd_VPR128.4S = TMPQ1 * tmp6 on lane size 4 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) * tmp6; - simd_address_at(tmp7, TMPQ1, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) * tmp6; - simd_address_at(tmp7, TMPQ1, 2, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) * tmp6; - simd_address_at(tmp7, TMPQ1, 3, 4, 16); - 
simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) * tmp6; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.4H, 2:1); - local tmp2:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); + local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = zext(tmp2); - local tmpd:16 = SIMD_INT_MULT(tmp1, tmp3); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - local tmp1:2 = SIMD_PIECE(Re_VPR128Lo.H, vIndexHLM:1); - Rd_VPR128.4S = NEON_umull(Rn_VPR64.4H, tmp1, 2:1); -@endif + # simd infix Rd_VPR128.4S = TMPQ1 * tmp3 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] * tmp3; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] * tmp3; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] * tmp3; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] * tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.373 UMULL, UMULL2 (vector) page C7-2241 line 125973 MATCH x2e20c000/mask=xbf20fc00 @@ -48832,54 +26110,18 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM :umull2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xc & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix Rd_VPR128.2D = TMPQ2 * TMPQ4 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, TMPQ4, 0, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) * (* [register]:8 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, TMPQ4, 1, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) * (* [register]:8 tmp12); + Rd_VPR128.2D[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 4:1); - local tmpd:16 = SIMD_INT_MULT(tmp2, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_umull2(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.373 UMULL, UMULL2 
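# Editor's note (illustrative comment only): in the UMULL/UMULL2 by-element forms
# above, the selected lane is now read directly through the vIndex operand
# (e.g. "local tmp3:4 = Re_VPR128.S.vIndex;"), zero-extended once, and multiplied
# against every widened lane of the source; the SIMD_PIECE/SIMD_INT_* pseudo-ops and
# the simd_address_at register indirection no longer appear in these bodies.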
(vector) page C7-2241 line 125973 MATCH x2e20c000/mask=xbf20fc00 @@ -48891,74 +26133,24 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :umull2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xc & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 * TMPQ4 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, TMPQ4, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) * (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, TMPQ4, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) * (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, TMPQ4, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) * (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, TMPQ4, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) * (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); - 
local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_MULT(tmp2, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_umull2(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.373 UMULL, UMULL2 (vector) page C7-2241 line 125973 MATCH x2e20c000/mask=xbf20fc00 @@ -48970,114 +26162,36 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :umull2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xc & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 
7, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + TMPQ4[32,16] = zext(TMPD3[16,8]); + TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 * TMPQ4 on lane size 2 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 2, 16); - simd_address_at(tmp12, TMPQ4, 0, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 2, 16); - simd_address_at(tmp12, TMPQ4, 1, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 2, 2, 16); - simd_address_at(tmp12, TMPQ4, 2, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 3, 2, 16); - simd_address_at(tmp12, TMPQ4, 3, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 4, 2, 16); - simd_address_at(tmp12, TMPQ4, 4, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 5, 2, 16); - simd_address_at(tmp12, TMPQ4, 5, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 6, 2, 16); - simd_address_at(tmp12, TMPQ4, 6, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 7, 2, 16); - simd_address_at(tmp12, TMPQ4, 7, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) * (* [register]:2 tmp12); + Rd_VPR128.8H[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 1:1); - local tmpd:16 = SIMD_INT_MULT(tmp2, tmp4, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_umull2(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.373 UMULL, UMULL2 (vector) page C7-2241 line 125973 MATCH x2e20c000/mask=xbf20fc00 @@ -49088,9 +26202,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :umull Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xc & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_umull(Rn_VPR64.2S, 
Rm_VPR64.2S, 4:1);
-@endif
 }

 # C7.2.373 UMULL, UMULL2 (vector) page C7-2241 line 125973 MATCH x2e20c000/mask=xbf20fc00
@@ -49101,9 +26213,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S
 :umull Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H
 is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xc & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR128.4S = NEON_umull(Rn_VPR64.4H, Rm_VPR64.4H, 2:1);
-@endif
 }

 # C7.2.373 UMULL, UMULL2 (vector) page C7-2241 line 125973 MATCH x2e20c000/mask=xbf20fc00
@@ -49114,9 +26224,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H
 :umull Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B
 is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xc & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR128.8H = NEON_umull(Rn_VPR64.8B, Rm_VPR64.8B, 1:1);
-@endif
 }

 # C7.2.374 UQADD page C7-2243 line 126088 MATCH x7e200c00/mask=xff20fc00
@@ -49127,9 +26235,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B
 :uqadd Rd_FPR8, Rn_FPR8, Rm_FPR8
 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x1 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_FPR8 = NEON_uqadd(Rn_FPR8, Rm_FPR8);
-@endif
 }

 # C7.2.374 UQADD page C7-2243 line 126088 MATCH x7e200c00/mask=xff20fc00
@@ -49140,9 +26246,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=
 :uqadd Rd_FPR64, Rn_FPR64, Rm_FPR64
 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x1 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_FPR64 = NEON_uqadd(Rn_FPR64, Rm_FPR64);
-@endif
 }

 # C7.2.374 UQADD page C7-2243 line 126088 MATCH x7e200c00/mask=xff20fc00
@@ -49153,9 +26257,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115
 :uqadd Rd_FPR16, Rn_FPR16, Rm_FPR16
 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x1 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_FPR16 = NEON_uqadd(Rn_FPR16, Rm_FPR16);
-@endif
 }

 # C7.2.374 UQADD page C7-2243 line 126088 MATCH x7e200c00/mask=xff20fc00
@@ -49166,9 +26268,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115
 :uqadd Rd_FPR32, Rn_FPR32, Rm_FPR32
 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x1 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_FPR32 = NEON_uqadd(Rn_FPR32, Rm_FPR32);
-@endif
 }

 # C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00
@@ -49179,9 +26279,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115
 :uqadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B
 is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x1 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR128.16B = NEON_uqadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1);
-@endif
 }

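Note: the NEON_uqadd(...) calls kept above are pseudo-op invocations rather than expanded per-lane p-code, and per the UQADD description cited in these hunks (C7.2.374) they stand for lane-wise unsigned saturating addition; the trailing 1:1/2:1/4:1/8:1 operand tracks the lane width in bytes. A minimal C sketch of a single 8-bit lane, given only as an illustration and not part of the patch (the name uqadd_u8 is made up here):

    #include <stdint.h>

    /* Unsigned saturating add of one byte lane: the result clamps at 0xFF
     * instead of wrapping, which is what UQADD guarantees per lane. */
    uint8_t uqadd_u8(uint8_t a, uint8_t b)
    {
        uint16_t sum = (uint16_t)a + (uint16_t)b;  /* widen so the carry stays visible */
        return sum > 0xFF ? 0xFF : (uint8_t)sum;
    }

The wider lanes behave the same way, saturating at 0xFFFF, 0xFFFFFFFF, and 0xFFFFFFFFFFFFFFFF respectively.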
 # C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00
@@ -49192,9 +26290,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16
 :uqadd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
 is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x1 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR128.2D = NEON_uqadd(Rn_VPR128.2D, Rm_VPR128.2D, 8:1);
-@endif
 }

 # C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00
@@ -49205,9 +26301,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D
 :uqadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
 is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x1 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR64.2S = NEON_uqadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1);
-@endif
 }

 # C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00
@@ -49218,9 +26312,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S
 :uqadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
 is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x1 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR64.4H = NEON_uqadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1);
-@endif
 }

 # C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00
@@ -49231,9 +26323,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H
 :uqadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
 is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x1 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR128.4S = NEON_uqadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1);
-@endif
 }

 # C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00
@@ -49244,9 +26334,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S
 :uqadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B
 is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x1 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR64.8B = NEON_uqadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1);
-@endif
 }

 # C7.2.374 UQADD page C7-2243 line 126088 MATCH x2e200c00/mask=xbf20fc00
@@ -49257,9 +26345,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B
 :uqadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H
 is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x1 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_VPR128.8H = NEON_uqadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1);
-@endif
 }

 # C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x7e205c00/mask=xff20fc00
@@ -49270,9 +26356,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H
 :uqrshl Rd_FPR8, Rn_FPR8, Rm_FPR8
 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0xb & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd
 {
-@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode)
 Rd_FPR8
= NEON_uqrshl(Rn_FPR8, Rm_FPR8); -@endif } # C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x7e205c00/mask=xff20fc00 @@ -49283,9 +26367,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115= :uqrshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0xb & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_uqrshl(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x7e205c00/mask=xff20fc00 @@ -49296,9 +26378,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :uqrshl Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0xb & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_uqrshl(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x7e205c00/mask=xff20fc00 @@ -49309,9 +26389,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115 :uqrshl Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0xb & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_uqrshl(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 @@ -49322,9 +26400,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115 :uqrshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xb & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_uqrshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 @@ -49335,9 +26411,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :uqrshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0xb & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_uqrshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 @@ -49348,9 +26422,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :uqrshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xb & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_uqrshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 @@ -49361,9 +26433,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :uqrshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xb & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = 
NEON_uqrshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 @@ -49374,9 +26444,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :uqrshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xb & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_uqrshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 @@ -49387,9 +26455,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :uqrshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xb & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_uqrshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.375 UQRSHL page C7-2245 line 126213 MATCH x2e205c00/mask=xbf20fc00 @@ -49400,9 +26466,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :uqrshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xb & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_uqrshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x2f009c00/mask=xbf80fc00 @@ -49413,9 +26477,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :uqrshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_uqrshrn2(Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x2f009c00/mask=xbf80fc00 @@ -49426,9 +26488,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & :uqrshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_uqrshrn(Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x2f009c00/mask=xbf80fc00 @@ -49439,9 +26499,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & :uqrshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_uqrshrn(Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x2f009c00/mask=xbf80fc00 @@ -49452,9 +26510,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 :uqrshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & b_1010=1 & 
Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_uqrshrn(Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x2f009c00/mask=xbf80fc00 @@ -49465,9 +26521,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & :uqrshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_uqrshrn(Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x2f009c00/mask=xbf80fc00 @@ -49478,9 +26532,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & :uqrshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_uqrshrn2(Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x7f009c00/mask=xff80fc00 @@ -49492,9 +26544,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 :uqrshrn Rd_FPR8, Rn_FPR16, Imm_shr_imm8 is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100111 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_uqrshrn(Rn_FPR16, Imm_shr_imm8:1); -@endif } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x7f009c00/mask=xff80fc00 @@ -49506,9 +26556,7 @@ is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100111 & Rd_FPR8 & Rn_FPR16 & I :uqrshrn Rd_FPR16, Rn_FPR32, Imm_shr_imm16 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100111 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_uqrshrn(Rn_FPR32, Imm_shr_imm16:1); -@endif } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2247 line 126351 MATCH x7f009c00/mask=xff80fc00 @@ -49520,9 +26568,7 @@ is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100111 & Rd_FPR16 & Rn_FPR32 & I :uqrshrn Rd_FPR32, Rn_FPR64, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100111 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_uqrshrn(Rn_FPR64, Imm_shr_imm32:1); -@endif } # C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 @@ -49533,9 +26579,7 @@ is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100111 & Rd_FPR32 & Rn_FPR64 & Im :uqshl Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_uqshl(Rn_VPR128.16B, Imm_uimm3:1, 1:1); -@endif } # C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 @@ -49546,9 +26590,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1 :uqshl Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xe & b_1010=1 & 
Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_uqshl(Rn_VPR128.2D, Imm_imm0_63:1, 8:1); -@endif } # C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 @@ -49559,9 +26601,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xe & :uqshl Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_uqshl(Rn_VPR64.2S, Imm_uimm5:1, 4:1); -@endif } # C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 @@ -49572,9 +26612,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_101 :uqshl Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_uqshl(Rn_VPR64.4H, Imm_uimm4:1, 2:1); -@endif } # C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 @@ -49585,9 +26623,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1 :uqshl Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_uqshl(Rn_VPR128.4S, Imm_uimm5:1, 4:1); -@endif } # C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 @@ -49598,9 +26634,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_101 :uqshl Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_uqshl(Rn_VPR64.8B, Imm_uimm3:1, 1:1); -@endif } # C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x2f007400/mask=xbf80fc00 @@ -49611,9 +26645,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1 :uqshl Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_uqshl(Rn_VPR128.8H, Imm_uimm4:1, 2:1); -@endif } # C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x7f007400/mask=xff80fc00 @@ -49625,9 +26657,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1 :uqshl Rd_FPR8, Rn_FPR8, Imm_shr_imm8 is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b011101 & Rd_FPR8 & Rn_FPR8 & Imm_shr_imm8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_uqshl(Rn_FPR8, Imm_shr_imm8:1); -@endif } # C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x7f007400/mask=xff80fc00 @@ -49639,9 +26669,7 @@ is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b011101 & Rd_FPR8 & Rn_FPR8 & Im :uqshl Rd_FPR16, Rn_FPR16, Imm_shr_imm16 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b011101 & Rd_FPR16 & Rn_FPR16 & Imm_shr_imm16 & Zd { -@if 
defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_uqshl(Rn_FPR16, Imm_shr_imm16:1); -@endif } # C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x7f007400/mask=xff80fc00 @@ -49653,9 +26681,7 @@ is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b011101 & Rd_FPR16 & Rn_FPR16 & I :uqshl Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b011101 & Rd_FPR32 & Rn_FPR32 & Imm_shr_imm32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_uqshl(Rn_FPR32, Imm_shr_imm32:1); -@endif } # C7.2.377 UQSHL (immediate) page C7-2250 line 126535 MATCH x7f007400/mask=xff80fc00 @@ -49667,9 +26693,7 @@ is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b011101 & Rd_FPR32 & Rn_FPR32 & Im :uqshl Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b011111110 & b_22=1 & b_1015=0b011101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_uqshl(Rn_FPR64, Imm_shr_imm64:1); -@endif } # C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x7e204c00/mask=xff20fc00 @@ -49680,9 +26704,7 @@ is b_2331=0b011111110 & b_22=1 & b_1015=0b011101 & Rd_FPR64 & Rn_FPR64 & Imm_shr :uqshl Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x9 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_uqshl(Rn_FPR8, Rm_FPR8); -@endif } # C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x7e204c00/mask=xff20fc00 @@ -49693,9 +26715,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115= :uqshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x9 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_uqshl(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x7e204c00/mask=xff20fc00 @@ -49706,9 +26726,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :uqshl Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x9 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_uqshl(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x7e204c00/mask=xff20fc00 @@ -49719,9 +26737,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115 :uqshl Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x9 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_uqshl(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 @@ -49732,9 +26748,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115 :uqshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x9 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = 
NEON_uqshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 @@ -49745,9 +26759,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :uqshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x9 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_uqshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 @@ -49758,9 +26770,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :uqshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x9 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_uqshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 @@ -49771,9 +26781,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :uqshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x9 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_uqshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 @@ -49784,9 +26792,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :uqshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x9 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_uqshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 @@ -49797,9 +26803,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :uqshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x9 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_uqshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.378 UQSHL (register) page C7-2253 line 126700 MATCH x2e204c00/mask=xbf20fc00 @@ -49810,9 +26814,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :uqshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x9 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_uqshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 @@ -49823,9 +26825,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :uqshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & 
b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_uqshrn2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 @@ -49836,9 +26836,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & :uqshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_uqshrn(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 @@ -49849,9 +26847,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & :uqshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_uqshrn(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 @@ -49862,9 +26858,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 :uqshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_uqshrn2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); -@endif } # C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 @@ -49875,9 +26869,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & :uqshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_uqshrn(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); -@endif } # C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x2f009400/mask=xbf80fc00 @@ -49888,9 +26880,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & :uqshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_uqshrn2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); -@endif } # C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x7f009400/mask=xff80fc00 @@ -49902,9 +26892,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 :uqshrn Rd_FPR8, Rn_FPR16, Imm_shr_imm8 is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100101 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_uqshrn(Rd_FPR8, Rn_FPR16, Imm_shr_imm8:1); -@endif } # C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x7f009400/mask=xff80fc00 @@ -49916,9 +26904,7 @@ is b_2331=0b011111110 & 
b_1922=0b0001 & b_1015=0b100101 & Rd_FPR8 & Rn_FPR16 & I :uqshrn Rd_FPR16, Rn_FPR32, Imm_shr_imm16 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100101 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_uqshrn(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); -@endif } # C7.2.379 UQSHRN, UQSHRN2 page C7-2255 line 126838 MATCH x7f009400/mask=xff80fc00 @@ -49930,9 +26916,7 @@ is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100101 & Rd_FPR16 & Rn_FPR32 & I :uqshrn Rd_FPR32, Rn_FPR64, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100101 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_uqshrn(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); -@endif } # C7.2.380 UQSUB page C7-2258 line 127023 MATCH x7e202c00/mask=xff20fc00 @@ -49943,9 +26927,7 @@ is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100101 & Rd_FPR32 & Rn_FPR64 & Im :uqsub Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x5 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_uqsub(Rn_FPR8, Rm_FPR8); -@endif } # C7.2.380 UQSUB page C7-2258 line 127023 MATCH x7e202c00/mask=xff20fc00 @@ -49956,9 +26938,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115= :uqsub Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x5 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_uqsub(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.380 UQSUB page C7-2258 line 127023 MATCH x7e202c00/mask=xff20fc00 @@ -49969,9 +26949,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :uqsub Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x5 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_uqsub(Rn_FPR16, Rm_FPR16); -@endif } # C7.2.380 UQSUB page C7-2258 line 127023 MATCH x7e202c00/mask=xff20fc00 @@ -49982,9 +26960,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115 :uqsub Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x5 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_uqsub(Rn_FPR32, Rm_FPR32); -@endif } # C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 @@ -49995,9 +26971,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115 :uqsub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x5 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_uqsub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 @@ -50008,9 +26982,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :uqsub Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe 
& advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x5 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_uqsub(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 @@ -50021,9 +26993,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :uqsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x5 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_uqsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 @@ -50034,9 +27004,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :uqsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x5 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_uqsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 @@ -50047,9 +27015,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :uqsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x5 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_uqsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 @@ -50060,9 +27026,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :uqsub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x5 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_uqsub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.380 UQSUB page C7-2258 line 127023 MATCH x2e202c00/mask=xbf20fc00 @@ -50073,9 +27037,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :uqsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x5 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_uqsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x7e214800/mask=xff3ffc00 @@ -50087,9 +27049,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :uqxtn Rd_FPR8, Rn_FPR16 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100001010010 & Rd_FPR8 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_uqxtn(Rd_FPR8, Rn_FPR16); -@endif } # C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x7e214800/mask=xff3ffc00 @@ -50101,9 +27061,7 @@ is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100001010010 & Rd_ :uqxtn Rd_FPR16, Rn_FPR32 is b_31=0 & b_30=1 & 
b_2429=0b111110 & b_2223=0b01 & b_1021=0b100001010010 & Rd_FPR16 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_uqxtn(Rd_FPR16, Rn_FPR32); -@endif } # C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x7e214800/mask=xff3ffc00 @@ -50115,9 +27073,7 @@ is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100001010010 & Rd_ :uqxtn Rd_FPR32, Rn_FPR64 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100001010010 & Rd_FPR32 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_uqxtn(Rd_FPR32, Rn_FPR64); -@endif } # C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x2e214800/mask=xbf3ffc00 @@ -50129,9 +27085,7 @@ is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100001010010 & Rd_ :uqxtn Rd_VPR64.8B, Rn_VPR128.8H is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001010010 & Rd_VPR64.8B & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_uqxtn(Rd_VPR64.8B, Rn_VPR128.8H, 2:1); -@endif } # C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x2e214800/mask=xbf3ffc00 @@ -50143,9 +27097,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001010010 & Rd_ :uqxtn2 Rd_VPR128.16B, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001010010 & Rd_VPR128.16B & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_uqxtn2(Rd_VPR128.16B, Rn_VPR128.8H, 2:1); -@endif } # C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x2e214800/mask=xbf3ffc00 @@ -50157,9 +27109,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001010010 & Rd_ :uqxtn Rd_VPR64.4H, Rn_VPR128.4S is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001010010 & Rd_VPR64.4H & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_uqxtn(Rd_VPR64.4H, Rn_VPR128.4S, 4:1); -@endif } # C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x2e214800/mask=xbf3ffc00 @@ -50171,9 +27121,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001010010 & Rd_ :uqxtn2 Rd_VPR128.8H, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001010010 & Rd_VPR128.8H & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_uqxtn2(Rd_VPR128.8H, Rn_VPR128.4S, 4:1); -@endif } # C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x2e214800/mask=xbf3ffc00 @@ -50185,9 +27133,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001010010 & Rd_ :uqxtn Rd_VPR64.2S, Rn_VPR128.2D is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001010010 & Rd_VPR64.2S & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_uqxtn(Rd_VPR64.2S, Rn_VPR128.2D, 8:1); -@endif } # C7.2.381 UQXTN, UQXTN2 page C7-2260 line 127148 MATCH x2e214800/mask=xbf3ffc00 @@ -50199,9 +27145,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001010010 & Rd_ :uqxtn2 Rd_VPR128.4S, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001010010 & Rd_VPR128.4S & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) 
Rd_VPR128.4S = NEON_uqxtn2(Rd_VPR128.4S, Rn_VPR128.2D, 8:1); -@endif } # C7.2.382 URECPE page C7-2263 line 127300 MATCH x0ea1c800/mask=xbfbffc00 @@ -50213,9 +27157,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001010010 & Rd_ :urecpe Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_urecpe(Rn_VPR64.2S, 4:1); -@endif } # C7.2.382 URECPE page C7-2263 line 127300 MATCH x0ea1c800/mask=xbfbffc00 @@ -50227,9 +27169,7 @@ is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR6 :urecpe Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_urecpe(Rn_VPR128.4S, 4:1); -@endif } # C7.2.383 URHADD page C7-2264 line 127365 MATCH x2e201400/mask=xbf20fc00 @@ -50240,9 +27180,7 @@ is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR1 :urhadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x2 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_urhadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.383 URHADD page C7-2264 line 127365 MATCH x2e201400/mask=xbf20fc00 @@ -50253,9 +27191,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :urhadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x2 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_urhadd(Rn_VPR64.2S, Rm_VPR64.2S, 2:1); -@endif } # C7.2.383 URHADD page C7-2264 line 127365 MATCH x2e201400/mask=xbf20fc00 @@ -50266,9 +27202,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :urhadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x2 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_urhadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.383 URHADD page C7-2264 line 127365 MATCH x2e201400/mask=xbf20fc00 @@ -50279,9 +27213,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :urhadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x2 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_urhadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.383 URHADD page C7-2264 line 127365 MATCH x2e201400/mask=xbf20fc00 @@ -50292,9 +27224,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :urhadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x2 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || 
defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_urhadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.383 URHADD page C7-2264 line 127365 MATCH x2e201400/mask=xbf20fc00 @@ -50305,9 +27235,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :urhadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x2 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_urhadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.384 URSHL page C7-2266 line 127452 MATCH x7e205400/mask=xff20fc00 @@ -50318,9 +27246,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :urshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0xa & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_urshl(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 @@ -50331,9 +27257,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :urshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xa & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_urshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 @@ -50344,9 +27268,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :urshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0xa & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_urshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 @@ -50357,9 +27279,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :urshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xa & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_urshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 @@ -50370,9 +27290,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :urshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xa & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_urshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 @@ -50383,9 +27301,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :urshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xa & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S 
& Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_urshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 @@ -50396,9 +27312,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :urshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xa & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_urshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.384 URSHL page C7-2266 line 127452 MATCH x2e205400/mask=xbf20fc00 @@ -50409,9 +27323,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :urshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xa & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_urshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 @@ -50422,9 +27334,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :urshr Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_urshr(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 @@ -50435,9 +27345,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & :urshr Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x4 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_urshr(Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); -@endif } # C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 @@ -50448,9 +27356,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x4 :urshr Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_urshr(Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 @@ -50461,9 +27367,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b :urshr Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_urshr(Rn_VPR64.4H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 @@ -50474,9 +27378,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & :urshr Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & 
b_1115=0x4 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_urshr(Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 @@ -50487,9 +27389,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b :urshr Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_urshr(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.385 URSHR page C7-2268 line 127587 MATCH x2f002400/mask=xbf80fc00 @@ -50500,9 +27400,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & :urshr Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_urshr(Rn_VPR128.8H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.385 URSHR page C7-2268 line 127587 MATCH x7f002400/mask=xff80fc00 @@ -50514,9 +27412,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & :urshr Rd_FPR64, Rn_FPR64, Imm_shr_imm32 is b_2331=0b011111110 & b_22=1 & b_1015=0b001001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_urshr(Rn_FPR64, Imm_shr_imm32:1); -@endif } # C7.2.386 URSQRTE page C7-2270 line 127723 MATCH x2ea1c800/mask=xbfbffc00 @@ -50528,9 +27424,7 @@ is b_2331=0b011111110 & b_22=1 & b_1015=0b001001 & Rd_FPR64 & Rn_FPR64 & Imm_shr :ursqrte Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_ursqrte(Rn_VPR64.2S, 4:1); -@endif } # C7.2.386 URSQRTE page C7-2270 line 127723 MATCH x2ea1c800/mask=xbfbffc00 @@ -50542,9 +27436,7 @@ is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR6 :ursqrte Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_ursqrte(Rn_VPR128.4S, 4:1); -@endif } # C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 @@ -50556,134 +27448,41 @@ is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR1 :ursra Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.16B >> Imm_shr_imm8:1 on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, TMPQ1, 0, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, TMPQ1, 1, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, TMPQ1, 2, 1, 16); - * 
[register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, TMPQ1, 3, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, TMPQ1, 4, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, TMPQ1, 5, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, TMPQ1, 6, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, TMPQ1, 7, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, TMPQ1, 8, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, TMPQ1, 9, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, TMPQ1, 10, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, TMPQ1, 11, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, TMPQ1, 12, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, TMPQ1, 13, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, TMPQ1, 14, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, TMPQ1, 15, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; + TMPQ1[0,8] = Rn_VPR128.16B[0,8] >> Imm_shr_imm8:1; + TMPQ1[8,8] = Rn_VPR128.16B[8,8] >> Imm_shr_imm8:1; + TMPQ1[16,8] = Rn_VPR128.16B[16,8] >> Imm_shr_imm8:1; + TMPQ1[24,8] = Rn_VPR128.16B[24,8] >> Imm_shr_imm8:1; + TMPQ1[32,8] = Rn_VPR128.16B[32,8] >> Imm_shr_imm8:1; + TMPQ1[40,8] = Rn_VPR128.16B[40,8] >> Imm_shr_imm8:1; + TMPQ1[48,8] = Rn_VPR128.16B[48,8] >> Imm_shr_imm8:1; + TMPQ1[56,8] = Rn_VPR128.16B[56,8] >> Imm_shr_imm8:1; + TMPQ1[64,8] = Rn_VPR128.16B[64,8] >> Imm_shr_imm8:1; + TMPQ1[72,8] = Rn_VPR128.16B[72,8] >> Imm_shr_imm8:1; + TMPQ1[80,8] = Rn_VPR128.16B[80,8] >> Imm_shr_imm8:1; + TMPQ1[88,8] = Rn_VPR128.16B[88,8] >> Imm_shr_imm8:1; + TMPQ1[96,8] = Rn_VPR128.16B[96,8] >> Imm_shr_imm8:1; + TMPQ1[104,8] = Rn_VPR128.16B[104,8] >> Imm_shr_imm8:1; + TMPQ1[112,8] = Rn_VPR128.16B[112,8] >> Imm_shr_imm8:1; + TMPQ1[120,8] = Rn_VPR128.16B[120,8] >> Imm_shr_imm8:1; # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR128.16B, 0, 1, 16); - simd_address_at(tmp5, TMPQ1, 0, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 1, 1, 16); - simd_address_at(tmp5, TMPQ1, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - 
simd_address_at(tmp4, Rd_VPR128.16B, 2, 1, 16); - simd_address_at(tmp5, TMPQ1, 2, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 3, 1, 16); - simd_address_at(tmp5, TMPQ1, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 4, 1, 16); - simd_address_at(tmp5, TMPQ1, 4, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 5, 1, 16); - simd_address_at(tmp5, TMPQ1, 5, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 6, 1, 16); - simd_address_at(tmp5, TMPQ1, 6, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 7, 1, 16); - simd_address_at(tmp5, TMPQ1, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 8, 1, 16); - simd_address_at(tmp5, TMPQ1, 8, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 9, 1, 16); - simd_address_at(tmp5, TMPQ1, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 10, 1, 16); - simd_address_at(tmp5, TMPQ1, 10, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 11, 1, 16); - simd_address_at(tmp5, TMPQ1, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 12, 1, 16); - simd_address_at(tmp5, TMPQ1, 12, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 13, 1, 16); - simd_address_at(tmp5, TMPQ1, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 14, 1, 16); - simd_address_at(tmp5, TMPQ1, 14, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 15, 1, 16); - simd_address_at(tmp5, TMPQ1, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = 
Rd_VPR128.16B[72,8] + TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_RIGHT(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.16B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_ursra(Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 @@ -50695,38 +27494,14 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & :ursra Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x6 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm64); # simd infix TMPQ1 = Rn_VPR128.2D >> tmp1 on lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) >> tmp1; - simd_address_at(tmp3, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) >> tmp1; + TMPQ1[0,64] = Rn_VPR128.2D[0,64] >> tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] >> tmp1; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp6, TMPQ1, 0, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp5) + (* [register]:8 tmp6); - simd_address_at(tmp5, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp6, TMPQ1, 1, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp5) + (* [register]:8 tmp6); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm64); - local tmp2:16 = SIMD_INT_RIGHT(Rn_VPR128.2D, tmp1, 8:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_ursra(Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); -@endif } # C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 @@ -50738,38 +27513,14 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x6 :ursra Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Imm_shr_imm32; # simd infix TMPD1 = Rn_VPR64.2S >> tmp1 on lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp3) >> tmp1; - simd_address_at(tmp3, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp3) >> tmp1; + TMPD1[0,32] = 
Rn_VPR64.2S[0,32] >> tmp1; + TMPD1[32,32] = Rn_VPR64.2S[32,32] >> tmp1; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPD1, 0, 4, 8); - simd_address_at(tmp7, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPD1, 1, 4, 8); - simd_address_at(tmp7, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Imm_shr_imm32; - local tmp2:8 = SIMD_INT_RIGHT(Rn_VPR64.2S, tmp1, 4:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_ursra(Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 @@ -50781,42 +27532,15 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b :ursra Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.4H >> Imm_shr_imm16:2 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPD1, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPD1, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPD1, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPD1, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; + TMPD1[0,16] = Rn_VPR64.4H[0,16] >> Imm_shr_imm16:2; + TMPD1[16,16] = Rn_VPR64.4H[16,16] >> Imm_shr_imm16:2; + TMPD1[32,16] = Rn_VPR64.4H[32,16] >> Imm_shr_imm16:2; + TMPD1[48,16] = Rn_VPR64.4H[48,16] >> Imm_shr_imm16:2; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR64.4H, 0, 4, 8); - simd_address_at(tmp5, TMPD1, 0, 4, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 0, 4, 8); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); - simd_address_at(tmp4, Rd_VPR64.4H, 1, 4, 8); - simd_address_at(tmp5, TMPD1, 1, 4, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 4, 8); - * [register]:4 tmp6 = (* [register]:4 tmp4) + (* [register]:4 tmp5); + Rd_VPR64.4H[0,32] = Rd_VPR64.4H[0,32] + TMPD1[0,32]; + Rd_VPR64.4H[32,32] = Rd_VPR64.4H[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_RIGHT(Rn_VPR64.4H, Imm_shr_imm16:2, 2:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.4H, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_ursra(Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 @@ -50828,52 +27552,18 @@ is b_3131=0 & q=0 & 
u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & :ursra Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Imm_shr_imm32; # simd infix TMPQ1 = Rn_VPR128.4S >> tmp1 on lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) >> tmp1; - simd_address_at(tmp3, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) >> tmp1; - simd_address_at(tmp3, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) >> tmp1; - simd_address_at(tmp3, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) >> tmp1; + TMPQ1[0,32] = Rn_VPR128.4S[0,32] >> tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] >> tmp1; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] >> tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] >> tmp1; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp6, TMPQ1, 0, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp6, TMPQ1, 1, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp6, TMPQ1, 2, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp6, TMPQ1, 3, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Imm_shr_imm32; - local tmp2:16 = SIMD_INT_RIGHT(Rn_VPR128.4S, tmp1, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_ursra(Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 @@ -50885,78 +27575,25 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b :ursra Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.8B >> Imm_shr_imm8:1 on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPD1, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPD1, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - 
simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPD1, 2, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPD1, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPD1, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPD1, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPD1, 6, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPD1, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; + TMPD1[0,8] = Rn_VPR64.8B[0,8] >> Imm_shr_imm8:1; + TMPD1[8,8] = Rn_VPR64.8B[8,8] >> Imm_shr_imm8:1; + TMPD1[16,8] = Rn_VPR64.8B[16,8] >> Imm_shr_imm8:1; + TMPD1[24,8] = Rn_VPR64.8B[24,8] >> Imm_shr_imm8:1; + TMPD1[32,8] = Rn_VPR64.8B[32,8] >> Imm_shr_imm8:1; + TMPD1[40,8] = Rn_VPR64.8B[40,8] >> Imm_shr_imm8:1; + TMPD1[48,8] = Rn_VPR64.8B[48,8] >> Imm_shr_imm8:1; + TMPD1[56,8] = Rn_VPR64.8B[56,8] >> Imm_shr_imm8:1; # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR64.8B, 0, 1, 8); - simd_address_at(tmp5, TMPD1, 0, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 1, 1, 8); - simd_address_at(tmp5, TMPD1, 1, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 2, 1, 8); - simd_address_at(tmp5, TMPD1, 2, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 3, 1, 8); - simd_address_at(tmp5, TMPD1, 3, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 4, 1, 8); - simd_address_at(tmp5, TMPD1, 4, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 5, 1, 8); - simd_address_at(tmp5, TMPD1, 5, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 6, 1, 8); - simd_address_at(tmp5, TMPD1, 6, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 7, 1, 8); - simd_address_at(tmp5, TMPD1, 7, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; + Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; zext_zd(Zd); # 
zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_RIGHT(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.8B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_ursra(Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.387 URSRA page C7-2271 line 127788 MATCH x2f003400/mask=xbf80fc00 @@ -50968,78 +27605,25 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & :ursra Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H >> Imm_shr_imm16:2 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; + TMPQ1[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm16:2; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm16:2; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm16:2; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm16:2; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm16:2; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm16:2; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm16:2; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm16:2; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, 
Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_RIGHT(Rn_VPR128.8H, Imm_shr_imm16:2, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_ursra(Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.387 URSRA page C7-2271 line 127788 MATCH x7f003400/mask=xff80fc00 @@ -51052,19 +27636,10 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & :ursra Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b011111110 & b_22=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm64); local tmp2:8 = Rn_FPR64 >> tmp1; Rd_FPR64 = Rd_FPR64 + tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm64); - local tmp2:8 = Rn_FPR64 >> tmp1; - local tmpd:8 = Rd_FPR64 + tmp2; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_ursra(Rd_FPR64, Rn_FPR64, Imm_shr_imm64:1); -@endif } # C7.2.390 USHL page C7-2277 line 128100 MATCH x7e204400/mask=xff20fc00 @@ -51075,9 +27650,7 @@ is b_2331=0b011111110 & b_22=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Imm_shr :ushl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x8 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_ushl(Rn_FPR64, Rm_FPR64); -@endif } # C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 @@ -51088,9 +27661,7 @@ is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115 :ushl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x8 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = NEON_ushl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 @@ -51101,9 +27672,7 @@ is b_3131=0 
& q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :ushl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x8 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_ushl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 @@ -51114,9 +27683,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D :ushl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x8 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_ushl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 @@ -51127,9 +27694,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :ushl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x8 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_ushl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 @@ -51140,9 +27705,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :ushl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x8 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_ushl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 @@ -51153,9 +27716,7 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :ushl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x8 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_ushl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.390 USHL page C7-2277 line 128100 MATCH x2e204400/mask=xbf20fc00 @@ -51166,9 +27727,7 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :ushl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x8 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_ushl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 @@ -51181,75 +27740,27 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :ushll2 Rd_VPR128.8H, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = 
zext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - local tmp6:2 = Imm_uimm3; - # simd infix Rd_VPR128.8H = TMPQ2 << tmp6 on lane size 2 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ2, 0, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 1, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 2, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 3, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 4, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 5, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 6, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 7, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp7) << tmp6; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 1:1); + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); local tmp3:2 = Imm_uimm3; - local tmpd:16 = SIMD_INT_LEFT(tmp2, tmp3, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_ushll2(Rn_VPR128.16B, Imm_uimm3:1, 1:1); -@endif + # simd infix Rd_VPR128.8H = TMPQ2 << tmp3 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ2[0,16] << tmp3; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] << tmp3; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] << tmp3; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] << tmp3; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] << tmp3; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] << tmp3; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] << tmp3; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] << tmp3; + 
zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 @@ -51262,35 +27773,14 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_ :ushll Rd_VPR128.2D, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - local tmp4:8 = Imm_uimm5; - # simd infix Rd_VPR128.2D = TMPQ1 << tmp4 on lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp5) << tmp4; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.2S, 4:1); + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); local tmp2:8 = Imm_uimm5; - local tmpd:16 = SIMD_INT_LEFT(tmp1, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_ushll(Rn_VPR64.2S, Imm_uimm5:1, 4:1); -@endif + # simd infix Rd_VPR128.2D = TMPQ1 << tmp2 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] << tmp2; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 @@ -51303,47 +27793,18 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_10 :ushll Rd_VPR128.4S, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - local tmp4:4 = Imm_uimm4; - # simd infix Rd_VPR128.4S = TMPQ1 << tmp4 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 
16); - * [register]:4 tmp6 = (* [register]:4 tmp5) << tmp4; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.4H, 2:1); + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); local tmp2:4 = Imm_uimm4; - local tmpd:16 = SIMD_INT_LEFT(tmp1, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_ushll(Rn_VPR64.4H, Imm_uimm4:1, 2:1); -@endif + # simd infix Rd_VPR128.4S = TMPQ1 << tmp2 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ1[0,32] << tmp2; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] << tmp2; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] << tmp2; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 @@ -51356,39 +27817,15 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_ :ushll2 Rd_VPR128.2D, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - local tmp6:8 = Imm_uimm5; - # simd infix Rd_VPR128.2D = TMPQ2 << tmp6 on lane size 8 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ2, 0, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 1, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp7) << tmp6; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 4:1); + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); local tmp3:8 = Imm_uimm5; - local tmpd:16 = SIMD_INT_LEFT(tmp2, tmp3, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_ushll2(Rn_VPR128.4S, Imm_uimm5:1, 4:1); -@endif + # simd infix Rd_VPR128.2D = TMPQ2 << tmp3 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ2[0,64] << tmp3; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 @@ -51401,71 +27838,26 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_10 :ushll Rd_VPR128.8H, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 
tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - local tmp4:2 = Imm_uimm3; - # simd infix Rd_VPR128.8H = TMPQ1 << tmp4 on lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp5) << tmp4; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.8B, 1:1); + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); local tmp2:2 = Imm_uimm3; - local tmpd:16 = SIMD_INT_LEFT(tmp1, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_ushll(Rn_VPR64.8B, Imm_uimm3:1, 1:1); -@endif + # simd infix Rd_VPR128.8H = TMPQ1 << tmp2 on lane size 2 + Rd_VPR128.8H[0,16] = TMPQ1[0,16] << tmp2; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] << tmp2; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] << tmp2; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] << tmp2; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] << tmp2; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] << tmp2; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] << tmp2; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] << tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.391 USHLL, USHLL2 page C7-2280 line 128256 MATCH x2f00a400/mask=xbf80fc00 @@ -51478,51 +27870,19 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_ :ushll2 Rd_VPR128.4S, Rn_VPR128.8H, 
Imm_uimm4 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - local tmp6:4 = Imm_uimm4; - # simd infix Rd_VPR128.4S = TMPQ2 << tmp6 on lane size 4 - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp7, TMPQ2, 0, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 2, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) << tmp6; - simd_address_at(tmp7, TMPQ2, 3, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp7) << tmp6; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); local tmp3:4 = Imm_uimm4; - local tmpd:16 = SIMD_INT_LEFT(tmp2, tmp3, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_ushll2(Rn_VPR128.8H, Imm_uimm4:1, 2:1); -@endif + # simd infix Rd_VPR128.4S = TMPQ2 << tmp3 on lane size 4 + Rd_VPR128.4S[0,32] = TMPQ2[0,32] << tmp3; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] << tmp3; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] << tmp3; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] << tmp3; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 @@ -51534,65 +27894,24 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_ :ushr Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.16B = Rn_VPR128.16B >> Imm_shr_imm8:1 on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 3, 
1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp2, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; + Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] >> Imm_shr_imm8:1; + Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] >> Imm_shr_imm8:1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_RIGHT(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_ushr(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 @@ -51604,25 +27923,11 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & 
:ushr Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x0 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm64); # simd infix Rd_VPR128.2D = Rn_VPR128.2D >> tmp1 on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp2) >> tmp1; - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = (* [register]:8 tmp2) >> tmp1; + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] >> tmp1; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] >> tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm64); - local tmpd:16 = SIMD_INT_RIGHT(Rn_VPR128.2D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_ushr(Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); -@endif } # C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 @@ -51634,25 +27939,11 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x0 :ushr Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Imm_shr_imm32; # simd infix Rd_VPR64.2S = Rn_VPR64.2S >> tmp1 on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> tmp1; - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> tmp1; + Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] >> tmp1; + Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] >> tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Imm_shr_imm32; - local tmpd:8 = SIMD_INT_RIGHT(Rn_VPR64.2S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_ushr(Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 @@ -51664,29 +27955,12 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b :ushr Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.4H = Rn_VPR64.4H >> Imm_shr_imm16:2 on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp2 = (* [register]:2 tmp1) >> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp2 = (* [register]:2 tmp1) >> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp2 = (* [register]:2 tmp1) >> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp2, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp2 = (* [register]:2 tmp1) >> Imm_shr_imm16:2; + Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] >> Imm_shr_imm16:2; + Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] >> 
Imm_shr_imm16:2; + Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] >> Imm_shr_imm16:2; + Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] >> Imm_shr_imm16:2; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_RIGHT(Rn_VPR64.4H, Imm_shr_imm16:2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_ushr(Rn_VPR64.4H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 @@ -51698,31 +27972,13 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & :ushr Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Imm_shr_imm32; # simd infix Rd_VPR128.4S = Rn_VPR128.4S >> tmp1 on lane size 4 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> tmp1; - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> tmp1; - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> tmp1; - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = (* [register]:4 tmp2) >> tmp1; + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] >> tmp1; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] >> tmp1; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] >> tmp1; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] >> tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Imm_shr_imm32; - local tmpd:16 = SIMD_INT_RIGHT(Rn_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_ushr(Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 @@ -51734,41 +27990,16 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b :ushr Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR64.8B = Rn_VPR64.8B >> Imm_shr_imm8:1 on lane size 1 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - 
simd_address_at(tmp1, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; - simd_address_at(tmp1, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp2, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp2 = (* [register]:1 tmp1) >> Imm_shr_imm8:1; + Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] >> Imm_shr_imm8:1; + Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] >> Imm_shr_imm8:1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:8 = SIMD_INT_RIGHT(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_ushr(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.392 USHR page C7-2282 line 128386 MATCH x2f000400/mask=xbf80fc00 @@ -51780,41 +28011,16 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & :ushr Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix Rd_VPR128.8H = Rn_VPR128.8H >> Imm_shr_imm16:2 on lane size 2 - local tmp1:4 = 0; - local tmp2:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) >> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) >> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) >> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) >> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) >> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) >> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) >> Imm_shr_imm16:2; - simd_address_at(tmp1, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp2, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp2 = (* [register]:2 tmp1) >> Imm_shr_imm16:2; + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm16:2; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm16:2; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmpd:16 = SIMD_INT_RIGHT(Rn_VPR128.8H, Imm_shr_imm16:2, 2:1); - Zd = 
zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_ushr(Rn_VPR128.8H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.392 USHR page C7-2282 line 128386 MATCH x7f000400/mask=xff80fc00 @@ -51827,17 +28033,9 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & :ushr Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b011111110 & b_22=1 & b_1015=0b000001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm64); Rd_FPR64 = Rn_FPR64 >> tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm64); - local tmpd:8 = Rn_FPR64 >> tmp1; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_ushr(Rn_FPR64, Imm_shr_imm64:1); -@endif } # C7.2.394 USQADD page C7-2286 line 128601 MATCH x7e203800/mask=xff3ffc00 @@ -51849,9 +28047,7 @@ is b_2331=0b011111110 & b_22=1 & b_1015=0b000001 & Rd_FPR64 & Rn_FPR64 & Imm_shr :usqadd Rd_FPR8, Rn_FPR8 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_FPR8 & Rn_FPR8 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR8 = NEON_usqadd(Rd_FPR8, Rn_FPR8); -@endif } # C7.2.394 USQADD page C7-2286 line 128601 MATCH x7e203800/mask=xff3ffc00 @@ -51863,9 +28059,7 @@ is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_ :usqadd Rd_FPR16, Rn_FPR16 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_FPR16 & Rn_FPR16 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR16 = NEON_usqadd(Rd_FPR16, Rn_FPR16); -@endif } # C7.2.394 USQADD page C7-2286 line 128601 MATCH x7e203800/mask=xff3ffc00 @@ -51877,9 +28071,7 @@ is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_ :usqadd Rd_FPR32, Rn_FPR32 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_FPR32 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR32 = NEON_usqadd(Rd_FPR32, Rn_FPR32); -@endif } # C7.2.394 USQADD page C7-2286 line 128601 MATCH x7e203800/mask=xff3ffc00 @@ -51891,9 +28083,7 @@ is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_ :usqadd Rd_FPR64, Rn_FPR64 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_FPR64 & Rn_FPR64 & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_FPR64 = NEON_usqadd(Rd_FPR64, Rn_FPR64); -@endif } # C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00 @@ -51905,9 +28095,7 @@ is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_ :usqadd Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.8B = NEON_usqadd(Rd_VPR64.8B, Rn_VPR64.8B, 1:1); -@endif } # C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00 @@ -51919,9 +28107,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_ :usqadd Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.16B = 
NEON_usqadd(Rd_VPR128.16B, Rn_VPR128.16B, 1:1); -@endif } # C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00 @@ -51933,9 +28119,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_ :usqadd Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.4H = NEON_usqadd(Rd_VPR64.4H, Rn_VPR64.4H, 2:1); -@endif } # C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00 @@ -51947,9 +28131,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_ :usqadd Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.8H = NEON_usqadd(Rd_VPR128.8H, Rn_VPR128.8H, 2:1); -@endif } # C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00 @@ -51961,9 +28143,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_ :usqadd Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR64.2S = NEON_usqadd(Rd_VPR64.2S, Rn_VPR64.2S, 4:1); -@endif } # C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00 @@ -51975,9 +28155,7 @@ is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_ :usqadd Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_usqadd(Rd_VPR128.4S, Rn_VPR128.4S, 4:1); -@endif } # C7.2.394 USQADD page C7-2286 line 128601 MATCH x2e203800/mask=xbf3ffc00 @@ -51989,9 +28167,7 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_ :usqadd Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2D = NEON_usqadd(Rd_VPR128.2D, Rn_VPR128.2D, 8:1); -@endif } # C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00 @@ -52003,134 +28179,41 @@ is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_ :usra Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.16B >> Imm_shr_imm8:1 on lane size 1 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.16B, 0, 1, 16); - simd_address_at(tmp3, TMPQ1, 0, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 1, 1, 16); - simd_address_at(tmp3, TMPQ1, 1, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 2, 1, 16); - simd_address_at(tmp3, TMPQ1, 2, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 3, 1, 16); - simd_address_at(tmp3, TMPQ1, 3, 1, 16); - * [register]:1 tmp3 = 
(* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 4, 1, 16); - simd_address_at(tmp3, TMPQ1, 4, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 5, 1, 16); - simd_address_at(tmp3, TMPQ1, 5, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 6, 1, 16); - simd_address_at(tmp3, TMPQ1, 6, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 7, 1, 16); - simd_address_at(tmp3, TMPQ1, 7, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 8, 1, 16); - simd_address_at(tmp3, TMPQ1, 8, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 9, 1, 16); - simd_address_at(tmp3, TMPQ1, 9, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 10, 1, 16); - simd_address_at(tmp3, TMPQ1, 10, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 11, 1, 16); - simd_address_at(tmp3, TMPQ1, 11, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 12, 1, 16); - simd_address_at(tmp3, TMPQ1, 12, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 13, 1, 16); - simd_address_at(tmp3, TMPQ1, 13, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 14, 1, 16); - simd_address_at(tmp3, TMPQ1, 14, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR128.16B, 15, 1, 16); - simd_address_at(tmp3, TMPQ1, 15, 1, 16); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; + TMPQ1[0,8] = Rn_VPR128.16B[0,8] >> Imm_shr_imm8:1; + TMPQ1[8,8] = Rn_VPR128.16B[8,8] >> Imm_shr_imm8:1; + TMPQ1[16,8] = Rn_VPR128.16B[16,8] >> Imm_shr_imm8:1; + TMPQ1[24,8] = Rn_VPR128.16B[24,8] >> Imm_shr_imm8:1; + TMPQ1[32,8] = Rn_VPR128.16B[32,8] >> Imm_shr_imm8:1; + TMPQ1[40,8] = Rn_VPR128.16B[40,8] >> Imm_shr_imm8:1; + TMPQ1[48,8] = Rn_VPR128.16B[48,8] >> Imm_shr_imm8:1; + TMPQ1[56,8] = Rn_VPR128.16B[56,8] >> Imm_shr_imm8:1; + TMPQ1[64,8] = Rn_VPR128.16B[64,8] >> Imm_shr_imm8:1; + TMPQ1[72,8] = Rn_VPR128.16B[72,8] >> Imm_shr_imm8:1; + TMPQ1[80,8] = Rn_VPR128.16B[80,8] >> Imm_shr_imm8:1; + TMPQ1[88,8] = Rn_VPR128.16B[88,8] >> Imm_shr_imm8:1; + TMPQ1[96,8] = Rn_VPR128.16B[96,8] >> Imm_shr_imm8:1; + TMPQ1[104,8] = Rn_VPR128.16B[104,8] >> Imm_shr_imm8:1; + TMPQ1[112,8] = Rn_VPR128.16B[112,8] >> Imm_shr_imm8:1; + TMPQ1[120,8] = Rn_VPR128.16B[120,8] >> Imm_shr_imm8:1; # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR128.16B, 0, 1, 16); - simd_address_at(tmp5, TMPQ1, 0, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 1, 1, 16); - simd_address_at(tmp5, TMPQ1, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 2, 1, 16); - simd_address_at(tmp5, TMPQ1, 2, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp6 = (* [register]:1 
tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 3, 1, 16); - simd_address_at(tmp5, TMPQ1, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 4, 1, 16); - simd_address_at(tmp5, TMPQ1, 4, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 5, 1, 16); - simd_address_at(tmp5, TMPQ1, 5, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 6, 1, 16); - simd_address_at(tmp5, TMPQ1, 6, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 7, 1, 16); - simd_address_at(tmp5, TMPQ1, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 8, 1, 16); - simd_address_at(tmp5, TMPQ1, 8, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 9, 1, 16); - simd_address_at(tmp5, TMPQ1, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 10, 1, 16); - simd_address_at(tmp5, TMPQ1, 10, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 11, 1, 16); - simd_address_at(tmp5, TMPQ1, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 12, 1, 16); - simd_address_at(tmp5, TMPQ1, 12, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 13, 1, 16); - simd_address_at(tmp5, TMPQ1, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 14, 1, 16); - simd_address_at(tmp5, TMPQ1, 14, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR128.16B, 15, 1, 16); - simd_address_at(tmp5, TMPQ1, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); + Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; + Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; + Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; + Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; + Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; + Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; + Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; + Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; + Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8]; + Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; + Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; + Rd_VPR128.16B[96,8] = 
Rd_VPR128.16B[96,8] + TMPQ1[96,8]; + Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; + Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; + Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_RIGHT(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.16B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_usra(Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00 @@ -52142,38 +28225,14 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & :usra Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x2 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm64); # simd infix TMPQ1 = Rn_VPR128.2D >> tmp1 on lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) >> tmp1; - simd_address_at(tmp3, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp3) >> tmp1; + TMPQ1[0,64] = Rn_VPR128.2D[0,64] >> tmp1; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] >> tmp1; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.2D, 0, 8, 16); - simd_address_at(tmp6, TMPQ1, 0, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp5) + (* [register]:8 tmp6); - simd_address_at(tmp5, Rd_VPR128.2D, 1, 8, 16); - simd_address_at(tmp6, TMPQ1, 1, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp5) + (* [register]:8 tmp6); + Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm64); - local tmp2:16 = SIMD_INT_RIGHT(Rn_VPR128.2D, tmp1, 8:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.2D, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_usra(Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); -@endif } # C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00 @@ -52185,38 +28244,14 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x2 :usra Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Imm_shr_imm32; # simd infix TMPD1 = Rn_VPR64.2S >> tmp1 on lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp4, TMPD1, 0, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp3) >> tmp1; - simd_address_at(tmp3, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - * [register]:4 tmp4 = (* [register]:4 tmp3) >> tmp1; + TMPD1[0,32] = Rn_VPR64.2S[0,32] >> tmp1; + TMPD1[32,32] = Rn_VPR64.2S[32,32] >> tmp1; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - 
simd_address_at(tmp5, Rd_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPD1, 0, 4, 8); - simd_address_at(tmp7, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPD1, 1, 4, 8); - simd_address_at(tmp7, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); + Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; + Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Imm_shr_imm32; - local tmp2:8 = SIMD_INT_RIGHT(Rn_VPR64.2S, tmp1, 4:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.2S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_usra(Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00 @@ -52228,50 +28263,17 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b :usra Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.4H >> Imm_shr_imm16:2 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPD1, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPD1, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPD1, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPD1, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; + TMPD1[0,16] = Rn_VPR64.4H[0,16] >> Imm_shr_imm16:2; + TMPD1[16,16] = Rn_VPR64.4H[16,16] >> Imm_shr_imm16:2; + TMPD1[32,16] = Rn_VPR64.4H[32,16] >> Imm_shr_imm16:2; + TMPD1[48,16] = Rn_VPR64.4H[48,16] >> Imm_shr_imm16:2; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - simd_address_at(tmp5, TMPD1, 0, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR64.4H, 1, 2, 8); - simd_address_at(tmp5, TMPD1, 1, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR64.4H, 2, 2, 8); - simd_address_at(tmp5, TMPD1, 2, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR64.4H, 3, 2, 8); - simd_address_at(tmp5, TMPD1, 3, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); + Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; + Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; + Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; + Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_RIGHT(Rn_VPR64.4H, Imm_shr_imm16:2, 2:1); - local 
tmpd:8 = SIMD_INT_ADD(Rd_VPR64.4H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_usra(Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00 @@ -52283,52 +28285,18 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & :usra Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) local tmp1:4 = Imm_shr_imm32; # simd infix TMPQ1 = Rn_VPR128.4S >> tmp1 on lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp4, TMPQ1, 0, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) >> tmp1; - simd_address_at(tmp3, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp4, TMPQ1, 1, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) >> tmp1; - simd_address_at(tmp3, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp4, TMPQ1, 2, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) >> tmp1; - simd_address_at(tmp3, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp4, TMPQ1, 3, 4, 16); - * [register]:4 tmp4 = (* [register]:4 tmp3) >> tmp1; + TMPQ1[0,32] = Rn_VPR128.4S[0,32] >> tmp1; + TMPQ1[32,32] = Rn_VPR128.4S[32,32] >> tmp1; + TMPQ1[64,32] = Rn_VPR128.4S[64,32] >> tmp1; + TMPQ1[96,32] = Rn_VPR128.4S[96,32] >> tmp1; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp5, Rd_VPR128.4S, 0, 4, 16); - simd_address_at(tmp6, TMPQ1, 0, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 1, 4, 16); - simd_address_at(tmp6, TMPQ1, 1, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 2, 4, 16); - simd_address_at(tmp6, TMPQ1, 2, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); - simd_address_at(tmp5, Rd_VPR128.4S, 3, 4, 16); - simd_address_at(tmp6, TMPQ1, 3, 4, 16); - simd_address_at(tmp7, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp7 = (* [register]:4 tmp5) + (* [register]:4 tmp6); + Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:4 = Imm_shr_imm32; - local tmp2:16 = SIMD_INT_RIGHT(Rn_VPR128.4S, tmp1, 4:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.4S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_usra(Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); -@endif } # C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00 @@ -52340,78 +28308,25 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b :usra Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPD1 = Rn_VPR64.8B >> Imm_shr_imm8:1 on lane size 1 - local tmp2:4 = 0; - local 
tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPD1, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPD1, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPD1, 2, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPD1, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPD1, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPD1, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPD1, 6, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPD1, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:1 tmp2) >> Imm_shr_imm8:1; + TMPD1[0,8] = Rn_VPR64.8B[0,8] >> Imm_shr_imm8:1; + TMPD1[8,8] = Rn_VPR64.8B[8,8] >> Imm_shr_imm8:1; + TMPD1[16,8] = Rn_VPR64.8B[16,8] >> Imm_shr_imm8:1; + TMPD1[24,8] = Rn_VPR64.8B[24,8] >> Imm_shr_imm8:1; + TMPD1[32,8] = Rn_VPR64.8B[32,8] >> Imm_shr_imm8:1; + TMPD1[40,8] = Rn_VPR64.8B[40,8] >> Imm_shr_imm8:1; + TMPD1[48,8] = Rn_VPR64.8B[48,8] >> Imm_shr_imm8:1; + TMPD1[56,8] = Rn_VPR64.8B[56,8] >> Imm_shr_imm8:1; # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR64.8B, 0, 1, 8); - simd_address_at(tmp5, TMPD1, 0, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 1, 1, 8); - simd_address_at(tmp5, TMPD1, 1, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 2, 1, 8); - simd_address_at(tmp5, TMPD1, 2, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 3, 1, 8); - simd_address_at(tmp5, TMPD1, 3, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 4, 1, 8); - simd_address_at(tmp5, TMPD1, 4, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 5, 1, 8); - simd_address_at(tmp5, TMPD1, 5, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 6, 1, 8); - simd_address_at(tmp5, TMPD1, 6, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); - simd_address_at(tmp4, Rd_VPR64.8B, 7, 1, 8); - simd_address_at(tmp5, TMPD1, 7, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = (* [register]:1 tmp4) + (* [register]:1 tmp5); + Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; + Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; + Rd_VPR64.8B[16,8] = 
Rd_VPR64.8B[16,8] + TMPD1[16,8]; + Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8]; + Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; + Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; + Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; + Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_RIGHT(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); - local tmpd:8 = SIMD_INT_ADD(Rd_VPR64.8B, tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_usra(Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); -@endif } # C7.2.395 USRA page C7-2288 line 128723 MATCH x2f001400/mask=xbf80fc00 @@ -52423,78 +28338,25 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & :usra Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.8H >> Imm_shr_imm16:2 on lane size 2 - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = (* [register]:2 tmp2) >> Imm_shr_imm16:2; + TMPQ1[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm16:2; + TMPQ1[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm16:2; + TMPQ1[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm16:2; + TMPQ1[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm16:2; + TMPQ1[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm16:2; + TMPQ1[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm16:2; + TMPQ1[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm16:2; + TMPQ1[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm16:2; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 
16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 5, 2, 16); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); - simd_address_at(tmp4, Rd_VPR128.8H, 7, 2, 16); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) + (* [register]:2 tmp5); + Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_RIGHT(Rn_VPR128.8H, Imm_shr_imm16:2, 2:1); - local tmpd:16 = SIMD_INT_ADD(Rd_VPR128.8H, tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_usra(Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16:1, 2:1); -@endif } # C7.2.395 USRA page C7-2288 line 128723 MATCH x7f001400/mask=xff80fc00 @@ -52507,19 +28369,10 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & :usra Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b011111110 & b_22=1 & b_1015=0b000101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { -@if defined(SEMANTIC_primitive) local tmp1:8 = zext(Imm_shr_imm64); local tmp2:8 = Rn_FPR64 >> tmp1; Rd_FPR64 = Rd_FPR64 + tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = zext(Imm_shr_imm64); - local tmp2:8 = Rn_FPR64 >> tmp1; - local tmpd:8 = Rd_FPR64 + tmp2; - Zd = zext(tmpd); # assigning to Rd_FPR64 -@elif defined(SEMANTIC_pseudo) - Rd_FPR64 = NEON_usra(Rd_FPR64, Rn_FPR64, Imm_shr_imm64:1); -@endif } # C7.2.396 USUBL, USUBL2 page C7-2291 line 128880 MATCH x2e202000/mask=xbf20fc00 @@ -52531,54 +28384,18 @@ is b_2331=0b011111110 & b_22=1 & b_1015=0b000101 & Rd_FPR64 & Rn_FPR64 & Imm_shr :usubl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x2 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 
8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.4S, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); + TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 4, 8); - simd_address_at(tmp10, TMPQ4, 0, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); - simd_address_at(tmp9, TMPD3, 1, 4, 8); - simd_address_at(tmp10, TMPQ4, 1, 8, 16); - * [register]:8 tmp10 = zext(* [register]:4 tmp9); + TMPQ4[0,64] = zext(TMPD3[0,32]); + TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix Rd_VPR128.2D = TMPQ2 - TMPQ4 on lane size 8 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 8, 16); - simd_address_at(tmp12, TMPQ4, 0, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) - (* [register]:8 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 8, 16); - simd_address_at(tmp12, TMPQ4, 1, 8, 16); - simd_address_at(tmp13, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp13 = (* [register]:8 tmp11) - (* [register]:8 tmp12); + Rd_VPR128.2D[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; + Rd_VPR128.2D[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 4:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 4:1); - local tmpd:16 = SIMD_INT_SUB(tmp2, tmp4, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_usubl2(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.396 USUBL, USUBL2 page C7-2291 line 128880 MATCH x2e202000/mask=xbf20fc00 @@ -52590,74 +28407,24 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :usubl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x2 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.8H, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); + TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 2, 8); - simd_address_at(tmp10, TMPQ4, 0, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 1, 2, 
8); - simd_address_at(tmp10, TMPQ4, 1, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 2, 2, 8); - simd_address_at(tmp10, TMPQ4, 2, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); - simd_address_at(tmp9, TMPD3, 3, 2, 8); - simd_address_at(tmp10, TMPQ4, 3, 4, 16); - * [register]:4 tmp10 = zext(* [register]:2 tmp9); + TMPQ4[0,32] = zext(TMPD3[0,16]); + TMPQ4[32,32] = zext(TMPD3[16,16]); + TMPQ4[64,32] = zext(TMPD3[32,16]); + TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 - TMPQ4 on lane size 4 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 4, 16); - simd_address_at(tmp12, TMPQ4, 0, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 4, 16); - simd_address_at(tmp12, TMPQ4, 1, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 2, 4, 16); - simd_address_at(tmp12, TMPQ4, 2, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); - simd_address_at(tmp11, TMPQ2, 3, 4, 16); - simd_address_at(tmp12, TMPQ4, 3, 4, 16); - simd_address_at(tmp13, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp13 = (* [register]:4 tmp11) - (* [register]:4 tmp12); + Rd_VPR128.4S[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; + Rd_VPR128.4S[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; + Rd_VPR128.4S[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 2:1); - local tmpd:16 = SIMD_INT_SUB(tmp2, tmp4, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_usubl2(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.396 USUBL, USUBL2 page C7-2291 line 128880 MATCH x2e202000/mask=xbf20fc00 @@ -52669,114 +28436,36 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :usubl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x2 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * 
[register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - local tmp6:4 = 0; - simd_address_at(tmp6, Rm_VPR128.16B, 1, 8, 16); - TMPD3 = * [register]:8 tmp6; + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); + TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) - local tmp9:4 = 0; - local tmp10:4 = 0; - simd_address_at(tmp9, TMPD3, 0, 1, 8); - simd_address_at(tmp10, TMPQ4, 0, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 1, 1, 8); - simd_address_at(tmp10, TMPQ4, 1, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 2, 1, 8); - simd_address_at(tmp10, TMPQ4, 2, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 3, 1, 8); - simd_address_at(tmp10, TMPQ4, 3, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 4, 1, 8); - simd_address_at(tmp10, TMPQ4, 4, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 5, 1, 8); - simd_address_at(tmp10, TMPQ4, 5, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 6, 1, 8); - simd_address_at(tmp10, TMPQ4, 6, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); - simd_address_at(tmp9, TMPD3, 7, 1, 8); - simd_address_at(tmp10, TMPQ4, 7, 2, 16); - * [register]:2 tmp10 = zext(* [register]:1 tmp9); + TMPQ4[0,16] = zext(TMPD3[0,8]); + TMPQ4[16,16] = zext(TMPD3[8,8]); + TMPQ4[32,16] = zext(TMPD3[16,8]); + TMPQ4[48,16] = zext(TMPD3[24,8]); + TMPQ4[64,16] = zext(TMPD3[32,8]); + TMPQ4[80,16] = zext(TMPD3[40,8]); + TMPQ4[96,16] = zext(TMPD3[48,8]); + TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 - TMPQ4 on lane size 2 - local tmp11:4 = 0; - local tmp12:4 = 0; - local tmp13:4 = 0; - simd_address_at(tmp11, TMPQ2, 0, 2, 16); - simd_address_at(tmp12, TMPQ4, 0, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 1, 2, 16); - simd_address_at(tmp12, TMPQ4, 1, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 2, 2, 16); - simd_address_at(tmp12, TMPQ4, 2, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 3, 2, 16); - simd_address_at(tmp12, TMPQ4, 3, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 4, 2, 16); - simd_address_at(tmp12, TMPQ4, 4, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 5, 2, 16); - simd_address_at(tmp12, TMPQ4, 5, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp13 = (* [register]:2 
tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 6, 2, 16); - simd_address_at(tmp12, TMPQ4, 6, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); - simd_address_at(tmp11, TMPQ2, 7, 2, 16); - simd_address_at(tmp12, TMPQ4, 7, 2, 16); - simd_address_at(tmp13, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp13 = (* [register]:2 tmp11) - (* [register]:2 tmp12); + Rd_VPR128.8H[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; + Rd_VPR128.8H[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; + Rd_VPR128.8H[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; + Rd_VPR128.8H[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; + Rd_VPR128.8H[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; + Rd_VPR128.8H[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; + Rd_VPR128.8H[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 1:1); - local tmp3:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp4:16 = SIMD_INT_ZEXT(tmp3, 1:1); - local tmpd:16 = SIMD_INT_SUB(tmp2, tmp4, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_usubl2(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.396 USUBL, USUBL2 page C7-2291 line 128880 MATCH x2e202000/mask=xbf20fc00 @@ -52788,46 +28477,16 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :usubl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x2 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); + TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp6, TMPQ2, 0, 8, 16); - * [register]:8 tmp6 = zext(* [register]:4 tmp5); - simd_address_at(tmp5, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp6, TMPQ2, 1, 8, 16); - * [register]:8 tmp6 = zext(* [register]:4 tmp5); + TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = TMPQ1 - TMPQ2 on lane size 8 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 8, 16); - simd_address_at(tmp8, TMPQ2, 0, 8, 16); - simd_address_at(tmp9, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp7) - (* [register]:8 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 8, 16); - simd_address_at(tmp8, TMPQ2, 1, 8, 16); - simd_address_at(tmp9, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp9 = (* [register]:8 tmp7) - (* [register]:8 tmp8); + Rd_VPR128.2D[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.2S, 4:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.2S, 4:1); - local tmpd:16 = SIMD_INT_SUB(tmp1, tmp2, 8:1); - Zd = zext(tmpd); # assigning 
to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_usubl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.396 USUBL, USUBL2 page C7-2291 line 128880 MATCH x2e202000/mask=xbf20fc00 @@ -52839,66 +28498,22 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :usubl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x2 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); + TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp6, TMPQ2, 0, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp6, TMPQ2, 1, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp6, TMPQ2, 2, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); - simd_address_at(tmp5, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp6, TMPQ2, 3, 4, 16); - * [register]:4 tmp6 = zext(* [register]:2 tmp5); + TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = TMPQ1 - TMPQ2 on lane size 4 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 4, 16); - simd_address_at(tmp8, TMPQ2, 0, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) - (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 4, 16); - simd_address_at(tmp8, TMPQ2, 1, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) - (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 2, 4, 16); - simd_address_at(tmp8, TMPQ2, 2, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) - (* [register]:4 tmp8); - simd_address_at(tmp7, TMPQ1, 3, 4, 16); - simd_address_at(tmp8, TMPQ2, 3, 4, 16); - simd_address_at(tmp9, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp9 = (* [register]:4 tmp7) - (* [register]:4 tmp8); + Rd_VPR128.4S[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.4H, 2:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.4H, 2:1); - local tmpd:16 = SIMD_INT_SUB(tmp1, tmp2, 4:1); - Zd = 
zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_usubl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.396 USUBL, USUBL2 page C7-2291 line 128880 MATCH x2e202000/mask=xbf20fc00 @@ -52910,106 +28525,34 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :usubl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x2 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rn_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); + TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp6, TMPQ2, 0, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp6, TMPQ2, 1, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp6, TMPQ2, 2, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp6, TMPQ2, 3, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp6, TMPQ2, 4, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp6, TMPQ2, 5, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp6, TMPQ2, 6, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); - simd_address_at(tmp5, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp6, TMPQ2, 7, 2, 16); - * [register]:2 tmp6 = zext(* [register]:1 tmp5); + TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ2[80,16] = 
zext(Rm_VPR64.8B[40,8]); + TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = TMPQ1 - TMPQ2 on lane size 2 - local tmp7:4 = 0; - local tmp8:4 = 0; - local tmp9:4 = 0; - simd_address_at(tmp7, TMPQ1, 0, 2, 16); - simd_address_at(tmp8, TMPQ2, 0, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 1, 2, 16); - simd_address_at(tmp8, TMPQ2, 1, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 2, 2, 16); - simd_address_at(tmp8, TMPQ2, 2, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 3, 2, 16); - simd_address_at(tmp8, TMPQ2, 3, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 4, 2, 16); - simd_address_at(tmp8, TMPQ2, 4, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 5, 2, 16); - simd_address_at(tmp8, TMPQ2, 5, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 6, 2, 16); - simd_address_at(tmp8, TMPQ2, 6, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); - simd_address_at(tmp7, TMPQ1, 7, 2, 16); - simd_address_at(tmp8, TMPQ2, 7, 2, 16); - simd_address_at(tmp9, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp9 = (* [register]:2 tmp7) - (* [register]:2 tmp8); + Rd_VPR128.8H[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rn_VPR64.8B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(Rm_VPR64.8B, 1:1); - local tmpd:16 = SIMD_INT_SUB(tmp1, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_usubl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.397 USUBW, USUBW2 page C7-2293 line 129000 MATCH x2e203000/mask=xbf20fc00 @@ -53021,40 +28564,14 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :usubw2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x3 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rm_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rm_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 tmp4); - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - * [register]:8 tmp5 = zext(* [register]:4 
tmp4); + TMPQ2[0,64] = zext(TMPD1[0,32]); + TMPQ2[64,64] = zext(TMPD1[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D - TMPQ2 on lane size 8 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp7, TMPQ2, 0, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp6) - (* [register]:8 tmp7); - simd_address_at(tmp6, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp7, TMPQ2, 1, 8, 16); - simd_address_at(tmp8, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp8 = (* [register]:8 tmp6) - (* [register]:8 tmp7); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - TMPQ2[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rm_VPR128.4S, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 4:1); - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.2D, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_usubw2(Rn_VPR128.2D, Rm_VPR128.4S, 4:1); -@endif } # C7.2.397 USUBW, USUBW2 page C7-2293 line 129000 MATCH x2e203000/mask=xbf20fc00 @@ -53066,54 +28583,18 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S :usubw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x3 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rm_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rm_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - * [register]:4 tmp5 = zext(* [register]:2 tmp4); + TMPQ2[0,32] = zext(TMPD1[0,16]); + TMPQ2[32,32] = zext(TMPD1[16,16]); + TMPQ2[64,32] = zext(TMPD1[32,16]); + TMPQ2[96,32] = zext(TMPD1[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S - TMPQ2 on lane size 4 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp7, TMPQ2, 0, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); - simd_address_at(tmp6, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp7, TMPQ2, 1, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); - simd_address_at(tmp6, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp7, TMPQ2, 2, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); - simd_address_at(tmp6, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp7, TMPQ2, 3, 4, 16); - simd_address_at(tmp8, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp8 = (* [register]:4 tmp6) - (* [register]:4 tmp7); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - TMPQ2[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - TMPQ2[32,32]; + Rd_VPR128.4S[64,32] = 
Rn_VPR128.4S[64,32] - TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rm_VPR128.8H, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.4S, tmp2, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_usubw2(Rn_VPR128.4S, Rm_VPR128.8H, 2:1); -@endif } # C7.2.397 USUBW, USUBW2 page C7-2293 line 129000 MATCH x2e203000/mask=xbf20fc00 @@ -53125,82 +28606,26 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H :usubw2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x3 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rm_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rm_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - * [register]:2 tmp5 = zext(* [register]:1 tmp4); + TMPQ2[0,16] = zext(TMPD1[0,8]); + TMPQ2[16,16] = zext(TMPD1[8,8]); + TMPQ2[32,16] = zext(TMPD1[16,8]); + TMPQ2[48,16] = zext(TMPD1[24,8]); + TMPQ2[64,16] = zext(TMPD1[32,8]); + TMPQ2[80,16] = zext(TMPD1[40,8]); + TMPQ2[96,16] = zext(TMPD1[48,8]); + TMPQ2[112,16] = zext(TMPD1[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H - TMPQ2 on lane size 2 - local tmp6:4 = 0; - local tmp7:4 = 0; - local tmp8:4 = 0; - simd_address_at(tmp6, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp7, TMPQ2, 0, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp7, TMPQ2, 1, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp7, TMPQ2, 2, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp7, TMPQ2, 3, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp7, TMPQ2, 4, 2, 16); - simd_address_at(tmp8, 
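# usubw/usubw2 recap: the lanes of the second (narrow) operand are zero-extended to the
# destination lane width before the element-wise subtract, and the "2" forms take those
# lanes from the upper 64 bits of the 128-bit source, hence the TMPD1 = Rm[64,64] copy.
# Sketch of one lane of usubw2 Rd.4S, Rn.4S, Rm.8H, restating the assignments above:
#   TMPD1 = Rm_VPR128.8H[64,64];                            # halfword lanes 4..7
#   TMPQ2[0,32] = zext(TMPD1[0,16]);                        # widen halfword lane 4
#   Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - TMPQ2[0,32];  # word lane 0 of the result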
Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp7, TMPQ2, 5, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp7, TMPQ2, 6, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); - simd_address_at(tmp6, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp7, TMPQ2, 7, 2, 16); - simd_address_at(tmp8, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp8 = (* [register]:2 tmp6) - (* [register]:2 tmp7); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - TMPQ2[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - TMPQ2[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - TMPQ2[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - TMPQ2[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - TMPQ2[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - TMPQ2[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rm_VPR128.16B, 1:1); - local tmp2:16 = SIMD_INT_ZEXT(tmp1, 1:1); - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.8H, tmp2, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_usubw2(Rn_VPR128.8H, Rm_VPR128.16B, 1:1); -@endif } # C7.2.397 USUBW, USUBW2 page C7-2293 line 129000 MATCH x2e203000/mask=xbf20fc00 @@ -53212,36 +28637,13 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16 :usubw Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x3 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rm_VPR64.2S) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.2S, 0, 4, 8); - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - simd_address_at(tmp2, Rm_VPR64.2S, 1, 4, 8); - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); + TMPQ1[0,64] = zext(Rm_VPR64.2S[0,32]); + TMPQ1[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D - TMPQ1 on lane size 8 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp5, TMPQ1, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp4) - (* [register]:8 tmp5); - simd_address_at(tmp4, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp5, TMPQ1, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = (* [register]:8 tmp4) - (* [register]:8 tmp5); + Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - TMPQ1[0,64]; + Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rm_VPR64.2S, 4:1); - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.2D, tmp1, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_usubw(Rn_VPR128.2D, Rm_VPR64.2S, 4:1); -@endif } # C7.2.397 USUBW, USUBW2 page C7-2293 line 129000 MATCH x2e203000/mask=xbf20fc00 @@ -53253,50 +28655,17 
@@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S :usubw Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x3 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rm_VPR64.4H) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.4H, 0, 2, 8); - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rm_VPR64.4H, 1, 2, 8); - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rm_VPR64.4H, 2, 2, 8); - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, Rm_VPR64.4H, 3, 2, 8); - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); + TMPQ1[0,32] = zext(Rm_VPR64.4H[0,16]); + TMPQ1[32,32] = zext(Rm_VPR64.4H[16,16]); + TMPQ1[64,32] = zext(Rm_VPR64.4H[32,16]); + TMPQ1[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S - TMPQ1 on lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp5, TMPQ1, 0, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) - (* [register]:4 tmp5); - simd_address_at(tmp4, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp5, TMPQ1, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) - (* [register]:4 tmp5); - simd_address_at(tmp4, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp5, TMPQ1, 2, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) - (* [register]:4 tmp5); - simd_address_at(tmp4, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp5, TMPQ1, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = (* [register]:4 tmp4) - (* [register]:4 tmp5); + Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - TMPQ1[32,32]; + Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - TMPQ1[64,32]; + Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rm_VPR64.4H, 2:1); - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.4S, tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_usubw(Rn_VPR128.4S, Rm_VPR64.4H, 2:1); -@endif } # C7.2.397 USUBW, USUBW2 page C7-2293 line 129000 MATCH x2e203000/mask=xbf20fc00 @@ -53308,78 +28677,25 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H :usubw Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x3 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPQ1 = zext(Rm_VPR64.8B) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rm_VPR64.8B, 0, 1, 8); - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 1, 1, 8); - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 2, 1, 8); - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - * 
[register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 3, 1, 8); - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 4, 1, 8); - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 5, 1, 8); - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 6, 1, 8); - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, Rm_VPR64.8B, 7, 1, 8); - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); + TMPQ1[0,16] = zext(Rm_VPR64.8B[0,8]); + TMPQ1[16,16] = zext(Rm_VPR64.8B[8,8]); + TMPQ1[32,16] = zext(Rm_VPR64.8B[16,8]); + TMPQ1[48,16] = zext(Rm_VPR64.8B[24,8]); + TMPQ1[64,16] = zext(Rm_VPR64.8B[32,8]); + TMPQ1[80,16] = zext(Rm_VPR64.8B[40,8]); + TMPQ1[96,16] = zext(Rm_VPR64.8B[48,8]); + TMPQ1[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H - TMPQ1 on lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp4, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp5, TMPQ1, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp5, TMPQ1, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp5, TMPQ1, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp5, TMPQ1, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp5, TMPQ1, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp5, TMPQ1, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp5, TMPQ1, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); - simd_address_at(tmp4, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp5, TMPQ1, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = (* [register]:2 tmp4) - (* [register]:2 tmp5); + Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - TMPQ1[16,16]; + Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - TMPQ1[32,16]; + Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - TMPQ1[48,16]; + Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - TMPQ1[64,16]; + Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - TMPQ1[80,16]; + Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - TMPQ1[96,16]; + Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_ZEXT(Rm_VPR64.8B, 1:1); - local tmpd:16 = SIMD_INT_SUB(Rn_VPR128.8H, tmp1, 2:1); - 
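# The trailing zext_zq(Zd) / zext_zd(Zd) calls model the architectural rule that a write
# to the 128-bit (Q) or 64-bit (D) view clears the remaining bytes of the 32-byte Zd
# backing register, matching the effect of the removed pcode variant's final
# Zd = zext(tmpd). Conceptually (a sketch, not an extra hunk):
#   Zd = zext(Rd_VPR128.8H);   # Q-sized write: upper 16 bytes of Zd become zero
#   Zd = zext(Rd_VPR64.4H);    # D-sized write: upper 24 bytes of Zd become zero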
Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_usubw(Rn_VPR128.8H, Rm_VPR64.8B, 1:1); -@endif } # C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 @@ -53392,45 +28708,17 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B :uxtl2 Rd_VPR128.8H, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.16B, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.16B[64,64]; # simd resize Rd_VPR128.8H = zext(TMPD1) (lane size 1 to 2) - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp4 = zext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 1, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp4 = zext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 2, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp4 = zext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 3, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp4 = zext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 4, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp4 = zext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 5, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp4 = zext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 6, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp4 = zext(* [register]:1 tmp3); - simd_address_at(tmp3, TMPD1, 7, 1, 8); - simd_address_at(tmp4, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp4 = zext(* [register]:1 tmp3); + Rd_VPR128.8H[0,16] = zext(TMPD1[0,8]); + Rd_VPR128.8H[16,16] = zext(TMPD1[8,8]); + Rd_VPR128.8H[32,16] = zext(TMPD1[16,8]); + Rd_VPR128.8H[48,16] = zext(TMPD1[24,8]); + Rd_VPR128.8H[64,16] = zext(TMPD1[32,8]); + Rd_VPR128.8H[80,16] = zext(TMPD1[40,8]); + Rd_VPR128.8H[96,16] = zext(TMPD1[48,8]); + Rd_VPR128.8H[112,16] = zext(TMPD1[56,8]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.16B, 1:1); - local tmpd:16 = SIMD_INT_ZEXT(tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uxtl2(Rn_VPR128.16B, 1:1); -@endif } # C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 @@ -53443,25 +28731,11 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & :uxtl Rd_VPR128.2D, Rn_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.2S; # simd resize Rd_VPR128.2D = zext(TMPD1) (lane size 4 to 8) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, TMPD1, 0, 4, 8); - simd_address_at(tmp3, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); - simd_address_at(tmp2, TMPD1, 1, 4, 8); - simd_address_at(tmp3, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp3 = zext(* [register]:4 tmp2); + Rd_VPR128.2D[0,64] = zext(TMPD1[0,32]); + Rd_VPR128.2D[64,64] = zext(TMPD1[32,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.2S; - local tmpd:16 = SIMD_INT_ZEXT(tmp1, 4:1); - Zd = 
zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uxtl(Rn_VPR64.2S, 4:1); -@endif } # C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 @@ -53474,31 +28748,13 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_ :uxtl Rd_VPR128.4S, Rn_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.4H; # simd resize Rd_VPR128.4S = zext(TMPD1) (lane size 2 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, TMPD1, 0, 2, 8); - simd_address_at(tmp3, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, TMPD1, 1, 2, 8); - simd_address_at(tmp3, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, TMPD1, 2, 2, 8); - simd_address_at(tmp3, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); - simd_address_at(tmp2, TMPD1, 3, 2, 8); - simd_address_at(tmp3, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp3 = zext(* [register]:2 tmp2); + Rd_VPR128.4S[0,32] = zext(TMPD1[0,16]); + Rd_VPR128.4S[32,32] = zext(TMPD1[16,16]); + Rd_VPR128.4S[64,32] = zext(TMPD1[32,16]); + Rd_VPR128.4S[96,32] = zext(TMPD1[48,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.4H; - local tmpd:16 = SIMD_INT_ZEXT(tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uxtl(Rn_VPR64.4H, 2:1); -@endif } # C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 @@ -53511,27 +28767,11 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & :uxtl2 Rd_VPR128.2D, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.4S, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.4S[64,64]; # simd resize Rd_VPR128.2D = zext(TMPD1) (lane size 4 to 8) - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 4, 8); - simd_address_at(tmp4, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp4 = zext(* [register]:4 tmp3); - simd_address_at(tmp3, TMPD1, 1, 4, 8); - simd_address_at(tmp4, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp4 = zext(* [register]:4 tmp3); + Rd_VPR128.2D[0,64] = zext(TMPD1[0,32]); + Rd_VPR128.2D[64,64] = zext(TMPD1[32,32]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.4S, 1:1); - local tmpd:16 = SIMD_INT_ZEXT(tmp1, 4:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uxtl2(Rn_VPR128.4S, 4:1); -@endif } # C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 @@ -53544,43 +28784,17 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_ :uxtl Rd_VPR128.8H, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPD1 = Rn_VPR64.8B; # simd resize Rd_VPR128.8H = zext(TMPD1) (lane size 1 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, TMPD1, 0, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 
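# uxtl widens the low 64 bits of the source and uxtl2 the high 64 bits, zero-extending
# each lane to twice its width (uxtl is the USHLL alias with a shift of 0). Sketch of
# uxtl2 Rd.2D, Rn.4S, restating the assignments above:
#   TMPD1 = Rn_VPR128.4S[64,64];               # word lanes 2 and 3
#   Rd_VPR128.2D[0,64]  = zext(TMPD1[0,32]);
#   Rd_VPR128.2D[64,64] = zext(TMPD1[32,32]);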
tmp2); - simd_address_at(tmp2, TMPD1, 1, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 2, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 3, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 4, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 5, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 6, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); - simd_address_at(tmp2, TMPD1, 7, 1, 8); - simd_address_at(tmp3, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp3 = zext(* [register]:1 tmp2); + Rd_VPR128.8H[0,16] = zext(TMPD1[0,8]); + Rd_VPR128.8H[16,16] = zext(TMPD1[8,8]); + Rd_VPR128.8H[32,16] = zext(TMPD1[16,8]); + Rd_VPR128.8H[48,16] = zext(TMPD1[24,8]); + Rd_VPR128.8H[64,16] = zext(TMPD1[32,8]); + Rd_VPR128.8H[80,16] = zext(TMPD1[40,8]); + Rd_VPR128.8H[96,16] = zext(TMPD1[48,8]); + Rd_VPR128.8H[112,16] = zext(TMPD1[56,8]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rn_VPR64.8B; - local tmpd:16 = SIMD_INT_ZEXT(tmp1, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uxtl(Rn_VPR64.8B, 1:1); -@endif } # C7.2.398 UXTL, UXTL2 page C7-2295 line 129122 MATCH x2f00a400/mask=xbf87fc00 @@ -53593,33 +28807,13 @@ is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & :uxtl2 Rd_VPR128.4S, Rn_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) - local tmp1:4 = 0; - simd_address_at(tmp1, Rn_VPR128.8H, 1, 8, 16); - TMPD1 = * [register]:8 tmp1; + TMPD1 = Rn_VPR128.8H[64,64]; # simd resize Rd_VPR128.4S = zext(TMPD1) (lane size 2 to 4) - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 2, 8); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp4 = zext(* [register]:2 tmp3); - simd_address_at(tmp3, TMPD1, 1, 2, 8); - simd_address_at(tmp4, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp4 = zext(* [register]:2 tmp3); - simd_address_at(tmp3, TMPD1, 2, 2, 8); - simd_address_at(tmp4, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp4 = zext(* [register]:2 tmp3); - simd_address_at(tmp3, TMPD1, 3, 2, 8); - simd_address_at(tmp4, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp4 = zext(* [register]:2 tmp3); + Rd_VPR128.4S[0,32] = zext(TMPD1[0,16]); + Rd_VPR128.4S[32,32] = zext(TMPD1[16,16]); + Rd_VPR128.4S[64,32] = zext(TMPD1[32,16]); + Rd_VPR128.4S[96,32] = zext(TMPD1[48,16]); zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_PIECE(Rn_VPR128.8H, 1:1); - local tmpd:16 = SIMD_INT_ZEXT(tmp1, 2:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uxtl2(Rn_VPR128.8H, 2:1); -@endif } # C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00 @@ -53631,90 +28825,27 @@ is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & :uzp1 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & 
Rm_VPR128.16B & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.16B; TMPQ1 = Rn_VPR128.16B; # simd shuffle Rd_VPR128.16B = TMPQ1 (@0-0@2-1@4-2@6-3@8-4@10-5@12-6@14-7) lane size 1 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 0, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 2, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 4, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 6, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 8, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 10, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 12, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 14, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; + Rd_VPR128.16B[0,8] = TMPQ1[0,8]; + Rd_VPR128.16B[8,8] = TMPQ1[16,8]; + Rd_VPR128.16B[16,8] = TMPQ1[32,8]; + Rd_VPR128.16B[24,8] = TMPQ1[48,8]; + Rd_VPR128.16B[32,8] = TMPQ1[64,8]; + Rd_VPR128.16B[40,8] = TMPQ1[80,8]; + Rd_VPR128.16B[48,8] = TMPQ1[96,8]; + Rd_VPR128.16B[56,8] = TMPQ1[112,8]; # simd shuffle Rd_VPR128.16B = TMPQ2 (@0-8@2-9@4-10@6-11@8-12@10-13@12-14@14-15) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 0, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 2, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 4, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 6, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 8, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 10, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 12, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 14, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR128.16B[64,8] = TMPQ2[0,8]; + Rd_VPR128.16B[72,8] = TMPQ2[16,8]; + Rd_VPR128.16B[80,8] = TMPQ2[32,8]; + Rd_VPR128.16B[88,8] = TMPQ2[48,8]; + Rd_VPR128.16B[96,8] = TMPQ2[64,8]; + Rd_VPR128.16B[104,8] = TMPQ2[80,8]; + Rd_VPR128.16B[112,8] = TMPQ2[96,8]; + Rd_VPR128.16B[120,8] = TMPQ2[112,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.16B; - local tmp2:16 = Rn_VPR128.16B; - local tmp3:1 = 0; - local tmpd:16 = Rd_VPR128.16B; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 1:1); - tmp3 
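# uzp1 packs the even-numbered lanes of both sources: destination lane j takes source
# lane 2*j, with Rn (TMPQ1) filling the low half of Rd and Rm (TMPQ2) the high half; the
# (@0-0@2-1...) comments list those src-dst lane pairs. Two byte lanes of the 16B form,
# restating the assignments above:
#   Rd_VPR128.16B[24,8] = TMPQ1[48,8];   # dest byte 3  <- Rn byte 6
#   Rd_VPR128.16B[64,8] = TMPQ2[0,8];    # dest byte 8  <- Rm byte 0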
= SIMD_PIECE(tmp2, 4:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 6:1); tmpd = SIMD_COPY(tmpd, tmp3, 3:1); - tmp3 = SIMD_PIECE(tmp2, 8:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 10:1); tmpd = SIMD_COPY(tmpd, tmp3, 5:1); - tmp3 = SIMD_PIECE(tmp2, 12:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - tmp3 = SIMD_PIECE(tmp2, 14:1); tmpd = SIMD_COPY(tmpd, tmp3, 7:1); - local tmp4:1 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 8:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 9:1); - tmp4 = SIMD_PIECE(tmp1, 4:1); tmpd = SIMD_COPY(tmpd, tmp4, 10:1); - tmp4 = SIMD_PIECE(tmp1, 6:1); tmpd = SIMD_COPY(tmpd, tmp4, 11:1); - tmp4 = SIMD_PIECE(tmp1, 8:1); tmpd = SIMD_COPY(tmpd, tmp4, 12:1); - tmp4 = SIMD_PIECE(tmp1, 10:1); tmpd = SIMD_COPY(tmpd, tmp4, 13:1); - tmp4 = SIMD_PIECE(tmp1, 12:1); tmpd = SIMD_COPY(tmpd, tmp4, 14:1); - tmp4 = SIMD_PIECE(tmp1, 14:1); tmpd = SIMD_COPY(tmpd, tmp4, 15:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_uzp1(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00 @@ -53726,34 +28857,13 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_ :uzp1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.2D; TMPQ1 = Rn_VPR128.2D; # simd shuffle Rd_VPR128.2D = TMPQ1 (@0-0) lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - simd_address_at(tmp4, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp4 = * [register]:8 tmp3; + Rd_VPR128.2D[0,64] = TMPQ1[0,64]; # simd shuffle Rd_VPR128.2D = TMPQ2 (@0-1) lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = * [register]:8 tmp5; + Rd_VPR128.2D[64,64] = TMPQ2[0,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.2D; - local tmp2:16 = Rn_VPR128.2D; - local tmp3:8 = 0; - local tmpd:16 = Rd_VPR128.2D; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - local tmp4:8 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uzp1(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00 @@ -53765,34 +28875,13 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1 :uzp1 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.2S; TMPD1 = Rn_VPR64.2S; # simd shuffle Rd_VPR64.2S = TMPD1 (@0-0) lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 4, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp4 = * [register]:4 tmp3; + Rd_VPR64.2S[0,32] = TMPD1[0,32]; # simd shuffle Rd_VPR64.2S = TMPD2 (@0-1) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 0, 4, 8); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR64.2S[32,32] = 
TMPD2[0,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.2S; - local tmp2:8 = Rn_VPR64.2S; - local tmp3:4 = 0; - local tmpd:8 = Rd_VPR64.2S; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - local tmp4:4 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_uzp1(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00 @@ -53804,42 +28893,15 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_15 :uzp1 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.4H; TMPD1 = Rn_VPR64.4H; # simd shuffle Rd_VPR64.4H = TMPD1 (@0-0@2-1) lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPD1, 2, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp4 = * [register]:2 tmp3; + Rd_VPR64.4H[0,16] = TMPD1[0,16]; + Rd_VPR64.4H[16,16] = TMPD1[32,16]; # simd shuffle Rd_VPR64.4H = TMPD2 (@0-2@2-3) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 0, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPD2, 2, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR64.4H[32,16] = TMPD2[0,16]; + Rd_VPR64.4H[48,16] = TMPD2[32,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.4H; - local tmp2:8 = Rn_VPR64.4H; - local tmp3:2 = 0; - local tmpd:8 = Rd_VPR64.4H; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 1:1); - local tmp4:2 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 2:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_uzp1(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00 @@ -53851,42 +28913,15 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_15 :uzp1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.4S; TMPQ1 = Rn_VPR128.4S; # simd shuffle Rd_VPR128.4S = TMPQ1 (@0-0@2-1) lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp4 = * [register]:4 tmp3; - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp4 = * [register]:4 tmp3; + Rd_VPR128.4S[0,32] = TMPQ1[0,32]; + Rd_VPR128.4S[32,32] = TMPQ1[64,32]; # simd shuffle Rd_VPR128.4S = TMPQ2 (@0-2@2-3) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; - 
simd_address_at(tmp5, TMPQ2, 2, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR128.4S[64,32] = TMPQ2[0,32]; + Rd_VPR128.4S[96,32] = TMPQ2[64,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.4S; - local tmp2:16 = Rn_VPR128.4S; - local tmp3:4 = 0; - local tmpd:16 = Rd_VPR128.4S; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 1:1); - local tmp4:4 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 2:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uzp1(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00 @@ -53898,58 +28933,19 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1 :uzp1 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.8B; TMPD1 = Rn_VPR64.8B; # simd shuffle Rd_VPR64.8B = TMPD1 (@0-0@2-1@4-2@6-3) lane size 1 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 2, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 4, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 6, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; + Rd_VPR64.8B[0,8] = TMPD1[0,8]; + Rd_VPR64.8B[8,8] = TMPD1[16,8]; + Rd_VPR64.8B[16,8] = TMPD1[32,8]; + Rd_VPR64.8B[24,8] = TMPD1[48,8]; # simd shuffle Rd_VPR64.8B = TMPD2 (@0-4@2-5@4-6@6-7) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 0, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 2, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 4, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 6, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR64.8B[32,8] = TMPD2[0,8]; + Rd_VPR64.8B[40,8] = TMPD2[16,8]; + Rd_VPR64.8B[48,8] = TMPD2[32,8]; + Rd_VPR64.8B[56,8] = TMPD2[48,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.8B; - local tmp2:8 = Rn_VPR64.8B; - local tmp3:1 = 0; - local tmpd:8 = Rd_VPR64.8B; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 1:1); - tmp3 = SIMD_PIECE(tmp2, 4:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 6:1); tmpd = SIMD_COPY(tmpd, tmp3, 3:1); - local tmp4:1 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 4:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 4:1); tmpd = SIMD_COPY(tmpd, tmp4, 6:1); - tmp4 = SIMD_PIECE(tmp1, 6:1); tmpd = SIMD_COPY(tmpd, 
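# Worked example for the 4S form: with Vn = {n0,n1,n2,n3} and Vm = {m0,m1,m2,m3} (lane 0
# first), uzp1 Vd.4S, Vn.4S, Vm.4S yields {n0,n2,m0,m2}, which is exactly what the slice
# assignments above encode:
#   Rd_VPR128.4S[0,32]  = TMPQ1[0,32];    # n0
#   Rd_VPR128.4S[32,32] = TMPQ1[64,32];   # n2
#   Rd_VPR128.4S[64,32] = TMPQ2[0,32];    # m0
#   Rd_VPR128.4S[96,32] = TMPQ2[64,32];   # m2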
tmp4, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_uzp1(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.399 UZP1 page C7-2297 line 129221 MATCH x0e001800/mask=xbf20fc00 @@ -53961,58 +28957,19 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_15 :uzp1 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.8H; TMPQ1 = Rn_VPR128.8H; # simd shuffle Rd_VPR128.8H = TMPQ1 (@0-0@2-1@4-2@6-3) lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; + Rd_VPR128.8H[0,16] = TMPQ1[0,16]; + Rd_VPR128.8H[16,16] = TMPQ1[32,16]; + Rd_VPR128.8H[32,16] = TMPQ1[64,16]; + Rd_VPR128.8H[48,16] = TMPQ1[96,16]; # simd shuffle Rd_VPR128.8H = TMPQ2 (@0-4@2-5@4-6@6-7) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR128.8H[64,16] = TMPQ2[0,16]; + Rd_VPR128.8H[80,16] = TMPQ2[32,16]; + Rd_VPR128.8H[96,16] = TMPQ2[64,16]; + Rd_VPR128.8H[112,16] = TMPQ2[96,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.8H; - local tmp2:16 = Rn_VPR128.8H; - local tmp3:2 = 0; - local tmpd:16 = Rd_VPR128.8H; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 1:1); - tmp3 = SIMD_PIECE(tmp2, 4:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 6:1); tmpd = SIMD_COPY(tmpd, tmp3, 3:1); - local tmp4:2 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 4:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 4:1); tmpd = SIMD_COPY(tmpd, tmp4, 6:1); - tmp4 = SIMD_PIECE(tmp1, 6:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uzp1(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00 @@ -54024,90 +28981,27 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1 :uzp2 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.16B; TMPQ1 = 
Rn_VPR128.16B; # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 1 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 1, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 3, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 5, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 7, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 9, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 11, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 13, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 15, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; + Rd_VPR128.16B[0,8] = TMPQ1[8,8]; + Rd_VPR128.16B[8,8] = TMPQ1[24,8]; + Rd_VPR128.16B[16,8] = TMPQ1[40,8]; + Rd_VPR128.16B[24,8] = TMPQ1[56,8]; + Rd_VPR128.16B[32,8] = TMPQ1[72,8]; + Rd_VPR128.16B[40,8] = TMPQ1[88,8]; + Rd_VPR128.16B[48,8] = TMPQ1[104,8]; + Rd_VPR128.16B[56,8] = TMPQ1[120,8]; # simd shuffle Rd_VPR128.16B = TMPQ2 (@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 5, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR128.16B[64,8] = TMPQ2[8,8]; + Rd_VPR128.16B[72,8] = TMPQ2[24,8]; + Rd_VPR128.16B[80,8] = TMPQ2[40,8]; + Rd_VPR128.16B[88,8] = TMPQ2[56,8]; + Rd_VPR128.16B[96,8] = TMPQ2[72,8]; + Rd_VPR128.16B[104,8] = TMPQ2[88,8]; + Rd_VPR128.16B[112,8] = TMPQ2[104,8]; + Rd_VPR128.16B[120,8] = TMPQ2[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.16B; - local tmp2:16 = Rn_VPR128.16B; - local tmp3:1 = 0; - local tmpd:16 = Rd_VPR128.16B; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 1:1); - tmp3 = SIMD_PIECE(tmp2, 5:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 7:1); tmpd = SIMD_COPY(tmpd, tmp3, 3:1); - tmp3 = 
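# uzp2 is the odd-lane counterpart of uzp1: destination lane j takes source lane 2*j+1,
# again with Rn (TMPQ1) in the low half of Rd and Rm (TMPQ2) in the high half. Two byte
# lanes of the 16B form, restating the assignments above:
#   Rd_VPR128.16B[0,8]  = TMPQ1[8,8];    # dest byte 0  <- Rn byte 1
#   Rd_VPR128.16B[64,8] = TMPQ2[8,8];    # dest byte 8  <- Rm byte 1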
SIMD_PIECE(tmp2, 9:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 11:1); tmpd = SIMD_COPY(tmpd, tmp3, 5:1); - tmp3 = SIMD_PIECE(tmp2, 13:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - tmp3 = SIMD_PIECE(tmp2, 15:1); tmpd = SIMD_COPY(tmpd, tmp3, 7:1); - local tmp4:1 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 8:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 9:1); - tmp4 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp4, 10:1); - tmp4 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp4, 11:1); - tmp4 = SIMD_PIECE(tmp1, 9:1); tmpd = SIMD_COPY(tmpd, tmp4, 12:1); - tmp4 = SIMD_PIECE(tmp1, 11:1); tmpd = SIMD_COPY(tmpd, tmp4, 13:1); - tmp4 = SIMD_PIECE(tmp1, 13:1); tmpd = SIMD_COPY(tmpd, tmp4, 14:1); - tmp4 = SIMD_PIECE(tmp1, 15:1); tmpd = SIMD_COPY(tmpd, tmp4, 15:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_uzp2(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00 @@ -54119,34 +29013,13 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_ :uzp2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.2D; TMPQ1 = Rn_VPR128.2D; # simd shuffle Rd_VPR128.2D = TMPQ1 (@1-0) lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - simd_address_at(tmp4, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp4 = * [register]:8 tmp3; + Rd_VPR128.2D[0,64] = TMPQ1[64,64]; # simd shuffle Rd_VPR128.2D = TMPQ2 (@1-1) lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = * [register]:8 tmp5; + Rd_VPR128.2D[64,64] = TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.2D; - local tmp2:16 = Rn_VPR128.2D; - local tmp3:8 = 0; - local tmpd:16 = Rd_VPR128.2D; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - local tmp4:8 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_uzp2(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00 @@ -54158,34 +29031,13 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1 :uzp2 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.2S; TMPD1 = Rn_VPR64.2S; # simd shuffle Rd_VPR64.2S = TMPD1 (@1-0) lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 1, 4, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp4 = * [register]:4 tmp3; + Rd_VPR64.2S[0,32] = TMPD1[32,32]; # simd shuffle Rd_VPR64.2S = TMPD2 (@1-1) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 1, 4, 8); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR64.2S[32,32] = TMPD2[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.2S; - local tmp2:8 = 
Rn_VPR64.2S; - local tmp3:4 = 0; - local tmpd:8 = Rd_VPR64.2S; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - local tmp4:4 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_uzp2(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00 @@ -54197,42 +29049,15 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_15 :uzp2 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.4H; TMPD1 = Rn_VPR64.4H; # simd shuffle Rd_VPR64.4H = TMPD1 (@1-0@3-1) lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 1, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPD1, 3, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp4 = * [register]:2 tmp3; + Rd_VPR64.4H[0,16] = TMPD1[16,16]; + Rd_VPR64.4H[16,16] = TMPD1[48,16]; # simd shuffle Rd_VPR64.4H = TMPD2 (@1-2@3-3) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 1, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPD2, 3, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR64.4H[32,16] = TMPD2[16,16]; + Rd_VPR64.4H[48,16] = TMPD2[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.4H; - local tmp2:8 = Rn_VPR64.4H; - local tmp3:2 = 0; - local tmpd:8 = Rd_VPR64.4H; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 1:1); - local tmp4:2 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 2:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_uzp2(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00 @@ -54244,42 +29069,15 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_15 :uzp2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.4S; TMPQ1 = Rn_VPR128.4S; # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-0@3-1) lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp4 = * [register]:4 tmp3; - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp4 = * [register]:4 tmp3; + Rd_VPR128.4S[0,32] = TMPQ1[32,32]; + Rd_VPR128.4S[32,32] = TMPQ1[96,32]; # simd shuffle Rd_VPR128.4S = TMPQ2 (@1-2@3-3) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = * [register]:4 
tmp5; + Rd_VPR128.4S[64,32] = TMPQ2[32,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.4S; - local tmp2:16 = Rn_VPR128.4S; - local tmp3:4 = 0; - local tmpd:16 = Rd_VPR128.4S; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 1:1); - local tmp4:4 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 2:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_uzp2(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00 @@ -54291,58 +29089,19 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1 :uzp2 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.8B; TMPD1 = Rn_VPR64.8B; # simd shuffle Rd_VPR64.8B = TMPD1 (@1-0@3-1@5-2@7-3) lane size 1 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 1, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 3, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 5, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 7, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; + Rd_VPR64.8B[0,8] = TMPD1[8,8]; + Rd_VPR64.8B[8,8] = TMPD1[24,8]; + Rd_VPR64.8B[16,8] = TMPD1[40,8]; + Rd_VPR64.8B[24,8] = TMPD1[56,8]; # simd shuffle Rd_VPR64.8B = TMPD2 (@1-4@3-5@5-6@7-7) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 1, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 3, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 5, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 7, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR64.8B[32,8] = TMPD2[8,8]; + Rd_VPR64.8B[40,8] = TMPD2[24,8]; + Rd_VPR64.8B[48,8] = TMPD2[40,8]; + Rd_VPR64.8B[56,8] = TMPD2[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.8B; - local tmp2:8 = Rn_VPR64.8B; - local tmp3:1 = 0; - local tmpd:8 = Rd_VPR64.8B; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 1:1); - tmp3 = SIMD_PIECE(tmp2, 5:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 7:1); tmpd = SIMD_COPY(tmpd, tmp3, 3:1); - local tmp4:1 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 4:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp4, 6:1); - tmp4 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_uzp2(Rn_VPR64.8B, 
Rm_VPR64.8B, 1:1); -@endif } # C7.2.400 UZP2 page C7-2299 line 129332 MATCH x0e005800/mask=xbf20fc00 @@ -54354,58 +29113,19 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_15 :uzp2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.8H; TMPQ1 = Rn_VPR128.8H; # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-0@3-1@5-2@7-3) lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; + Rd_VPR128.8H[0,16] = TMPQ1[16,16]; + Rd_VPR128.8H[16,16] = TMPQ1[48,16]; + Rd_VPR128.8H[32,16] = TMPQ1[80,16]; + Rd_VPR128.8H[48,16] = TMPQ1[112,16]; # simd shuffle Rd_VPR128.8H = TMPQ2 (@1-4@3-5@5-6@7-7) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR128.8H[64,16] = TMPQ2[16,16]; + Rd_VPR128.8H[80,16] = TMPQ2[48,16]; + Rd_VPR128.8H[96,16] = TMPQ2[80,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.8H; - local tmp2:16 = Rn_VPR128.8H; - local tmp3:2 = 0; - local tmpd:16 = Rd_VPR128.8H; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 1:1); - tmp3 = SIMD_PIECE(tmp2, 5:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 7:1); tmpd = SIMD_COPY(tmpd, tmp3, 3:1); - local tmp4:2 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 4:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp4, 6:1); - tmp4 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_uzp2(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.401 XAR page C7-2301 line 129443 MATCH xce800000/mask=xffe00000 @@ -54418,38 +29138,14 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1 :xar Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, LSB_bitfield64_imm is b_2131=0b11001110100 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & LSB_bitfield64_imm & Zd { -@if defined(SEMANTIC_primitive) # simd infix TMPQ1 = Rn_VPR128.2D | Rm_VPR128.2D on lane size 8 - local tmp2:4 = 0; - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, 
Rm_VPR128.2D, 0, 8, 16); - simd_address_at(tmp4, TMPQ1, 0, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) | (* [register]:8 tmp3); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, Rm_VPR128.2D, 1, 8, 16); - simd_address_at(tmp4, TMPQ1, 1, 8, 16); - * [register]:8 tmp4 = (* [register]:8 tmp2) | (* [register]:8 tmp3); - local tmp5:8 = LSB_bitfield64_imm; - # simd infix Rd_VPR128.2D = TMPQ1 >> tmp5 on lane size 8 - local tmp6:4 = 0; - local tmp7:4 = 0; - simd_address_at(tmp6, TMPQ1, 0, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp6) >> tmp5; - simd_address_at(tmp6, TMPQ1, 1, 8, 16); - simd_address_at(tmp7, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp7 = (* [register]:8 tmp6) >> tmp5; - zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = SIMD_INT_OR(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); + TMPQ1[0,64] = Rn_VPR128.2D[0,64] | Rm_VPR128.2D[0,64]; + TMPQ1[64,64] = Rn_VPR128.2D[64,64] | Rm_VPR128.2D[64,64]; local tmp2:8 = LSB_bitfield64_imm; - local tmpd:16 = SIMD_INT_RIGHT(tmp1, tmp2, 8:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_xar(Rn_VPR128.2D, Rm_VPR128.2D, LSB_bitfield64_imm, 8:1); -@endif + # simd infix Rd_VPR128.2D = TMPQ1 >> tmp2 on lane size 8 + Rd_VPR128.2D[0,64] = TMPQ1[0,64] >> tmp2; + Rd_VPR128.2D[64,64] = TMPQ1[64,64] >> tmp2; + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.402 XTN, XTN2 page C7-2302 line 129514 MATCH x0e212800/mask=xbf3ffc00 @@ -54461,36 +29157,13 @@ is b_2131=0b11001110100 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & LSB_bitfi :xtn Rd_VPR64.2S, Rn_VPR128.2D is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPD1 = zext(Rn_VPR128.2D) (lane size 8 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, TMPD1, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:8 tmp2); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, TMPD1, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:8 tmp2); + TMPD1[0,32] = Rn_VPR128.2D[0,32]; + TMPD1[32,32] = Rn_VPR128.2D[64,32]; # simd shuffle Rd_VPR64.2S = TMPD1 (@0-0@1-1) lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp5 = * [register]:4 tmp4; - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp5 = * [register]:4 tmp4; + Rd_VPR64.2S[0,32] = TMPD1[0,32]; + Rd_VPR64.2S[32,32] = TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_ZEXT(Rn_VPR128.2D, 8:1); - local tmp2:4 = 0; - local tmpd:8 = Rd_VPR64.2S; - tmp2 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp2, 0:1); - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_xtn(Rd_VPR64.2S, Rn_VPR128.2D, 8:1); -@endif } # C7.2.402 XTN, XTN2 page C7-2302 line 129514 MATCH x0e212800/mask=xbf3ffc00 @@ -54502,36 +29175,13 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :xtn2 Rd_VPR128.4S, Rn_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { -@if 
defined(SEMANTIC_primitive) # simd resize TMPD1 = zext(Rn_VPR128.2D) (lane size 8 to 4) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.2D, 0, 8, 16); - simd_address_at(tmp3, TMPD1, 0, 4, 8); - * [register]:4 tmp3 = (* [register]:8 tmp2); - simd_address_at(tmp2, Rn_VPR128.2D, 1, 8, 16); - simd_address_at(tmp3, TMPD1, 1, 4, 8); - * [register]:4 tmp3 = (* [register]:8 tmp2); + TMPD1[0,32] = Rn_VPR128.2D[0,32]; + TMPD1[32,32] = Rn_VPR128.2D[64,32]; # simd shuffle Rd_VPR128.4S = TMPD1 (@0-2@1-3) lane size 4 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 4, 8); - simd_address_at(tmp5, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp5 = * [register]:4 tmp4; - simd_address_at(tmp4, TMPD1, 1, 4, 8); - simd_address_at(tmp5, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp5 = * [register]:4 tmp4; + Rd_VPR128.4S[64,32] = TMPD1[0,32]; + Rd_VPR128.4S[96,32] = TMPD1[32,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_ZEXT(Rn_VPR128.2D, 8:1); - local tmp2:4 = 0; - local tmpd:16 = Rd_VPR128.4S; - tmp2 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp2, 2:1); - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_xtn2(Rd_VPR128.4S, Rn_VPR128.2D, 8:1); -@endif } # C7.2.402 XTN, XTN2 page C7-2302 line 129514 MATCH x0e212800/mask=xbf3ffc00 @@ -54543,50 +29193,17 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x :xtn Rd_VPR64.4H, Rn_VPR128.4S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPD1 = zext(Rn_VPR128.4S) (lane size 4 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, TMPD1, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, TMPD1, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, TMPD1, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, TMPD1, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:4 tmp2); + TMPD1[0,16] = Rn_VPR128.4S[0,16]; + TMPD1[16,16] = Rn_VPR128.4S[32,16]; + TMPD1[32,16] = Rn_VPR128.4S[64,16]; + TMPD1[48,16] = Rn_VPR128.4S[96,16]; # simd shuffle Rd_VPR64.4H = TMPD1 (@0-0@1-1@2-2@3-3) lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp5 = * [register]:2 tmp4; - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp5 = * [register]:2 tmp4; - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp5 = * [register]:2 tmp4; - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp5 = * [register]:2 tmp4; + Rd_VPR64.4H[0,16] = TMPD1[0,16]; + Rd_VPR64.4H[16,16] = TMPD1[16,16]; + Rd_VPR64.4H[32,16] = TMPD1[32,16]; + Rd_VPR64.4H[48,16] = TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_ZEXT(Rn_VPR128.4S, 4:1); - local tmp2:2 = 0; - local tmpd:8 = Rd_VPR64.4H; - tmp2 = SIMD_PIECE(tmp1, 0:1); tmpd = 
SIMD_COPY(tmpd, tmp2, 0:1); - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 1:1); - tmp2 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp2, 2:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_xtn(Rd_VPR64.4H, Rn_VPR128.4S, 4:1); -@endif } # C7.2.402 XTN, XTN2 page C7-2302 line 129514 MATCH x0e212800/mask=xbf3ffc00 @@ -54598,50 +29215,17 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :xtn2 Rd_VPR128.8H, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPD1 = zext(Rn_VPR128.4S) (lane size 4 to 2) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.4S, 0, 4, 16); - simd_address_at(tmp3, TMPD1, 0, 2, 8); - * [register]:2 tmp3 = (* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 1, 4, 16); - simd_address_at(tmp3, TMPD1, 1, 2, 8); - * [register]:2 tmp3 = (* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 2, 4, 16); - simd_address_at(tmp3, TMPD1, 2, 2, 8); - * [register]:2 tmp3 = (* [register]:4 tmp2); - simd_address_at(tmp2, Rn_VPR128.4S, 3, 4, 16); - simd_address_at(tmp3, TMPD1, 3, 2, 8); - * [register]:2 tmp3 = (* [register]:4 tmp2); + TMPD1[0,16] = Rn_VPR128.4S[0,16]; + TMPD1[16,16] = Rn_VPR128.4S[32,16]; + TMPD1[32,16] = Rn_VPR128.4S[64,16]; + TMPD1[48,16] = Rn_VPR128.4S[96,16]; # simd shuffle Rd_VPR128.8H = TMPD1 (@0-4@1-5@2-6@3-7) lane size 2 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 2, 8); - simd_address_at(tmp5, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp5 = * [register]:2 tmp4; - simd_address_at(tmp4, TMPD1, 1, 2, 8); - simd_address_at(tmp5, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp5 = * [register]:2 tmp4; - simd_address_at(tmp4, TMPD1, 2, 2, 8); - simd_address_at(tmp5, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp5 = * [register]:2 tmp4; - simd_address_at(tmp4, TMPD1, 3, 2, 8); - simd_address_at(tmp5, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp5 = * [register]:2 tmp4; + Rd_VPR128.8H[64,16] = TMPD1[0,16]; + Rd_VPR128.8H[80,16] = TMPD1[16,16]; + Rd_VPR128.8H[96,16] = TMPD1[32,16]; + Rd_VPR128.8H[112,16] = TMPD1[48,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_ZEXT(Rn_VPR128.4S, 4:1); - local tmp2:2 = 0; - local tmpd:16 = Rd_VPR128.8H; - tmp2 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp2, 4:1); - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 5:1); - tmp2 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp2, 6:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_xtn2(Rd_VPR128.8H, Rn_VPR128.4S, 4:1); -@endif } # C7.2.402 XTN, XTN2 page C7-2302 line 129514 MATCH x0e212800/mask=xbf3ffc00 @@ -54653,78 +29237,25 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x :xtn Rd_VPR64.8B, Rn_VPR128.8H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPD1 = zext(Rn_VPR128.8H) (lane size 2 to 1) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, TMPD1, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:2 
tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, TMPD1, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, TMPD1, 2, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, TMPD1, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, TMPD1, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, TMPD1, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, TMPD1, 6, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, TMPD1, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); + TMPD1[0,8] = Rn_VPR128.8H[0,8]; + TMPD1[8,8] = Rn_VPR128.8H[16,8]; + TMPD1[16,8] = Rn_VPR128.8H[32,8]; + TMPD1[24,8] = Rn_VPR128.8H[48,8]; + TMPD1[32,8] = Rn_VPR128.8H[64,8]; + TMPD1[40,8] = Rn_VPR128.8H[80,8]; + TMPD1[48,8] = Rn_VPR128.8H[96,8]; + TMPD1[56,8] = Rn_VPR128.8H[112,8]; # simd shuffle Rd_VPR64.8B = TMPD1 (@0-0@1-1@2-2@3-3@4-4@5-5@6-6@7-7) lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp5 = * [register]:1 tmp4; + Rd_VPR64.8B[0,8] = TMPD1[0,8]; + Rd_VPR64.8B[8,8] = TMPD1[8,8]; + Rd_VPR64.8B[16,8] = TMPD1[16,8]; + Rd_VPR64.8B[24,8] = TMPD1[24,8]; + Rd_VPR64.8B[32,8] = TMPD1[32,8]; + Rd_VPR64.8B[40,8] = TMPD1[40,8]; + Rd_VPR64.8B[48,8] = TMPD1[48,8]; + Rd_VPR64.8B[56,8] = TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_ZEXT(Rn_VPR128.8H, 2:1); - local tmp2:1 = 0; - local tmpd:8 = Rd_VPR64.8B; - tmp2 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp2, 0:1); - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 1:1); - tmp2 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp2, 2:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 3:1); - tmp2 = SIMD_PIECE(tmp1, 4:1); tmpd = SIMD_COPY(tmpd, tmp2, 4:1); - tmp2 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp2, 5:1); - tmp2 = SIMD_PIECE(tmp1, 6:1); tmpd = SIMD_COPY(tmpd, tmp2, 6:1); - tmp2 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp2, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_xtn(Rd_VPR64.8B, Rn_VPR128.8H, 
2:1); -@endif } # C7.2.402 XTN, XTN2 page C7-2302 line 129514 MATCH x0e212800/mask=xbf3ffc00 @@ -54736,78 +29267,25 @@ is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :xtn2 Rd_VPR128.16B, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) # simd resize TMPD1 = zext(Rn_VPR128.8H) (lane size 2 to 1) - local tmp2:4 = 0; - local tmp3:4 = 0; - simd_address_at(tmp2, Rn_VPR128.8H, 0, 2, 16); - simd_address_at(tmp3, TMPD1, 0, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 1, 2, 16); - simd_address_at(tmp3, TMPD1, 1, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 2, 2, 16); - simd_address_at(tmp3, TMPD1, 2, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 3, 2, 16); - simd_address_at(tmp3, TMPD1, 3, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 4, 2, 16); - simd_address_at(tmp3, TMPD1, 4, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 5, 2, 16); - simd_address_at(tmp3, TMPD1, 5, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 6, 2, 16); - simd_address_at(tmp3, TMPD1, 6, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); - simd_address_at(tmp2, Rn_VPR128.8H, 7, 2, 16); - simd_address_at(tmp3, TMPD1, 7, 1, 8); - * [register]:1 tmp3 = (* [register]:2 tmp2); + TMPD1[0,8] = Rn_VPR128.8H[0,8]; + TMPD1[8,8] = Rn_VPR128.8H[16,8]; + TMPD1[16,8] = Rn_VPR128.8H[32,8]; + TMPD1[24,8] = Rn_VPR128.8H[48,8]; + TMPD1[32,8] = Rn_VPR128.8H[64,8]; + TMPD1[40,8] = Rn_VPR128.8H[80,8]; + TMPD1[48,8] = Rn_VPR128.8H[96,8]; + TMPD1[56,8] = Rn_VPR128.8H[112,8]; # simd shuffle Rd_VPR128.16B = TMPD1 (@0-8@1-9@2-10@3-11@4-12@5-13@6-14@7-15) lane size 1 - local tmp4:4 = 0; - local tmp5:4 = 0; - simd_address_at(tmp4, TMPD1, 0, 1, 8); - simd_address_at(tmp5, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 1, 1, 8); - simd_address_at(tmp5, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 2, 1, 8); - simd_address_at(tmp5, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 3, 1, 8); - simd_address_at(tmp5, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 4, 1, 8); - simd_address_at(tmp5, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 5, 1, 8); - simd_address_at(tmp5, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 6, 1, 8); - simd_address_at(tmp5, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp5 = * [register]:1 tmp4; - simd_address_at(tmp4, TMPD1, 7, 1, 8); - simd_address_at(tmp5, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp5 = * [register]:1 tmp4; + Rd_VPR128.16B[64,8] = TMPD1[0,8]; + Rd_VPR128.16B[72,8] = TMPD1[8,8]; + Rd_VPR128.16B[80,8] = TMPD1[16,8]; + Rd_VPR128.16B[88,8] = TMPD1[24,8]; + Rd_VPR128.16B[96,8] = TMPD1[32,8]; + Rd_VPR128.16B[104,8] = TMPD1[40,8]; + Rd_VPR128.16B[112,8] = TMPD1[48,8]; + Rd_VPR128.16B[120,8] = TMPD1[56,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = SIMD_INT_ZEXT(Rn_VPR128.8H, 2:1); - local tmp2:1 = 0; - local 
tmpd:16 = Rd_VPR128.16B; - tmp2 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp2, 8:1); - tmp2 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp2, 9:1); - tmp2 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp2, 10:1); - tmp2 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp2, 11:1); - tmp2 = SIMD_PIECE(tmp1, 4:1); tmpd = SIMD_COPY(tmpd, tmp2, 12:1); - tmp2 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp2, 13:1); - tmp2 = SIMD_PIECE(tmp1, 6:1); tmpd = SIMD_COPY(tmpd, tmp2, 14:1); - tmp2 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp2, 15:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_xtn2(Rd_VPR128.16B, Rn_VPR128.8H, 2:1); -@endif } # C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00 @@ -54819,90 +29297,27 @@ is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x :zip1 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.16B; TMPQ1 = Rn_VPR128.16B; # simd shuffle Rd_VPR128.16B = TMPQ1 (@0-0@1-2@2-4@3-6@4-8@5-10@6-12@7-14) lane size 1 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 0, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 1, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 2, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 3, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 4, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 5, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 6, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 7, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; + Rd_VPR128.16B[0,8] = TMPQ1[0,8]; + Rd_VPR128.16B[16,8] = TMPQ1[8,8]; + Rd_VPR128.16B[32,8] = TMPQ1[16,8]; + Rd_VPR128.16B[48,8] = TMPQ1[24,8]; + Rd_VPR128.16B[64,8] = TMPQ1[32,8]; + Rd_VPR128.16B[80,8] = TMPQ1[40,8]; + Rd_VPR128.16B[96,8] = TMPQ1[48,8]; + Rd_VPR128.16B[112,8] = TMPQ1[56,8]; # simd shuffle Rd_VPR128.16B = TMPQ2 (@0-1@1-3@2-5@3-7@4-9@5-11@6-13@7-15) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 0, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 1, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 2, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 3, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 4, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 5, 1, 16); - 
simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 6, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 7, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR128.16B[8,8] = TMPQ2[0,8]; + Rd_VPR128.16B[24,8] = TMPQ2[8,8]; + Rd_VPR128.16B[40,8] = TMPQ2[16,8]; + Rd_VPR128.16B[56,8] = TMPQ2[24,8]; + Rd_VPR128.16B[72,8] = TMPQ2[32,8]; + Rd_VPR128.16B[88,8] = TMPQ2[40,8]; + Rd_VPR128.16B[104,8] = TMPQ2[48,8]; + Rd_VPR128.16B[120,8] = TMPQ2[56,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.16B; - local tmp2:16 = Rn_VPR128.16B; - local tmp3:1 = 0; - local tmpd:16 = Rd_VPR128.16B; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - tmp3 = SIMD_PIECE(tmp2, 4:1); tmpd = SIMD_COPY(tmpd, tmp3, 8:1); - tmp3 = SIMD_PIECE(tmp2, 5:1); tmpd = SIMD_COPY(tmpd, tmp3, 10:1); - tmp3 = SIMD_PIECE(tmp2, 6:1); tmpd = SIMD_COPY(tmpd, tmp3, 12:1); - tmp3 = SIMD_PIECE(tmp2, 7:1); tmpd = SIMD_COPY(tmpd, tmp3, 14:1); - local tmp4:1 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - tmp4 = SIMD_PIECE(tmp1, 4:1); tmpd = SIMD_COPY(tmpd, tmp4, 9:1); - tmp4 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp4, 11:1); - tmp4 = SIMD_PIECE(tmp1, 6:1); tmpd = SIMD_COPY(tmpd, tmp4, 13:1); - tmp4 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp4, 15:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_zip1(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00 @@ -54914,34 +29329,13 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_ :zip1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.2D; TMPQ1 = Rn_VPR128.2D; # simd shuffle Rd_VPR128.2D = TMPQ1 (@0-0) lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 0, 8, 16); - simd_address_at(tmp4, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp4 = * [register]:8 tmp3; + Rd_VPR128.2D[0,64] = TMPQ1[0,64]; # simd shuffle Rd_VPR128.2D = TMPQ2 (@0-1) lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 0, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = * [register]:8 tmp5; + Rd_VPR128.2D[64,64] = TMPQ2[0,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.2D; - local tmp2:16 = Rn_VPR128.2D; - local tmp3:8 = 0; - local tmpd:16 = Rd_VPR128.2D; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - local tmp4:8 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_zip1(Rn_VPR128.2D, 
Rm_VPR128.2D, 8:1); -@endif } # C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00 @@ -54953,34 +29347,13 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1 :zip1 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.2S; TMPD1 = Rn_VPR64.2S; # simd shuffle Rd_VPR64.2S = TMPD1 (@0-0) lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 4, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp4 = * [register]:4 tmp3; + Rd_VPR64.2S[0,32] = TMPD1[0,32]; # simd shuffle Rd_VPR64.2S = TMPD2 (@0-1) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 0, 4, 8); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR64.2S[32,32] = TMPD2[0,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.2S; - local tmp2:8 = Rn_VPR64.2S; - local tmp3:4 = 0; - local tmpd:8 = Rd_VPR64.2S; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - local tmp4:4 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_zip1(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00 @@ -54992,42 +29365,15 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_15 :zip1 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.4H; TMPD1 = Rn_VPR64.4H; # simd shuffle Rd_VPR64.4H = TMPD1 (@0-0@1-2) lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPD1, 1, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp4 = * [register]:2 tmp3; + Rd_VPR64.4H[0,16] = TMPD1[0,16]; + Rd_VPR64.4H[32,16] = TMPD1[16,16]; # simd shuffle Rd_VPR64.4H = TMPD2 (@0-1@1-3) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 0, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPD2, 1, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR64.4H[16,16] = TMPD2[0,16]; + Rd_VPR64.4H[48,16] = TMPD2[16,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.4H; - local tmp2:8 = Rn_VPR64.4H; - local tmp3:2 = 0; - local tmpd:8 = Rd_VPR64.4H; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - local tmp4:2 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_zip1(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00 @@ -55039,42 +29385,15 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & 
b_2121=0 & Rm_VPR64.4H & b_15 :zip1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.4S; TMPQ1 = Rn_VPR128.4S; # simd shuffle Rd_VPR128.4S = TMPQ1 (@0-0@1-2) lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 0, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp4 = * [register]:4 tmp3; - simd_address_at(tmp3, TMPQ1, 1, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp4 = * [register]:4 tmp3; + Rd_VPR128.4S[0,32] = TMPQ1[0,32]; + Rd_VPR128.4S[64,32] = TMPQ1[32,32]; # simd shuffle Rd_VPR128.4S = TMPQ2 (@0-1@1-3) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 0, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; - simd_address_at(tmp5, TMPQ2, 1, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR128.4S[32,32] = TMPQ2[0,32]; + Rd_VPR128.4S[96,32] = TMPQ2[32,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.4S; - local tmp2:16 = Rn_VPR128.4S; - local tmp3:4 = 0; - local tmpd:16 = Rd_VPR128.4S; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - local tmp4:4 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_zip1(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00 @@ -55086,58 +29405,19 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1 :zip1 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.8B; TMPD1 = Rn_VPR64.8B; # simd shuffle Rd_VPR64.8B = TMPD1 (@0-0@1-2@2-4@3-6) lane size 1 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 0, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 1, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 2, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 3, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; + Rd_VPR64.8B[0,8] = TMPD1[0,8]; + Rd_VPR64.8B[16,8] = TMPD1[8,8]; + Rd_VPR64.8B[32,8] = TMPD1[16,8]; + Rd_VPR64.8B[48,8] = TMPD1[24,8]; # simd shuffle Rd_VPR64.8B = TMPD2 (@0-1@1-3@2-5@3-7) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 0, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 1, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 2, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - 
simd_address_at(tmp5, TMPD2, 3, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR64.8B[8,8] = TMPD2[0,8]; + Rd_VPR64.8B[24,8] = TMPD2[8,8]; + Rd_VPR64.8B[40,8] = TMPD2[16,8]; + Rd_VPR64.8B[56,8] = TMPD2[24,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.8B; - local tmp2:8 = Rn_VPR64.8B; - local tmp3:1 = 0; - local tmpd:8 = Rd_VPR64.8B; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - local tmp4:1 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_zip1(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.403 ZIP1 page C7-2304 line 129621 MATCH x0e003800/mask=xbf20fc00 @@ -55149,58 +29429,19 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_15 :zip1 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.8H; TMPQ1 = Rn_VPR128.8H; # simd shuffle Rd_VPR128.8H = TMPQ1 (@0-0@1-2@2-4@3-6) lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 0, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 1, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 2, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 3, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; + Rd_VPR128.8H[0,16] = TMPQ1[0,16]; + Rd_VPR128.8H[32,16] = TMPQ1[16,16]; + Rd_VPR128.8H[64,16] = TMPQ1[32,16]; + Rd_VPR128.8H[96,16] = TMPQ1[48,16]; # simd shuffle Rd_VPR128.8H = TMPQ2 (@0-1@1-3@2-5@3-7) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 0, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 1, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 2, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 3, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR128.8H[16,16] = TMPQ2[0,16]; + Rd_VPR128.8H[48,16] = TMPQ2[16,16]; + Rd_VPR128.8H[80,16] = TMPQ2[32,16]; + Rd_VPR128.8H[112,16] = TMPQ2[48,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.8H; - local tmp2:16 = Rn_VPR128.8H; - local tmp3:2 = 0; - local tmpd:16 = Rd_VPR128.8H; - tmp3 = SIMD_PIECE(tmp2, 0:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 
2:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - local tmp4:2 = 0; - tmp4 = SIMD_PIECE(tmp1, 0:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_zip1(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } # C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 @@ -55212,90 +29453,27 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1 :zip2 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.16B; TMPQ1 = Rn_VPR128.16B; # simd shuffle Rd_VPR128.16B = TMPQ1 (@8-0@9-2@10-4@11-6@12-8@13-10@14-12@15-14) lane size 1 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 8, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 0, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 9, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 2, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 10, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 4, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 11, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 6, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 12, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 8, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 13, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 10, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 14, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 12, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPQ1, 15, 1, 16); - simd_address_at(tmp4, Rd_VPR128.16B, 14, 1, 16); - * [register]:1 tmp4 = * [register]:1 tmp3; + Rd_VPR128.16B[0,8] = TMPQ1[64,8]; + Rd_VPR128.16B[16,8] = TMPQ1[72,8]; + Rd_VPR128.16B[32,8] = TMPQ1[80,8]; + Rd_VPR128.16B[48,8] = TMPQ1[88,8]; + Rd_VPR128.16B[64,8] = TMPQ1[96,8]; + Rd_VPR128.16B[80,8] = TMPQ1[104,8]; + Rd_VPR128.16B[96,8] = TMPQ1[112,8]; + Rd_VPR128.16B[112,8] = TMPQ1[120,8]; # simd shuffle Rd_VPR128.16B = TMPQ2 (@8-1@9-3@10-5@11-7@12-9@13-11@14-13@15-15) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 8, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 1, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 9, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 3, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 10, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 5, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 11, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 7, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 12, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 9, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 13, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 11, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 14, 1, 16); - 
simd_address_at(tmp6, Rd_VPR128.16B, 13, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPQ2, 15, 1, 16); - simd_address_at(tmp6, Rd_VPR128.16B, 15, 1, 16); - * [register]:1 tmp6 = * [register]:1 tmp5; + Rd_VPR128.16B[8,8] = TMPQ2[64,8]; + Rd_VPR128.16B[24,8] = TMPQ2[72,8]; + Rd_VPR128.16B[40,8] = TMPQ2[80,8]; + Rd_VPR128.16B[56,8] = TMPQ2[88,8]; + Rd_VPR128.16B[72,8] = TMPQ2[96,8]; + Rd_VPR128.16B[88,8] = TMPQ2[104,8]; + Rd_VPR128.16B[104,8] = TMPQ2[112,8]; + Rd_VPR128.16B[120,8] = TMPQ2[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.16B; - local tmp2:16 = Rn_VPR128.16B; - local tmp3:1 = 0; - local tmpd:16 = Rd_VPR128.16B; - tmp3 = SIMD_PIECE(tmp2, 8:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 9:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 10:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 11:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - tmp3 = SIMD_PIECE(tmp2, 12:1); tmpd = SIMD_COPY(tmpd, tmp3, 8:1); - tmp3 = SIMD_PIECE(tmp2, 13:1); tmpd = SIMD_COPY(tmpd, tmp3, 10:1); - tmp3 = SIMD_PIECE(tmp2, 14:1); tmpd = SIMD_COPY(tmpd, tmp3, 12:1); - tmp3 = SIMD_PIECE(tmp2, 15:1); tmpd = SIMD_COPY(tmpd, tmp3, 14:1); - local tmp4:1 = 0; - tmp4 = SIMD_PIECE(tmp1, 8:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 9:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - tmp4 = SIMD_PIECE(tmp1, 10:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 11:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - tmp4 = SIMD_PIECE(tmp1, 12:1); tmpd = SIMD_COPY(tmpd, tmp4, 9:1); - tmp4 = SIMD_PIECE(tmp1, 13:1); tmpd = SIMD_COPY(tmpd, tmp4, 11:1); - tmp4 = SIMD_PIECE(tmp1, 14:1); tmpd = SIMD_COPY(tmpd, tmp4, 13:1); - tmp4 = SIMD_PIECE(tmp1, 15:1); tmpd = SIMD_COPY(tmpd, tmp4, 15:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.16B -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.16B = NEON_zip2(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); -@endif } # C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 @@ -55307,34 +29485,13 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_ :zip2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.2D; TMPQ1 = Rn_VPR128.2D; # simd shuffle Rd_VPR128.2D = TMPQ1 (@1-0) lane size 8 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 1, 8, 16); - simd_address_at(tmp4, Rd_VPR128.2D, 0, 8, 16); - * [register]:8 tmp4 = * [register]:8 tmp3; + Rd_VPR128.2D[0,64] = TMPQ1[64,64]; # simd shuffle Rd_VPR128.2D = TMPQ2 (@1-1) lane size 8 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 1, 8, 16); - simd_address_at(tmp6, Rd_VPR128.2D, 1, 8, 16); - * [register]:8 tmp6 = * [register]:8 tmp5; + Rd_VPR128.2D[64,64] = TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.2D; - local tmp2:16 = Rn_VPR128.2D; - local tmp3:8 = 0; - local tmpd:16 = Rd_VPR128.2D; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - local tmp4:8 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.2D -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.2D = NEON_zip2(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); -@endif } # C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 @@ -55346,34 
+29503,13 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1 :zip2 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.2S; TMPD1 = Rn_VPR64.2S; # simd shuffle Rd_VPR64.2S = TMPD1 (@1-0) lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 1, 4, 8); - simd_address_at(tmp4, Rd_VPR64.2S, 0, 4, 8); - * [register]:4 tmp4 = * [register]:4 tmp3; + Rd_VPR64.2S[0,32] = TMPD1[32,32]; # simd shuffle Rd_VPR64.2S = TMPD2 (@1-1) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 1, 4, 8); - simd_address_at(tmp6, Rd_VPR64.2S, 1, 4, 8); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR64.2S[32,32] = TMPD2[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.2S; - local tmp2:8 = Rn_VPR64.2S; - local tmp3:4 = 0; - local tmpd:8 = Rd_VPR64.2S; - tmp3 = SIMD_PIECE(tmp2, 1:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - local tmp4:4 = 0; - tmp4 = SIMD_PIECE(tmp1, 1:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.2S -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.2S = NEON_zip2(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); -@endif } # C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 @@ -55385,42 +29521,15 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_15 :zip2 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.4H; TMPD1 = Rn_VPR64.4H; # simd shuffle Rd_VPR64.4H = TMPD1 (@2-0@3-2) lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 2, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 0, 2, 8); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPD1, 3, 2, 8); - simd_address_at(tmp4, Rd_VPR64.4H, 2, 2, 8); - * [register]:2 tmp4 = * [register]:2 tmp3; + Rd_VPR64.4H[0,16] = TMPD1[32,16]; + Rd_VPR64.4H[32,16] = TMPD1[48,16]; # simd shuffle Rd_VPR64.4H = TMPD2 (@2-1@3-3) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 2, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 1, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPD2, 3, 2, 8); - simd_address_at(tmp6, Rd_VPR64.4H, 3, 2, 8); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR64.4H[16,16] = TMPD2[32,16]; + Rd_VPR64.4H[48,16] = TMPD2[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.4H; - local tmp2:8 = Rn_VPR64.4H; - local tmp3:2 = 0; - local tmpd:8 = Rd_VPR64.4H; - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - local tmp4:2 = 0; - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.4H -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.4H = NEON_zip2(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); -@endif } # C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 @@ -55432,42 +29541,15 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_15 :zip2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & b_2429=0xe & 
advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.4S; TMPQ1 = Rn_VPR128.4S; # simd shuffle Rd_VPR128.4S = TMPQ1 (@2-0@3-2) lane size 4 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 2, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 0, 4, 16); - * [register]:4 tmp4 = * [register]:4 tmp3; - simd_address_at(tmp3, TMPQ1, 3, 4, 16); - simd_address_at(tmp4, Rd_VPR128.4S, 2, 4, 16); - * [register]:4 tmp4 = * [register]:4 tmp3; + Rd_VPR128.4S[0,32] = TMPQ1[64,32]; + Rd_VPR128.4S[64,32] = TMPQ1[96,32]; # simd shuffle Rd_VPR128.4S = TMPQ2 (@2-1@3-3) lane size 4 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 2, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 1, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; - simd_address_at(tmp5, TMPQ2, 3, 4, 16); - simd_address_at(tmp6, Rd_VPR128.4S, 3, 4, 16); - * [register]:4 tmp6 = * [register]:4 tmp5; + Rd_VPR128.4S[32,32] = TMPQ2[64,32]; + Rd_VPR128.4S[96,32] = TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.4S; - local tmp2:16 = Rn_VPR128.4S; - local tmp3:4 = 0; - local tmpd:16 = Rd_VPR128.4S; - tmp3 = SIMD_PIECE(tmp2, 2:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 3:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - local tmp4:4 = 0; - tmp4 = SIMD_PIECE(tmp1, 2:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 3:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.4S -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.4S = NEON_zip2(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); -@endif } # C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 @@ -55479,58 +29561,19 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1 :zip2 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { -@if defined(SEMANTIC_primitive) TMPD2 = Rm_VPR64.8B; TMPD1 = Rn_VPR64.8B; # simd shuffle Rd_VPR64.8B = TMPD1 (@4-0@5-2@6-4@7-6) lane size 1 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPD1, 4, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 0, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 5, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 2, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 6, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 4, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; - simd_address_at(tmp3, TMPD1, 7, 1, 8); - simd_address_at(tmp4, Rd_VPR64.8B, 6, 1, 8); - * [register]:1 tmp4 = * [register]:1 tmp3; + Rd_VPR64.8B[0,8] = TMPD1[32,8]; + Rd_VPR64.8B[16,8] = TMPD1[40,8]; + Rd_VPR64.8B[32,8] = TMPD1[48,8]; + Rd_VPR64.8B[48,8] = TMPD1[56,8]; # simd shuffle Rd_VPR64.8B = TMPD2 (@4-1@5-3@6-5@7-7) lane size 1 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPD2, 4, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 1, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 5, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 3, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 6, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 5, 1, 8); - * [register]:1 tmp6 = * [register]:1 tmp5; - simd_address_at(tmp5, TMPD2, 7, 1, 8); - simd_address_at(tmp6, Rd_VPR64.8B, 7, 1, 8); - * [register]:1 tmp6 = * 
[register]:1 tmp5; + Rd_VPR64.8B[8,8] = TMPD2[32,8]; + Rd_VPR64.8B[24,8] = TMPD2[40,8]; + Rd_VPR64.8B[40,8] = TMPD2[48,8]; + Rd_VPR64.8B[56,8] = TMPD2[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:8 = Rm_VPR64.8B; - local tmp2:8 = Rn_VPR64.8B; - local tmp3:1 = 0; - local tmpd:8 = Rd_VPR64.8B; - tmp3 = SIMD_PIECE(tmp2, 4:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 5:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 6:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 7:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); - local tmp4:1 = 0; - tmp4 = SIMD_PIECE(tmp1, 4:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - tmp4 = SIMD_PIECE(tmp1, 6:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR64.8B -@elif defined(SEMANTIC_pseudo) - Rd_VPR64.8B = NEON_zip2(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); -@endif } # C7.2.404 ZIP2 page C7-2306 line 129735 MATCH x0e007800/mask=xbf20fc00 @@ -55542,58 +29585,19 @@ is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_15 :zip2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { -@if defined(SEMANTIC_primitive) TMPQ2 = Rm_VPR128.8H; TMPQ1 = Rn_VPR128.8H; # simd shuffle Rd_VPR128.8H = TMPQ1 (@4-0@5-2@6-4@7-6) lane size 2 - local tmp3:4 = 0; - local tmp4:4 = 0; - simd_address_at(tmp3, TMPQ1, 4, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 0, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 5, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 2, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 6, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 4, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; - simd_address_at(tmp3, TMPQ1, 7, 2, 16); - simd_address_at(tmp4, Rd_VPR128.8H, 6, 2, 16); - * [register]:2 tmp4 = * [register]:2 tmp3; + Rd_VPR128.8H[0,16] = TMPQ1[64,16]; + Rd_VPR128.8H[32,16] = TMPQ1[80,16]; + Rd_VPR128.8H[64,16] = TMPQ1[96,16]; + Rd_VPR128.8H[96,16] = TMPQ1[112,16]; # simd shuffle Rd_VPR128.8H = TMPQ2 (@4-1@5-3@6-5@7-7) lane size 2 - local tmp5:4 = 0; - local tmp6:4 = 0; - simd_address_at(tmp5, TMPQ2, 4, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 1, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 5, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 3, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 6, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 5, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; - simd_address_at(tmp5, TMPQ2, 7, 2, 16); - simd_address_at(tmp6, Rd_VPR128.8H, 7, 2, 16); - * [register]:2 tmp6 = * [register]:2 tmp5; + Rd_VPR128.8H[16,16] = TMPQ2[64,16]; + Rd_VPR128.8H[48,16] = TMPQ2[80,16]; + Rd_VPR128.8H[80,16] = TMPQ2[96,16]; + Rd_VPR128.8H[112,16] = TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd -@elif defined(SEMANTIC_pcode) - local tmp1:16 = Rm_VPR128.8H; - local tmp2:16 = Rn_VPR128.8H; - local tmp3:2 = 0; - local tmpd:16 = Rd_VPR128.8H; - tmp3 = SIMD_PIECE(tmp2, 4:1); tmpd = SIMD_COPY(tmpd, tmp3, 0:1); - tmp3 = SIMD_PIECE(tmp2, 5:1); tmpd = SIMD_COPY(tmpd, tmp3, 2:1); - tmp3 = SIMD_PIECE(tmp2, 6:1); tmpd = SIMD_COPY(tmpd, tmp3, 4:1); - tmp3 = SIMD_PIECE(tmp2, 7:1); tmpd = SIMD_COPY(tmpd, tmp3, 6:1); 
- local tmp4:2 = 0; - tmp4 = SIMD_PIECE(tmp1, 4:1); tmpd = SIMD_COPY(tmpd, tmp4, 1:1); - tmp4 = SIMD_PIECE(tmp1, 5:1); tmpd = SIMD_COPY(tmpd, tmp4, 3:1); - tmp4 = SIMD_PIECE(tmp1, 6:1); tmpd = SIMD_COPY(tmpd, tmp4, 5:1); - tmp4 = SIMD_PIECE(tmp1, 7:1); tmpd = SIMD_COPY(tmpd, tmp4, 7:1); - Zd = zext(tmpd); # assigning to Rd_VPR128.8H -@elif defined(SEMANTIC_pseudo) - Rd_VPR128.8H = NEON_zip2(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); -@endif } @@ -55607,15 +29611,8 @@ is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1 :bfcvt Rd_FPR16, Rn_FPR32 is b_1031=0b0001111001100011010000 & Rd_FPR16 & Rn_FPR32 & Zd { -@if defined(SEMANTIC_primitive) Rd_FPR16 = float2float(Rn_FPR32); - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pcode) - Rd_FPR16 = float2float(Rn_FPR32); - zext_zd(Zd); # zero upper 24 bytes of Zd -@elif defined(SEMANTIC_pseudo) - Rd_FPR16 = NEON_bfcvt(Rn_FPR32); -@endif + zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.14 BFCVTN, BFCVTN2 page C7-1418 line 78518 MATCH x0ea16800/mask=xbffffc00 @@ -55626,11 +29623,15 @@ is b_1031=0b0001111001100011010000 & Rd_FPR16 & Rn_FPR32 & Zd # b_0031=0.00111010100001011010.......... :bfcvtn Rd_VPR128.4S, Rn_VPR128.4H -is b_3131=0b0 & Q=0 & b_1029=0b00111010100001011010 & Rn_VPR128.4H & Rd_VPR128.4S +is b_3131=0b0 & Q=0 & b_1029=0b00111010100001011010 & Rn_VPR128.4H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) - Rd_VPR128.4S = NEON_bfcvtn(Rn_VPR128.4H); -@endif + TMPQ1 = Rn_VPR128.4H; + # simd resize Rd_VPR128.4S = float2float(TMPQ1) (lane size 4 to 4) + Rd_VPR128.4S[0,32] = float2float(TMPQ1[0,32]); + Rd_VPR128.4S[32,32] = float2float(TMPQ1[32,32]); + Rd_VPR128.4S[64,32] = float2float(TMPQ1[64,32]); + Rd_VPR128.4S[96,32] = float2float(TMPQ1[96,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.14 BFCVTN, BFCVTN2 page C7-1418 line 78518 MATCH x0ea16800/mask=xbffffc00 @@ -55640,11 +29641,15 @@ is b_3131=0b0 & Q=0 & b_1029=0b00111010100001011010 & Rn_VPR128.4H & Rd_VPR128.4 # SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn/2@4 :bfcvtn2 Rd_VPR128.4S, Rn_VPR128.8H -is b_3131=0b0 & Q=1 & b_1029=0b00111010100001011010 & Rn_VPR128.8H & Rd_VPR128.4S +is b_3131=0b0 & Q=1 & b_1029=0b00111010100001011010 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) - Rd_VPR128.4S = NEON_bfcvtn2(Rn_VPR128.8H); -@endif + TMPQ1 = Rn_VPR128.8H; + # simd resize Rd_VPR128.4S = float2float(TMPQ1) (lane size 4 to 4) + Rd_VPR128.4S[0,32] = float2float(TMPQ1[0,32]); + Rd_VPR128.4S[32,32] = float2float(TMPQ1[32,32]); + Rd_VPR128.4S[64,32] = float2float(TMPQ1[64,32]); + Rd_VPR128.4S[96,32] = float2float(TMPQ1[96,32]); + zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.15 BFDOT (by element) page C7-1420 line 78603 MATCH x0f40f000/mask=xbfc0f400 @@ -55655,9 +29660,7 @@ is b_3131=0b0 & Q=1 & b_1029=0b00111010100001011010 & Rn_VPR128.8H & Rd_VPR128.4 :bfdot Rd_VPR128.2S, Rn_VPR128.4H, , Re_VPR128.H.vIndexHL is b_3131=0b0 & Q=0 & b_2229=0b00111101 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.4H & Rd_VPR128.2S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2S = NEON_bfdot(Rn_VPR128.4H, Re_VPR128.H.vIndexHL); -@endif } # C7.2.15 BFDOT (by element) page C7-1420 line 78603 MATCH x0f40f000/mask=xbfc0f400 @@ -55666,9 +29669,7 @@ is b_3131=0b0 & Q=0 & b_2229=0b00111101 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & :bfdot Rd_VPR128.4S, Rn_VPR128.8H, 
Re_VPR128.H.vIndexHL is b_3131=0b0 & Q=1 & b_2229=0b00111101 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8H & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_bfdot(Rn_VPR128.8H, Re_VPR128.H.vIndexHL); -@endif } # C7.2.16 BFDOT (vector) page C7-1422 line 78694 MATCH x2e40fc00/mask=xbfe0fc00 @@ -55679,9 +29680,7 @@ is b_3131=0b0 & Q=1 & b_2229=0b00111101 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & :bfdot Rd_VPR128.2S, Rn_VPR128.4H, Rm_VPR128.4H is b_3131=0b0 & Q=0 & b_2129=0b101110010 & Rm_VPR128.4H & b_1015=0b111111 & Rn_VPR128.4H & Rd_VPR128.2S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2S = NEON_bfdot(Rn_VPR128.4H, Rm_VPR128.4H); -@endif } # C7.2.16 BFDOT (vector) page C7-1422 line 78694 MATCH x2e40fc00/mask=xbfe0fc00 @@ -55690,9 +29689,7 @@ is b_3131=0b0 & Q=0 & b_2129=0b101110010 & Rm_VPR128.4H & b_1015=0b111111 & Rn_V :bfdot Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0b0 & Q=1 & b_2129=0b101110010 & Rm_VPR128.8H & b_1015=0b111111 & Rn_VPR128.8H & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_bfdot(Rn_VPR128.8H, Rm_VPR128.8H); -@endif } # C7.2.17 BFMLALB, BFMLALT (by element) page C7-1424 line 78780 MATCH x0fc0f000/mask=xbfc0f400 @@ -55703,9 +29700,7 @@ is b_3131=0b0 & Q=1 & b_2129=0b101110010 & Rm_VPR128.8H & b_1015=0b111111 & Rn_V :bfmlalb Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0b0 & Q=0 & b_2229=0b00111111 & Re_VPR128Lo.H.vIndexHLM & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8H & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_bfmlalb(Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM); -@endif } # C7.2.17 BFMLALB, BFMLALT (by element) page C7-1424 line 78780 MATCH x0fc0f000/mask=xbfc0f400 @@ -55714,9 +29709,7 @@ is b_3131=0b0 & Q=0 & b_2229=0b00111111 & Re_VPR128Lo.H.vIndexHLM & b_1215=0b111 :bfmlalt Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0b0 & Q=1 & b_2229=0b00111111 & Re_VPR128Lo.H.vIndexHLM & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8H & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_bfmlalt(Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM); -@endif } # C7.2.18 BFMLALB, BFMLALT (vector) page C7-1426 line 78870 MATCH x2ec0fc00/mask=xbfe0fc00 @@ -55727,9 +29720,7 @@ is b_3131=0b0 & Q=1 & b_2229=0b00111111 & Re_VPR128Lo.H.vIndexHLM & b_1215=0b111 :bfmlalb Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0b0 & Q=0 & b_2129=0b101110110 & Rm_VPR128.8H & b_1015=0b111111 & Rn_VPR128.8H & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_bfmlalb(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H); -@endif } # C7.2.18 BFMLALB, BFMLALT (vector) page C7-1426 line 78870 MATCH x2ec0fc00/mask=xbfe0fc00 @@ -55738,9 +29729,7 @@ is b_3131=0b0 & Q=0 & b_2129=0b101110110 & Rm_VPR128.8H & b_1015=0b111111 & Rn_V :bfmlalt Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0b0 & Q=1 & b_2129=0b101110110 & Rm_VPR128.8H & b_1015=0b111111 & Rn_VPR128.8H & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_bfmlalt(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H); -@endif } # C7.2.19 BFMMLA page C7-1427 line 78943 MATCH x6e40ec00/mask=xffe0fc00 @@ -55751,9 
+29740,7 @@ is b_3131=0b0 & Q=1 & b_2129=0b101110110 & Rm_VPR128.8H & b_1015=0b111111 & Rn_ :bfmmla Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_2131=0b01101110010 & Rm_VPR128.8H & b_1015=0b111011 & Rn_VPR128.8H & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_bfmmla(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H); -@endif } # C7.2.147 FRINT32X (vector) page C7-1726 line 96547 MATCH x2e21e800/mask=xbfbffc00 @@ -55836,9 +29823,7 @@ unimpl :smmla Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B is b_2131=0b01001110100 & Rm_VPR128.16B & b_1015=0b101001 & Rn_VPR128.16B & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_smmla(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B); -@endif } # C7.2.336 SUDOT (by element) page C7-2163 line 121691 MATCH x0f00f000/mask=xbfc0f400 @@ -55849,9 +29834,7 @@ is b_2131=0b01001110100 & Rm_VPR128.16B & b_1015=0b101001 & Rn_VPR128.16B & Rd_ :sudot Rd_VPR128.2S, Rn_VPR128.8B, Re_VPR128.H.vIndexHL is b_3131=0b0 & Q=0 & b_2229=0b00111100 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8B & Rd_VPR128.2S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2S = NEON_sudot(Rd_VPR128.2S, Rn_VPR128.8B, Re_VPR128.H.vIndexHL); -@endif } # C7.2.336 SUDOT (by element) page C7-2163 line 121691 MATCH x0f00f000/mask=xbfc0f400 @@ -55860,9 +29843,7 @@ is b_3131=0b0 & Q=0 & b_2229=0b00111100 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & :sudot Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.H.vIndexHL is b_3131=0b0 & Q=1 & b_2229=0b00111100 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.16B & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_sudot(Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.H.vIndexHL); -@endif } # C7.2.370 UMMLA (vector) page C7-2235 line 125634 MATCH x6e80a400/mask=xffe0fc00 @@ -55873,9 +29854,7 @@ is b_3131=0b0 & Q=1 & b_2229=0b00111100 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & :ummla Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B is b_2131=0b01101110100 & Rm_VPR128.16B & b_1015=0b101001 & Rn_VPR128.16B & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) - Rd_VPR128.4S = NEON_ummla(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B); -@endif + Rd_VPR128.4S = NEON_ummla(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.388 USDOT (vector) page C7-2273 line 127924 MATCH x0e809c00/mask=xbfe0fc00 @@ -55886,9 +29865,7 @@ is b_2131=0b01101110100 & Rm_VPR128.16B & b_1015=0b101001 & Rn_VPR128.16B & Rd_V :usdot Rd_VPR128.2S, Rn_VPR128.8B, Rm_VPR128.8B is b_3131=0b0 & Q=0 & b_2129=0b001110100 & Rm_VPR128.8B & b_1015=0b100111 & Rn_VPR128.8B & Rd_VPR128.2S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2S = NEON_usdot(Rd_VPR128.2S, Rn_VPR128.8B, Rm_VPR128.8B); -@endif } # C7.2.388 USDOT (vector) page C7-2273 line 127924 MATCH x0e809c00/mask=xbfe0fc00 @@ -55897,9 +29874,7 @@ is b_3131=0b0 & Q=0 & b_2129=0b001110100 & Rm_VPR128.8B & b_1015=0b100111 & Rn_V :usdot Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0b0 & Q=1 & b_2129=0b001110100 & Rn_VPR128.16B & b_1015=0b100111 & Rm_VPR128.16B & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_usdot(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B); -@endif } # C7.2.389 USDOT (by element) page 
C7-2275 line 128010 MATCH x0f80f000/mask=xbfc0f400 @@ -55910,9 +29885,7 @@ is b_3131=0b0 & Q=1 & b_2129=0b001110100 & Rn_VPR128.16B & b_1015=0b100111 & Rm_ :usdot Rd_VPR128.2S, Rn_VPR128.8B, Re_VPR128.H.vIndexHL is b_3131=0b0 & Q=0 & b_2229=0b00111110 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8B & Rd_VPR128.2S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.2S = NEON_usdot(Rd_VPR128.2S, Rn_VPR128.8B, Re_VPR128.H.vIndexHL); -@endif } # C7.2.389 USDOT (by element) page C7-2275 line 128010 MATCH x0f80f000/mask=xbfc0f400 @@ -55921,9 +29894,7 @@ is b_3131=0b0 & Q=0 & b_2229=0b00111110 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & :usdot Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.H.vIndexHL is b_3131=0b0 & Q=1 & b_2229=0b00111110 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.16B & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) Rd_VPR128.4S = NEON_usdot(Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.H.vIndexHL); -@endif } # C7.2.393 USMMLA (vector) page C7-2285 line 128543 MATCH x4e80ac00/mask=xffe0fc00 @@ -55934,7 +29905,5 @@ is b_3131=0b0 & Q=1 & b_2229=0b00111110 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & :usmmla Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B is b_2131=0b01001110100 & Rm_VPR128.16B & b_1015=0b101011 & Rn_VPR128.16B & Rd_VPR128.4S { -@if defined(SEMANTIC_pseudo) || defined(SEMANTIC_primitive) || defined(SEMANTIC_pcode) - Rd_VPR128.4S = NEON_usmmla(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B); -@endif + Rd_VPR128.4S = NEON_usmmla(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } diff --git a/Ghidra/Processors/AARCH64/src/main/java/ghidra/program/emulation/AARCH64EmulateInstructionStateModifier.java b/Ghidra/Processors/AARCH64/src/main/java/ghidra/program/emulation/AARCH64EmulateInstructionStateModifier.java index 9612b87e1b..f2f9a5513b 100644 --- a/Ghidra/Processors/AARCH64/src/main/java/ghidra/program/emulation/AARCH64EmulateInstructionStateModifier.java +++ b/Ghidra/Processors/AARCH64/src/main/java/ghidra/program/emulation/AARCH64EmulateInstructionStateModifier.java @@ -35,7 +35,7 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt // BLANK: // COPY: - registerPcodeOpBehavior("SIMD_COPY", new SIMD_COPY()); +// registerPcodeOpBehavior("SIMD_COPY", new SIMD_COPY()); // LOAD: // STORE: // BRANCH: @@ -53,41 +53,41 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt // INT_LESS: // INT_LESSEQUAL: // INT_ZEXT: - registerPcodeOpBehavior("SIMD_INT_ZEXT", new SIMD_INT_ZEXT()); +// registerPcodeOpBehavior("SIMD_INT_ZEXT", new SIMD_INT_ZEXT()); // INT_SEXT: - registerPcodeOpBehavior("SIMD_INT_SEXT", new SIMD_INT_SEXT()); +// registerPcodeOpBehavior("SIMD_INT_SEXT", new SIMD_INT_SEXT()); // INT_ABS (no equivalent SLEIGH primitive): registerPcodeOpBehavior("MP_INT_ABS", new MP_INT_ABS()); - registerPcodeOpBehavior("SIMD_INT_ABS", new SIMD_INT_ABS()); +// registerPcodeOpBehavior("SIMD_INT_ABS", new SIMD_INT_ABS()); // INT_ADD: // registerPcodeOpBehavior("SIMD_INT_ADD", new SIMD_INT_ADD()); // registerPcodeOpBehavior("SIPD_INT_ADD", new SIPD_INT_ADD()); // INT_SUB: - registerPcodeOpBehavior("SIMD_INT_SUB", new SIMD_INT_SUB()); +// registerPcodeOpBehavior("SIMD_INT_SUB", new SIMD_INT_SUB()); // INT_CARRY: // INT_SCARRY: // INT_SBORROW: // INT_2COMP: - registerPcodeOpBehavior("SIMD_INT_2COMP", new SIMD_INT_2COMP()); +// registerPcodeOpBehavior("SIMD_INT_2COMP", new SIMD_INT_2COMP()); // INT_NEGATE: // 
registerPcodeOpBehavior("MP_INT_NEGATE", new MP_INT_NEGATE()); - registerPcodeOpBehavior("SIMD_INT_NEGATE", new SIMD_INT_NEGATE()); +// registerPcodeOpBehavior("SIMD_INT_NEGATE", new SIMD_INT_NEGATE()); // INT_XOR: - registerPcodeOpBehavior("SIMD_INT_XOR", new SIMD_INT_XOR()); +// registerPcodeOpBehavior("SIMD_INT_XOR", new SIMD_INT_XOR()); // INT_AND: // registerPcodeOpBehavior("MP_INT_AND", new MP_INT_AND()); - registerPcodeOpBehavior("SIMD_INT_AND", new SIMD_INT_AND()); +// registerPcodeOpBehavior("SIMD_INT_AND", new SIMD_INT_AND()); // INT_OR: - registerPcodeOpBehavior("SIMD_INT_OR", new SIMD_INT_OR()); +// registerPcodeOpBehavior("SIMD_INT_OR", new SIMD_INT_OR()); // INT_LEFT: - registerPcodeOpBehavior("SIMD_INT_LEFT", new SIMD_INT_LEFT()); +// registerPcodeOpBehavior("SIMD_INT_LEFT", new SIMD_INT_LEFT()); // INT_RIGHT: - registerPcodeOpBehavior("SIMD_INT_RIGHT", new SIMD_INT_RIGHT()); +// registerPcodeOpBehavior("SIMD_INT_RIGHT", new SIMD_INT_RIGHT()); registerPcodeOpBehavior("MP_INT_RIGHT", new MP_INT_RIGHT()); // INT_SRIGHT: - registerPcodeOpBehavior("SIMD_INT_SRIGHT", new SIMD_INT_SRIGHT()); +// registerPcodeOpBehavior("SIMD_INT_SRIGHT", new SIMD_INT_SRIGHT()); // INT_MULT: - registerPcodeOpBehavior("SIMD_INT_MULT", new SIMD_INT_MULT()); +// registerPcodeOpBehavior("SIMD_INT_MULT", new SIMD_INT_MULT()); registerPcodeOpBehavior("MP_INT_MULT", new MP_INT_MULT()); registerPcodeOpBehavior("MP_INT_UMULT", new MP_INT_UMULT()); // INT_DIV: @@ -105,24 +105,24 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt // UNUSED1: // FLOAT_NAN: // FLOAT_ADD: - registerPcodeOpBehavior("SIMD_FLOAT_ADD", new SIMD_FLOAT_ADD()); +// registerPcodeOpBehavior("SIMD_FLOAT_ADD", new SIMD_FLOAT_ADD()); // registerPcodeOpBehavior("SIPD_FLOAT_ADD", new SIPD_FLOAT_ADD()); // FLOAT_DIV: - registerPcodeOpBehavior("SIMD_FLOAT_DIV", new SIMD_FLOAT_DIV()); +// registerPcodeOpBehavior("SIMD_FLOAT_DIV", new SIMD_FLOAT_DIV()); // FLOAT_MULT: - registerPcodeOpBehavior("SIMD_FLOAT_MULT", new SIMD_FLOAT_MULT()); +// registerPcodeOpBehavior("SIMD_FLOAT_MULT", new SIMD_FLOAT_MULT()); // FLOAT_SUB: - registerPcodeOpBehavior("SIMD_FLOAT_SUB", new SIMD_FLOAT_SUB()); +// registerPcodeOpBehavior("SIMD_FLOAT_SUB", new SIMD_FLOAT_SUB()); // FLOAT_NEG: - registerPcodeOpBehavior("SIMD_FLOAT_NEG", new SIMD_FLOAT_NEG()); +// registerPcodeOpBehavior("SIMD_FLOAT_NEG", new SIMD_FLOAT_NEG()); // FLOAT_ABS: - registerPcodeOpBehavior("SIMD_FLOAT_ABS", new SIMD_FLOAT_ABS()); +// registerPcodeOpBehavior("SIMD_FLOAT_ABS", new SIMD_FLOAT_ABS()); // FLOAT_SQRT: // INT2FLOAT: // FLOAT2FLOAT: - registerPcodeOpBehavior("SIMD_FLOAT2FLOAT", new SIMD_FLOAT2FLOAT()); +// registerPcodeOpBehavior("SIMD_FLOAT2FLOAT", new SIMD_FLOAT2FLOAT()); // TRUNC: - registerPcodeOpBehavior("SIMD_TRUNC", new SIMD_TRUNC()); +// registerPcodeOpBehavior("SIMD_TRUNC", new SIMD_TRUNC()); // CEIL: // FLOOR: // ROUND: @@ -153,7 +153,9 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt private long getmask(long esize) { long mask = -1; - if (esize < 8) mask = mask >>> ((8 - esize) * 8); + if (esize < 8) { + mask = mask >>> ((8 - esize) * 8); + } return mask; } @@ -182,7 +184,9 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt // the byte array is in big endian order protected long bytes_to_long(byte[] bytes, int lsb, int esize) { - if (lsb <= 0) return 0; + if (lsb <= 0) { + return 0; + } int i = lsb - esize; if (i < 0) { @@ -203,7 +207,9 @@ public class 
AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt // array. protected void insert_long(long value, byte[] outBytes, int lsb, int esize) { - if (lsb - esize < 0) throw new LowlevelError("insert_long: byte array too small"); + if (lsb - esize < 0) { + throw new LowlevelError("insert_long: byte array too small"); + } for (int j = 0; j < esize; j++) { outBytes[lsb - j - 1] = (byte) (value & 0xff); value = value >> 8; @@ -218,7 +224,9 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt protected byte[] varnode_to_bytes(Varnode outputVarnode, byte[] initBytes, int esize) { byte[] outBytes = new byte[outputVarnode.getSize()]; - if (initBytes == null) return outBytes; + if (initBytes == null) { + return outBytes; + } byte ext = 0; @@ -228,7 +236,9 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt ext = (byte) ((initBytes[j - 1] >= 0) ? 0 : 0xff); } else { outBytes[i - 1] = ext; - if (((i - 1) % esize) == 0) break; + if (((i - 1) % esize) == 0) { + break; + } } } @@ -256,9 +266,13 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt // Requires 1 input int numArgs = inputs.length - 1; - if (numArgs != 2) throw new LowlevelError(this.getClass().getName() + ": requires 2 inputs (op, size), got " + numArgs); + if (numArgs != 2) { + throw new LowlevelError(this.getClass().getName() + ": requires 2 inputs (op, size), got " + numArgs); + } - if (outputVarnode == null) throw new LowlevelError(this.getClass().getName() + ": missing required output"); + if (outputVarnode == null) { + throw new LowlevelError(this.getClass().getName() + ": missing required output"); + } MemoryState memoryState = emu.getMemoryState(); @@ -267,16 +281,19 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt Varnode simdVarnode = inputs[1]; int esize = (int) memoryState.getValue(inputs[2]); - if (outputVarnode.getSize() < simdVarnode.getSize()) + if (outputVarnode.getSize() < simdVarnode.getSize()) { throw new LowlevelError(this.getClass().getName() + ": input size (" + simdVarnode.getSize() + ") exceeds output size (" + outputVarnode.getSize() + ")"); + } - if (esize != 1 && esize != 2 && esize != 4 && esize != 8) + if (esize != 1 && esize != 2 && esize != 4 && esize != 8) { throw new LowlevelError(this.getClass().getName() + ": operand must be 1, 2, 4, or 8 bytes: got " + esize); + } - if ((outputVarnode.getSize() % esize) != 0) + if ((outputVarnode.getSize() % esize) != 0) { throw new LowlevelError(this.getClass().getName() + ": output size (" + outputVarnode.getSize() + ") must be a multiple of operand size (" + esize + ")"); + } } } @@ -516,9 +533,13 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt // Requires 2 or 3 inputs int numArgs = inputs.length - 1; - if (numArgs != 2 && numArgs != 3) throw new LowlevelError(this.getClass().getName() + ": requires 3 inputs (simd, op, esize), got " + numArgs); + if (numArgs != 2 && numArgs != 3) { + throw new LowlevelError(this.getClass().getName() + ": requires 3 inputs (simd, op, esize), got " + numArgs); + } - if (outputVarnode == null) throw new LowlevelError(this.getClass().getName() + ": missing required output"); + if (outputVarnode == null) { + throw new LowlevelError(this.getClass().getName() + ": missing required output"); + } MemoryState memoryState = emu.getMemoryState(); @@ -529,23 +550,28 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt int esize = 
opVarnode.getSize(); boolean opConstant = (numArgs == 2); - if (! opConstant) + if (! opConstant) { esize = (int) memoryState.getValue(inputs[3]); + } - if (outputVarnode.getSize() < simdVarnode.getSize()) + if (outputVarnode.getSize() < simdVarnode.getSize()) { throw new LowlevelError(this.getClass().getName() + ": input size (" + simdVarnode.getSize() + ") exceeds output size (" + outputVarnode.getSize() + ")"); + } - if (esize != 1 && esize != 2 && esize != 4 && esize != 8) + if (esize != 1 && esize != 2 && esize != 4 && esize != 8) { throw new LowlevelError(this.getClass().getName() + ": operand must be 1, 2, 4, or 8 bytes: got " + esize); + } - if ((outputVarnode.getSize() % esize) != 0) + if ((outputVarnode.getSize() % esize) != 0) { throw new LowlevelError(this.getClass().getName() + ": output size (" + outputVarnode.getSize() + ") must be a multiple of operand size (" + esize + ")"); + } - if (! opConstant && simdVarnode.getSize() != opVarnode.getSize()) + if (! opConstant && simdVarnode.getSize() != opVarnode.getSize()) { throw new LowlevelError(this.getClass().getName() + ": simd size (" + outputVarnode.getSize() + ") and operand size (" + esize + ") must be the same for simd operation"); + } } } @@ -565,7 +591,9 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt Varnode opVarnode = inputs[2]; boolean opConstant = (inputs.length == 3); int esize = opVarnode.getSize(); - if (! opConstant) esize = (int) memoryState.getValue(inputs[3]); + if (! opConstant) { + esize = (int) memoryState.getValue(inputs[3]); + } int opstep = (opConstant ? 0 : esize); byte[] simdBytes = memoryState.getBigInteger(simdVarnode, true).toByteArray(); @@ -607,7 +635,9 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt Varnode opVarnode = inputs[2]; boolean opConstant = (inputs.length == 3); int esize = opVarnode.getSize(); - if (! opConstant) esize = (int) memoryState.getValue(inputs[3]); + if (! opConstant) { + esize = (int) memoryState.getValue(inputs[3]); + } int opstep = (opConstant ? 
0 : esize); byte[] simdBytes = memoryState.getBigInteger(simdVarnode, false).toByteArray(); @@ -653,9 +683,13 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt // Requires 2 inputs int numArgs = inputs.length - 1; - if (numArgs != 2 && numArgs != 3) throw new LowlevelError(this.getClass().getName() + ": requires 2 or 3 inputs (pairData*, esize), got " + numArgs); + if (numArgs != 2 && numArgs != 3) { + throw new LowlevelError(this.getClass().getName() + ": requires 2 or 3 inputs (pairData*, esize), got " + numArgs); + } - if (outputVarnode == null) throw new LowlevelError(this.getClass().getName() + ": missing required output"); + if (outputVarnode == null) { + throw new LowlevelError(this.getClass().getName() + ": missing required output"); + } MemoryState memoryState = emu.getMemoryState(); @@ -673,11 +707,13 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt int osize = outputVarnode.getSize(); int oesize = (iesize * osize) / isize; - if (iesize != 1 && iesize != 2 && iesize != 4 && iesize != 8) + if (iesize != 1 && iesize != 2 && iesize != 4 && iesize != 8) { throw new LowlevelError(this.getClass().getName() + ": operand lanes must be 1, 2, 4, or 8 bytes: got " + iesize); + } - if (oesize != 1 && oesize != 2 && oesize != 4 && oesize != 8) + if (oesize != 1 && oesize != 2 && oesize != 4 && oesize != 8) { throw new LowlevelError(this.getClass().getName() + ": output lanes must be 1, 2, 4, or 8 bytes: got " + oesize); + } } } @@ -921,9 +957,13 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt @Override public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { int numArgs = inputs.length - 1; - if (numArgs != 2) throw new LowlevelError("MP_INT_EQUAL: requires 2 (Vm, Vn), got " + numArgs); + if (numArgs != 2) { + throw new LowlevelError("MP_INT_EQUAL: requires 2 (Vm, Vn), got " + numArgs); + } - if (outputVarnode == null) throw new LowlevelError("MP_INT_EQUAL: missing required output"); + if (outputVarnode == null) { + throw new LowlevelError("MP_INT_EQUAL: missing required output"); + } MemoryState memoryState = emu.getMemoryState(); BigInteger cmp1 = memoryState.getBigInteger(inputs[1], false); @@ -956,9 +996,13 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt @Override public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { int numArgs = inputs.length - 1; - if (numArgs != 1) throw new LowlevelError("MP_INT_ABS: requires 1 (Vn), got " + numArgs); + if (numArgs != 1) { + throw new LowlevelError("MP_INT_ABS: requires 1 (Vn), got " + numArgs); + } - if (outputVarnode == null) throw new LowlevelError("MP_INT_ABS: missing required output"); + if (outputVarnode == null) { + throw new LowlevelError("MP_INT_ABS: missing required output"); + } MemoryState memoryState = emu.getMemoryState(); BigInteger op = memoryState.getBigInteger(inputs[1], true); @@ -981,6 +1025,7 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt @SuppressWarnings("unused") private class SIPD_INT_ADD extends SIPD_SOP2 { + @Override protected long op2(long x, long y, int iesize, int oesize) { return x + y; } } @@ -1005,9 +1050,13 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt @Override public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { int numArgs = inputs.length - 1; - if (numArgs != 1) throw new LowlevelError("MP_INT_NEGATE: requires 1 (Vn), got " + 
numArgs); + if (numArgs != 1) { + throw new LowlevelError("MP_INT_NEGATE: requires 1 (Vn), got " + numArgs); + } - if (outputVarnode == null) throw new LowlevelError("MP_INT_NEGATE: missing required output"); + if (outputVarnode == null) { + throw new LowlevelError("MP_INT_NEGATE: missing required output"); + } MemoryState memoryState = emu.getMemoryState(); byte[] value = memoryState.getBigInteger(inputs[1], true).toByteArray(); @@ -1047,9 +1096,13 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt @Override public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { int numArgs = inputs.length - 1; - if (numArgs != 2) throw new LowlevelError("MP_INT_AND: requires 2 (Vm, Vn), got " + numArgs); + if (numArgs != 2) { + throw new LowlevelError("MP_INT_AND: requires 2 (Vm, Vn), got " + numArgs); + } - if (outputVarnode == null) throw new LowlevelError("MP_INT_AND: missing required output"); + if (outputVarnode == null) { + throw new LowlevelError("MP_INT_AND: missing required output"); + } MemoryState memoryState = emu.getMemoryState(); BigInteger value = memoryState.getBigInteger(inputs[1], false); @@ -1094,9 +1147,13 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt @Override public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { int numArgs = inputs.length - 1; - if (numArgs != 2) throw new LowlevelError("MP_INT_RIGHT: requires 2 (Vn, shift), got " + numArgs); + if (numArgs != 2) { + throw new LowlevelError("MP_INT_RIGHT: requires 2 (Vn, shift), got " + numArgs); + } - if (outputVarnode == null) throw new LowlevelError("MP_INT_RIGHT: missing required output"); + if (outputVarnode == null) { + throw new LowlevelError("MP_INT_RIGHT: missing required output"); + } MemoryState memoryState = emu.getMemoryState(); @@ -1131,9 +1188,13 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt @Override public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { int numArgs = inputs.length - 1; - if (numArgs != 2) throw new LowlevelError("MP_INT_MULT: requires 2 (Vm, Vn), got " + numArgs); + if (numArgs != 2) { + throw new LowlevelError("MP_INT_MULT: requires 2 (Vm, Vn), got " + numArgs); + } - if (outputVarnode == null) throw new LowlevelError("MP_INT_MULT: missing required output"); + if (outputVarnode == null) { + throw new LowlevelError("MP_INT_MULT: missing required output"); + } MemoryState memoryState = emu.getMemoryState(); BigInteger value = memoryState.getBigInteger(inputs[1], true); @@ -1157,9 +1218,13 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt @Override public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { int numArgs = inputs.length - 1; - if (numArgs != 2) throw new LowlevelError("MP_INT_UMULT: requires 2 (Vm, Vn), got " + numArgs); + if (numArgs != 2) { + throw new LowlevelError("MP_INT_UMULT: requires 2 (Vm, Vn), got " + numArgs); + } - if (outputVarnode == null) throw new LowlevelError("MP_INT_UMULT: missing required output"); + if (outputVarnode == null) { + throw new LowlevelError("MP_INT_UMULT: missing required output"); + } MemoryState memoryState = emu.getMemoryState(); BigInteger value = memoryState.getBigInteger(inputs[1], false); @@ -1195,28 +1260,47 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt @SuppressWarnings("unused") private class SIPD_FLOAT_ADD extends SIPD_UOP2 { + @Override protected long op2(long x, long y, int 
iesize, int oesize) { if (iesize == 2) { float fx = shortBitsToFloat(x); float fy = shortBitsToFloat(y); float fz = fx + fy; - if (oesize == 2) return floatToShortBits(fz); - if (oesize == 4) return (long) Float.floatToIntBits(fz); - if (oesize == 8) return Double.doubleToLongBits((double) fz); + if (oesize == 2) { + return floatToShortBits(fz); + } + if (oesize == 4) { + return (long) Float.floatToIntBits(fz); + } + if (oesize == 8) { + return Double.doubleToLongBits((double) fz); + } } else if (iesize == 4) { float fx = Float.intBitsToFloat((int) x); float fy = Float.intBitsToFloat((int) y); float fz = fx + fy; - if (oesize == 2) return floatToShortBits(fz); - if (oesize == 4) return (long) Float.floatToIntBits(fz); - if (oesize == 8) return Double.doubleToLongBits((double) fz); + if (oesize == 2) { + return floatToShortBits(fz); + } + if (oesize == 4) { + return (long) Float.floatToIntBits(fz); + } + if (oesize == 8) { + return Double.doubleToLongBits((double) fz); + } } else if (iesize == 8) { double fx = Double.longBitsToDouble(x); double fy = Double.longBitsToDouble(y); double fz = fx + fy; - if (oesize == 2) return floatToShortBits((float) fz); - if (oesize == 4) return (long) Float.floatToIntBits((float) fz); - if (oesize == 8) return Double.doubleToLongBits(fz); + if (oesize == 2) { + return floatToShortBits((float) fz); + } + if (oesize == 4) { + return (long) Float.floatToIntBits((float) fz); + } + if (oesize == 8) { + return Double.doubleToLongBits(fz); + } } return 0; } @@ -1399,9 +1483,13 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt // Requires 2 inputs int numArgs = inputs.length - 1; - if (numArgs != 2) throw new LowlevelError("SIMD_PIECE: requires 2 inputs, got " + numArgs); + if (numArgs != 2) { + throw new LowlevelError("SIMD_PIECE: requires 2 inputs, got " + numArgs); + } - if (outputVarnode == null) throw new LowlevelError("SIMD_PIECE: missing required output"); + if (outputVarnode == null) { + throw new LowlevelError("SIMD_PIECE: missing required output"); + } MemoryState memoryState = emu.getMemoryState(); @@ -1410,9 +1498,10 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt Varnode simdVarnode = inputs[1]; int offset = (int) memoryState.getValue(inputs[2]); - if (simdVarnode.getSize() < (offset + 1) * outputVarnode.getSize()) + if (simdVarnode.getSize() < (offset + 1) * outputVarnode.getSize()) { throw new LowlevelError("SIMD_PIECE: input size (" + simdVarnode.getSize() + ") too small to extract output size (" + outputVarnode.getSize() + ") from offset (" + offset + ")"); + } // Allocate a byte array of the correct size to hold the output // initialized to all zeros @@ -1453,8 +1542,12 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt @Override public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { int numArgs = inputs.length - 1; - if (numArgs != 2) throw new LowlevelError(this.getClass().getName() + ": requires 2 inputs (Vn, Vm), got " + numArgs); - if (outputVarnode == null) throw new LowlevelError(this.getClass().getName() + ": missing required output"); + if (numArgs != 2) { + throw new LowlevelError(this.getClass().getName() + ": requires 2 inputs (Vn, Vm), got " + numArgs); + } + if (outputVarnode == null) { + throw new LowlevelError(this.getClass().getName() + ": missing required output"); + } MemoryState memoryState = emu.getMemoryState(); @@ -1463,17 +1556,22 @@ public class AARCH64EmulateInstructionStateModifier extends 
EmulateInstructionSt Varnode VnVarnode = inputs[1]; Varnode VmVarnode = inputs[2]; - if (outSize != VnVarnode.getSize() + VmVarnode.getSize()) + if (outSize != VnVarnode.getSize() + VmVarnode.getSize()) { throw new LowlevelError(this.getClass().getName() + ": output size (" + outSize + ") must equal the sum of input sizes (" + VnVarnode.getSize() + "," + VmVarnode.getSize() + ")"); + } byte[] outBytes = new byte[outSize]; byte[] VnBytes = memoryState.getBigInteger(VnVarnode, false).toByteArray(); byte[] VmBytes = memoryState.getBigInteger(VmVarnode, false).toByteArray(); - for (int i = outSize - 1, j = VnBytes.length - 1; i >= 0 && j >= 0; i--, j--) outBytes[i] = VnBytes[j]; - for (int i = outSize - VnVarnode.getSize() - 1, j = VmBytes.length - 1; i >= 0 && j >= 0; i--, j--) outBytes[i] = VmBytes[j]; + for (int i = outSize - 1, j = VnBytes.length - 1; i >= 0 && j >= 0; i--, j--) { + outBytes[i] = VnBytes[j]; + } + for (int i = outSize - VnVarnode.getSize() - 1, j = VmBytes.length - 1; i >= 0 && j >= 0; i--, j--) { + outBytes[i] = VmBytes[j]; + } } } @@ -1495,18 +1593,23 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt public void evaluate(Emulate emu, Varnode outputVarnode, Varnode[] inputs) { int numArgs = inputs.length - 1; - if (numArgs < 3 || numArgs > 6) throw new LowlevelError("a64_TBL: requires 3 to 6 inputs (Vinit, Vn-Vn4, Vm), got " + numArgs); + if (numArgs < 3 || numArgs > 6) { + throw new LowlevelError("a64_TBL: requires 3 to 6 inputs (Vinit, Vn-Vn4, Vm), got " + numArgs); + } - if (outputVarnode == null) throw new LowlevelError("a64_TBL: missing required output"); + if (outputVarnode == null) { + throw new LowlevelError("a64_TBL: missing required output"); + } MemoryState memoryState = emu.getMemoryState(); Varnode updateVarnode = inputs[1]; Varnode indexVarnode = inputs[numArgs]; // The index size must match the output size - if (outputVarnode.getSize() != indexVarnode.getSize()) + if (outputVarnode.getSize() != indexVarnode.getSize()) { throw new LowlevelError("a64_TBL: the output size (" + outputVarnode.getSize() + ") must match the index size (" + indexVarnode.getSize() + ")"); + } int regs = numArgs - 2; int elements = outputVarnode.getSize(); @@ -1514,8 +1617,9 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt // The indices are converted to little endian order byte[] indices = new byte[elements]; byte[] vx = memoryState.getBigInteger(indexVarnode, false).toByteArray(); - for (int j = 0; j < vx.length && j < elements; j++) + for (int j = 0; j < vx.length && j < elements; j++) { indices[j] = vx[vx.length - j - 1]; + } // Create table from registers // It consists of 16, 32, 48, or 64 bytes from Vn1-Vn4 @@ -1526,8 +1630,9 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt byte[] table = new byte[64]; for (int i = 0; i < regs; i++) { byte[] vn = memoryState.getBigInteger(inputs[2 + i], false).toByteArray(); - for (int j = 0; j < vn.length && i * 16 + j < 64; j++) + for (int j = 0; j < vn.length && i * 16 + j < 64; j++) { table[i*16 + j] = vn[vn.length - j - 1]; + } } // The result is pre-initialized to Vi @@ -1536,8 +1641,9 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt byte[] result = new byte[elements]; byte[] vi = memoryState.getBigInteger(updateVarnode, false).toByteArray(); - for (int j = 0; j < vi.length && j < elements; j++) + for (int j = 0; j < vi.length && j < elements; j++) { result[j] = vi[vi.length - j - 1]; + } // Since the 
indices, table, and result // are all in little endian order @@ -1546,7 +1652,9 @@ public class AARCH64EmulateInstructionStateModifier extends EmulateInstructionSt for (int i = 0; i < elements; i++) { int index = (int) (indices[i] & 0xff); - if (index < 16 * regs) result[i] = table[index]; + if (index < 16 * regs) { + result[i] = table[index]; + } } // reverse the endianness of the result, in place From 6ed4ce628c8ed1a45f4180c6912db874ca16b8ea Mon Sep 17 00:00:00 2001 From: caheckman <48068198+caheckman@users.noreply.github.com> Date: Tue, 20 Oct 2020 11:23:59 -0400 Subject: [PATCH 2/5] token endianness --- .../Decompiler/src/decompile/cpp/slgh_compile.cc | 9 +++++++-- .../Decompiler/src/decompile/cpp/slgh_compile.hh | 2 +- .../Decompiler/src/decompile/cpp/slghparse.y | 4 +++- .../Decompiler/src/decompile/cpp/slghscan.l | 8 ++++---- .../main/antlr/ghidra/sleigh/grammar/BaseLexer.g | 1 + .../antlr/ghidra/sleigh/grammar/SleighCompiler.g | 12 +++++++++++- .../main/antlr/ghidra/sleigh/grammar/SleighEcho.g | 1 + .../antlr/ghidra/sleigh/grammar/SleighParser.g | 1 + .../plugin/processors/sleigh/SleighLanguage.java | 4 ++-- .../pcodeCPort/slgh_compile/SleighCompile.java | 15 +++++++++++---- 10 files changed, 42 insertions(+), 15 deletions(-) diff --git a/Ghidra/Features/Decompiler/src/decompile/cpp/slgh_compile.cc b/Ghidra/Features/Decompiler/src/decompile/cpp/slgh_compile.cc index 481c702c0a..64d30b516a 100644 --- a/Ghidra/Features/Decompiler/src/decompile/cpp/slgh_compile.cc +++ b/Ghidra/Features/Decompiler/src/decompile/cpp/slgh_compile.cc @@ -1987,7 +1987,7 @@ bool SleighCompile::undefinePreprocValue(const string &nm) // Functions needed by the parser -TokenSymbol *SleighCompile::defineToken(string *name,uintb *sz) +TokenSymbol *SleighCompile::defineToken(string *name,uintb *sz,int4 endian) { uint4 size = *sz; @@ -1998,7 +1998,12 @@ TokenSymbol *SleighCompile::defineToken(string *name,uintb *sz) } else size = size/8; - Token *newtoken = new Token(*name,size,isBigEndian(),tokentable.size()); + bool isBig; + if (endian ==0) + isBig = isBigEndian(); + else + isBig = (endian > 0); + Token *newtoken = new Token(*name,size,isBig,tokentable.size()); tokentable.push_back(newtoken); delete name; TokenSymbol *res = new TokenSymbol(newtoken); diff --git a/Ghidra/Features/Decompiler/src/decompile/cpp/slgh_compile.hh b/Ghidra/Features/Decompiler/src/decompile/cpp/slgh_compile.hh index 185efa346c..30a7d1c284 100644 --- a/Ghidra/Features/Decompiler/src/decompile/cpp/slgh_compile.hh +++ b/Ghidra/Features/Decompiler/src/decompile/cpp/slgh_compile.hh @@ -261,7 +261,7 @@ public: bool undefinePreprocValue(const string &nm); // Parser functions - TokenSymbol *defineToken(string *name,uintb *sz); + TokenSymbol *defineToken(string *name,uintb *sz,int4 endian); void addTokenField(TokenSymbol *sym,FieldQuality *qual); bool addContextField(VarnodeSymbol *sym,FieldQuality *qual); void newSpace(SpaceQuality *qual); diff --git a/Ghidra/Features/Decompiler/src/decompile/cpp/slghparse.y b/Ghidra/Features/Decompiler/src/decompile/cpp/slghparse.y index 4bcc73bf9f..c34f5a4656 100644 --- a/Ghidra/Features/Decompiler/src/decompile/cpp/slghparse.y +++ b/Ghidra/Features/Decompiler/src/decompile/cpp/slghparse.y @@ -181,7 +181,9 @@ aligndef: DEFINE_KEY ALIGN_KEY '=' INTEGER ';' { slgh->setAlignment(*$4); delete ; tokendef: tokenprop ';' {} ; -tokenprop: DEFINE_KEY TOKEN_KEY STRING '(' INTEGER ')' { $$ = slgh->defineToken($3,$5); } +tokenprop: DEFINE_KEY TOKEN_KEY STRING '(' INTEGER ')' { $$ = slgh->defineToken($3,$5,0); } + | 
DEFINE_KEY TOKEN_KEY STRING '(' INTEGER ')' ENDIAN_KEY '=' LITTLE_KEY { $$ = slgh->defineToken($3,$5,-1); } + | DEFINE_KEY TOKEN_KEY STRING '(' INTEGER ')' ENDIAN_KEY '=' BIG_KEY { $$ = slgh->defineToken($3,$5,1); } | tokenprop fielddef { $$ = $1; slgh->addTokenField($1,$2); } | DEFINE_KEY TOKEN_KEY anysymbol { string errmsg=$3->getName()+": redefined as a token"; yyerror(errmsg.c_str()); YYERROR; } ; diff --git a/Ghidra/Features/Decompiler/src/decompile/cpp/slghscan.l b/Ghidra/Features/Decompiler/src/decompile/cpp/slghscan.l index 0675c29872..0115555194 100644 --- a/Ghidra/Features/Decompiler/src/decompile/cpp/slghscan.l +++ b/Ghidra/Features/Decompiler/src/decompile/cpp/slghscan.l @@ -494,7 +494,7 @@ int4 scan_number(char *numtext,YYSTYPE *lval,bool signednum) [(),\-] { yylval.ch = yytext[0]; return yytext[0]; } \: { BEGIN(print); slgh->calcContextLayout(); yylval.ch = yytext[0]; return yytext[0]; } \{ { BEGIN(sem); yylval.ch = yytext[0]; return yytext[0]; } -#.*$ +#.* [\r\ \t\v]+ \n { slgh->nextLine(); } macro { BEGIN(macroblock); return MACRO_KEY; } @@ -540,7 +540,7 @@ with { BEGIN(pattern); withsection = 1; slgh->calcContextLayout(); return WITH values { return VALUES_KEY; } variables { return VARIABLES_KEY; } pcodeop { return PCODEOP_KEY; } -#.*$ +#.* [a-zA-Z_.][a-zA-Z0-9_.]* { return find_symbol(); } [0-9]|[1-9][0-9]+ { return scan_number(yytext,&yylval,false); } 0x[0-9a-fA-F]+ { return scan_number(yytext,&yylval,false); } @@ -582,7 +582,7 @@ with { BEGIN(pattern); withsection = 1; slgh->calcContextLayout(); return WITH \| { yylval.ch = yytext[0]; return (actionon==0) ? yytext[0] : OP_OR; } \^ { return OP_XOR; } [=(),:;+\-*/~<>] { yylval.ch = yytext[0]; return yytext[0]; } -#.*$ +#.* [a-zA-Z_.][a-zA-Z0-9_.]* { return find_symbol(); } [0-9]|[1-9][0-9]+ { return scan_number(yytext,&yylval,true); } 0x[0-9a-fA-F]+ { return scan_number(yytext,&yylval,true); } @@ -648,7 +648,7 @@ with { BEGIN(pattern); withsection = 1; slgh->calcContextLayout(); return WITH build { return BUILD_KEY; } local { return LOCAL_KEY; } [=(),:\[\];!&|^+\-*/%~<>] { yylval.ch = yytext[0]; return yytext[0]; } -#.*$ +#.* [a-zA-Z_.][a-zA-Z0-9_.]* { return find_symbol(); } [0-9]|[1-9][0-9]+ { return scan_number(yytext,&yylval,false); } 0x[0-9a-fA-F]+ { return scan_number(yytext,&yylval,false); } diff --git a/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/BaseLexer.g b/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/BaseLexer.g index f578ac40d8..aaec246069 100644 --- a/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/BaseLexer.g +++ b/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/BaseLexer.g @@ -119,6 +119,7 @@ tokens { OP_SUBTABLE; OP_TABLE; OP_TOKEN; + OP_TOKEN_ENDIAN; OP_TRUNCATION_SIZE; OP_TYPE; OP_UNIMPL; diff --git a/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/SleighCompiler.g b/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/SleighCompiler.g index 2e12078835..45f4069e5f 100644 --- a/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/SleighCompiler.g +++ b/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/SleighCompiler.g @@ -183,10 +183,20 @@ tokendef if (sym != null) { redefinedError(sym, n, "token"); } else { - $tokendef::tokenSymbol = sc.defineToken(find(n), $n.value.getText(), $i.value.intValue()); + $tokendef::tokenSymbol = sc.defineToken(find(n), $n.value.getText(), $i.value.intValue(), 0); } } } fielddefs) + | 
^(OP_TOKEN_ENDIAN n=specific_identifier["token definition"] i=integer s=endian { + if (n != null) { + SleighSymbol sym = sc.findSymbol($n.value.getText()); + if (sym != null) { + redefinedError(sym, n, "token"); + } else { + $tokendef::tokenSymbol = sc.defineToken(find(n), $n.value.getText(), $i.value.intValue(), $s.value ==0 ? -1 : 1); + } + } + } fielddefs) ; fielddefs diff --git a/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/SleighEcho.g b/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/SleighEcho.g index 2d3fc5bbae..fcb42e4c19 100644 --- a/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/SleighEcho.g +++ b/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/SleighEcho.g @@ -60,6 +60,7 @@ aligndef tokendef : ^(OP_TOKEN n=identifier i=integer { out("define token " + $n.value + "(" + $i.value + ")"); } fielddefs) + | ^(OP_TOKEN_ENDIAN n=identifier i=integer s=endian { out("define token endian" + $n.value + "(" + $i.value + ")"); } fielddefs) ; fielddefs diff --git a/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/SleighParser.g b/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/SleighParser.g index 74f75b56e0..6beda0844a 100644 --- a/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/SleighParser.g +++ b/Ghidra/Framework/SoftwareModeling/src/main/antlr/ghidra/sleigh/grammar/SleighParser.g @@ -74,6 +74,7 @@ aligndef tokendef : lc=KEY_DEFINE KEY_TOKEN identifier LPAREN integer rp=RPAREN fielddefs[$rp] -> ^(OP_TOKEN[$lc, "define token"] identifier integer fielddefs) + | lc=KEY_DEFINE KEY_TOKEN identifier LPAREN integer RPAREN rp=KEY_ENDIAN ASSIGN endian fielddefs[$rp] -> ^(OP_TOKEN_ENDIAN[$lc, "define token"] identifier integer endian fielddefs) ; fielddefs[Token lc] diff --git a/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/app/plugin/processors/sleigh/SleighLanguage.java b/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/app/plugin/processors/sleigh/SleighLanguage.java index 7501eb3626..c900152a38 100644 --- a/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/app/plugin/processors/sleigh/SleighLanguage.java +++ b/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/app/plugin/processors/sleigh/SleighLanguage.java @@ -889,9 +889,9 @@ public class SleighLanguage implements Language { } boolean isBigEndian = SpecXmlUtils.decodeBoolean(el.getAttribute("bigendian")); // check the instruction endianess, not the program data endianess - if (isBigEndian ^ description.getInstructionEndian().isBigEndian()) { + if (isBigEndian ^ description.getEndian().isBigEndian()) { throw new SleighException( - ".ldefs says " + getLanguageID() + " is " + description.getInstructionEndian() + + ".ldefs says " + getLanguageID() + " is " + description.getEndian() + " but .sla says " + el.getAttribute("bigendian")); } uniqueBase = SpecXmlUtils.decodeLong(el.getAttribute("uniqbase")); diff --git a/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/pcodeCPort/slgh_compile/SleighCompile.java b/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/pcodeCPort/slgh_compile/SleighCompile.java index e96ee6ba85..ad779e50bc 100644 --- a/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/pcodeCPort/slgh_compile/SleighCompile.java +++ b/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/pcodeCPort/slgh_compile/SleighCompile.java @@ -529,8 +529,8 @@ public class SleighCompile extends SleighBase { static int findCollision(Map local2Operand, 
ArrayList locals, int operand) { Integer boxOperand = Integer.valueOf(operand); - for (int i = 0; i < locals.size(); ++i) { - Integer previous = local2Operand.putIfAbsent(locals.get(i), boxOperand); + for (Long local : locals) { + Integer previous = local2Operand.putIfAbsent(local, boxOperand); if (previous != null) { if (previous.intValue() != operand) { return previous.intValue(); @@ -842,7 +842,7 @@ public class SleighCompile extends SleighBase { } // Parser functions - public TokenSymbol defineToken(Location location, String name, long sz) { + public TokenSymbol defineToken(Location location, String name, long sz, int endian) { entry("defineToken", location, name, sz); int size = (int) sz; if ((size & 7) != 0) { @@ -853,8 +853,15 @@ public class SleighCompile extends SleighBase { else { size = size / 8; } + boolean isBig; + if (endian == 0) { + isBig = isBigEndian(); + } + else { + isBig = (endian > 0); + } ghidra.pcodeCPort.context.Token newtoken = - new ghidra.pcodeCPort.context.Token(name, size, isBigEndian(), tokentable.size()); + new ghidra.pcodeCPort.context.Token(name, size, isBig, tokentable.size()); tokentable.push_back(newtoken); TokenSymbol res = new TokenSymbol(location, newtoken); addSymbol(res); From 1022be3a2279363416314e881e9a18c667460c51 Mon Sep 17 00:00:00 2001 From: caheckman <48068198+caheckman@users.noreply.github.com> Date: Tue, 20 Oct 2020 15:35:04 -0400 Subject: [PATCH 3/5] eliminate rest of simd_address_at --- .../data/languages/AARCH64instructions.sinc | 59 +- .../AARCH64/data/languages/AARCH64ldst.sinc | 8605 ++++++----------- 2 files changed, 2748 insertions(+), 5916 deletions(-) diff --git a/Ghidra/Processors/AARCH64/data/languages/AARCH64instructions.sinc b/Ghidra/Processors/AARCH64/data/languages/AARCH64instructions.sinc index 85531dc222..a3e29b7915 100644 --- a/Ghidra/Processors/AARCH64/data/languages/AARCH64instructions.sinc +++ b/Ghidra/Processors/AARCH64/data/languages/AARCH64instructions.sinc @@ -32,7 +32,11 @@ # and the destination is not the upper half of the register (ie, bit 30 q=0) # then the unused remaining upper bits must be set to 0. +@if DATA_ENDIAN == "little" define endian=little; +@else +define endian=big; +@endif define alignment=4; # Unlike the above, these are preprocessor macros. Use them with e.g. $(TAG_GRANULE) in SLEIGH statements. 
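# Illustrative sketch, not part of the applied diff: a minimal SLEIGH example of the
# lane-access rewrite carried out in the AARCH64ldst.sinc hunks below. The old semantics
# built a lane address with the simd_address_at macro and stored through a
# [register]-space pointer; the new semantics assign the lane's bit range directly, so
# lane 3 of a 64-bit vector with 1-byte elements becomes bit range [24,8]. The macro name
# ld1_lane3_sketch and the parameter names vreg/ptr are hypothetical, used only for this
# example; the equivalent real rewrites appear verbatim in the hunks that follow.
macro ld1_lane3_sketch(vreg, ptr)
{
	# old form (removed by this patch):
	#   local tmpa:4 = 0;
	#   simd_address_at(tmpa, vreg, 3, 1, 8);
	#   * [register]:1 tmpa = *:1 ptr;
	# new form (added by this patch): direct bit-range assignment, no per-endian
	# address arithmetic needed
	vreg[24,8] = *:1 ptr;
}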
@@ -1008,7 +1012,7 @@ define context contextreg ShowMemTag = (24,24) noflow ; -define token instrAARCH64 (32) +define token instrAARCH64 (32) endian = little Rm = (16,20) Rn = (5,9) @@ -3898,112 +3902,59 @@ macro set_NZCV(value, condMask) # Macro to access simd lanes -macro simd_address_at(dest, reg, elem, esize, vsize) -{ -@if DATA_ENDIAN == "little" - dest = ® + elem * esize; -@else - dest = ® + vsize - esize - elem * esize; -@endif -} - # Macros to zero the high bits of the Z or Q registers # These are friendlier to the decompiler macro zext_zb(reg) { -@if DATA_ENDIAN == "little" reg[8,56] = 0; reg[64,64] = 0; reg[128,64] = 0; reg[192,64] = 0; -@else - reg[192,56] = 0; - reg[128,64] = 0; - reg[64,64] = 0; - reg[0,64] = 0; -@endif } macro zext_zh(reg) { -@if DATA_ENDIAN == "little" reg[16,48] = 0; reg[64,64] = 0; reg[128,64] = 0; reg[192,64] = 0; -@else - reg[192,48] = 0; - reg[128,64] = 0; - reg[64,64] = 0; - reg[0,64] = 0; -@endif } macro zext_zs(reg) { -@if DATA_ENDIAN == "little" reg[32,32] = 0; reg[64,64] = 0; reg[128,64] = 0; reg[192,64] = 0; -@else - reg[192,32] = 0; - reg[128,64] = 0; - reg[64,64] = 0; - reg[0,64] = 0; -@endif } macro zext_zd(reg) { -@if DATA_ENDIAN == "little" reg[64,64] = 0; reg[128,64] = 0; reg[192,64] = 0; -@else - reg[0,64] = 0; - reg[64,64] = 0; - reg[128,64] = 0; -@endif } macro zext_zq(reg) { -@if DATA_ENDIAN == "little" reg[128,64] = 0; reg[192,64] = 0; -@else - reg[0,64] = 0; - reg[64,64] = 0; -@endif } macro zext_rb(reg) { -@if DATA_ENDIAN == "little" reg[8,56] = 0; -@else - reg[0,56] = 0; -@endif } macro zext_rh(reg) { -@if DATA_ENDIAN == "little" reg[16,48] = 0; -@else - reg[0,48] = 0; -@endif } macro zext_rs(reg) { -@if DATA_ENDIAN == "little" reg[32,32] = 0; -@else - reg[0,32] = 0; -@endif } # SECTION instructions diff --git a/Ghidra/Processors/AARCH64/data/languages/AARCH64ldst.sinc b/Ghidra/Processors/AARCH64/data/languages/AARCH64ldst.sinc index 92ddd5fac7..afe3af0a65 100644 --- a/Ghidra/Processors/AARCH64/data/languages/AARCH64ldst.sinc +++ b/Ghidra/Processors/AARCH64/data/languages/AARCH64ldst.sinc @@ -63,102 +63,69 @@ ldst_wback: ", "^Rm_GPR64 is b_23=1 & Rn_GPR64xsp & Rm_GPR64 { Rn_GPR64xsp = Rm_ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[56,8] = *:1 tmp_ldXn; 
tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -173,54 +140,37 @@ is b_31=0 & b_30=0 & 
b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -235,30 +185,21 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; 
+ Rtt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -273,18 +214,13 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 8, 8); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR64, 0, 8, 8); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR64, 0, 8, 8); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR64, 0, 8, 8); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtttt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -299,198 +235,133 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 
tmp_ldXn; + Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 
1; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = 
tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -505,102 +376,69 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); 
- * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -615,54 +453,37 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 
tmp_ldXn; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -677,30 +498,21 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -715,78 +527,53 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, 
Rt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -801,42 +588,29 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 
& b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -851,24 +625,17 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -883,15 +650,11 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - 
simd_address_at(tmpa, Rt_VPR64, 0, 8, 8); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR64, 0, 8, 8); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR64, 0, 8, 8); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -906,150 +669,101 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - 
simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 
16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1064,78 +778,53 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - 
simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -1150,42 +839,29 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -1200,24 +876,17 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 
0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -1232,30 +901,21 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1270,18 +930,13 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -1296,12 +951,9 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - 
simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -1316,9 +968,7 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 8, 8); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -1333,54 +983,37 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1395,30 +1028,21 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & 
b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -1433,18 +1057,13 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -1459,12 +1078,9 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -1479,54 +1095,37 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + 
Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1541,30 +1140,21 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -1579,18 +1169,13 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=0 & 
b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -1605,12 +1190,9 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 8, 8); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR64, 0, 8, 8); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -1625,102 +1207,69 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 
tmp_ldXn; + Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1735,54 +1284,37 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 
tmp_ldXn; + Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -1797,30 +1329,21 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -1835,18 +1358,13 @@ is b_31=0 & b_30=1 
& b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -1861,9 +1379,7 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1878,9 +1394,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1895,9 +1409,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1912,9 +1424,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1929,9 +1439,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1946,9 +1454,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & 
Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1963,9 +1469,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1980,9 +1484,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -1997,9 +1499,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -2014,9 +1514,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -2031,9 +1529,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -2048,9 +1544,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -2065,9 +1559,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[96,8] = *:1 tmp_ldXn; 
tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -2082,9 +1574,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -2099,9 +1589,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -2116,9 +1604,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -2133,9 +1619,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -2150,9 +1634,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -2167,9 +1649,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -2184,9 +1664,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -2201,9 +1679,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & is 
b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -2218,9 +1694,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -2235,9 +1709,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -2252,9 +1724,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -2269,9 +1739,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -2286,9 +1754,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -2303,9 +1769,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -2320,9 +1784,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = 
Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -2337,9 +1799,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -2354,9 +1814,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -2374,24 +1832,15 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; - local tmpa:4 = 0; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - * [register]:1 tmpa = tmpv; + Rt_VPR64[0,8] = tmpv; + Rt_VPR64[8,8] = tmpv; + Rt_VPR64[16,8] = tmpv; + Rt_VPR64[24,8] = tmpv; + Rt_VPR64[32,8] = tmpv; + Rt_VPR64[40,8] = tmpv; + Rt_VPR64[48,8] = tmpv; + Rt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -2409,16 +1858,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; - local tmpa:4 = 0; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - * [register]:2 tmpa = tmpv; + Rt_VPR64[0,16] = tmpv; + Rt_VPR64[16,16] = tmpv; + Rt_VPR64[32,16] = tmpv; + Rt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -2436,12 +1880,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; - local tmpa:4 = 0; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - * [register]:4 tmpa = tmpv; + Rt_VPR64[0,32] = tmpv; + Rt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -2459,10 +1900,8 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; - local tmpa:4 = 0; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rt_VPR64, 0, 8, 8); - * 
[register]:8 tmpa = tmpv; + Rt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -2480,40 +1919,23 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; - local tmpa:4 = 0; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - * [register]:1 tmpa = tmpv; + Rt_VPR128[0,8] = tmpv; + Rt_VPR128[8,8] = tmpv; + Rt_VPR128[16,8] = tmpv; + Rt_VPR128[24,8] = tmpv; + Rt_VPR128[32,8] = tmpv; + Rt_VPR128[40,8] = tmpv; + Rt_VPR128[48,8] = tmpv; + Rt_VPR128[56,8] = tmpv; + Rt_VPR128[64,8] = tmpv; + Rt_VPR128[72,8] = tmpv; + Rt_VPR128[80,8] = tmpv; + Rt_VPR128[88,8] = tmpv; + Rt_VPR128[96,8] = tmpv; + Rt_VPR128[104,8] = tmpv; + Rt_VPR128[112,8] = tmpv; + Rt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -2531,24 +1953,15 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; - local tmpa:4 = 0; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - * [register]:2 tmpa = tmpv; + Rt_VPR128[0,16] = tmpv; + Rt_VPR128[16,16] = tmpv; + Rt_VPR128[32,16] = tmpv; + Rt_VPR128[48,16] = tmpv; + Rt_VPR128[64,16] = tmpv; + Rt_VPR128[80,16] = tmpv; + Rt_VPR128[96,16] = tmpv; + Rt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -2566,16 +1979,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; - local tmpa:4 = 0; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - * 
[register]:4 tmpa = tmpv; + Rt_VPR128[0,32] = tmpv; + Rt_VPR128[32,32] = tmpv; + Rt_VPR128[64,32] = tmpv; + Rt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -2593,12 +2001,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; - local tmpa:4 = 0; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - * [register]:8 tmpa = tmpv; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - * [register]:8 tmpa = tmpv; + Rt_VPR128[0,64] = tmpv; + Rt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -2613,54 +2018,37 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -2675,30 +2063,21 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = 
Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -2713,18 +2092,13 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -2739,102 +2113,69 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + 
Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts 
build ldst_wback; @@ -2849,54 +2190,37 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -2911,30 +2235,21 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + 
Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -2949,18 +2264,13 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -2975,12 +2285,9 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -2995,12 +2302,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3015,12 +2319,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts 
build ldst_wback; @@ -3035,12 +2336,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3055,12 +2353,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3075,12 +2370,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3095,12 +2387,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3115,12 +2404,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3135,12 +2421,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & 
ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3155,12 +2438,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3175,12 +2455,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3195,12 +2472,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3215,12 +2489,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3235,12 +2506,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - * [register]:1 
tmpa = *:1 tmp_ldXn; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3255,12 +2523,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3275,12 +2540,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3295,12 +2557,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -3315,12 +2574,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -3335,12 +2591,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -3355,12 +2608,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & 
b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -3375,12 +2625,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -3395,12 +2642,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -3415,12 +2659,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -3435,12 +2676,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -3455,12 +2693,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + 
Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -3475,12 +2710,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -3495,12 +2727,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -3515,12 +2744,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -3535,12 +2761,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -3555,12 +2778,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -3578,42 +2798,25 @@ is b_31=0 & 
b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; - local tmpa:4 = 0; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - * [register]:1 tmpa = tmpv; + Rtt_VPR64[0,8] = tmpv; + Rtt_VPR64[8,8] = tmpv; + Rtt_VPR64[16,8] = tmpv; + Rtt_VPR64[24,8] = tmpv; + Rtt_VPR64[32,8] = tmpv; + Rtt_VPR64[40,8] = tmpv; + Rtt_VPR64[48,8] = tmpv; + Rtt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - * [register]:1 tmpa = tmpv; + Rtt_VPR64[0,8] = tmpv; + Rtt_VPR64[8,8] = tmpv; + Rtt_VPR64[16,8] = tmpv; + Rtt_VPR64[24,8] = tmpv; + Rtt_VPR64[32,8] = tmpv; + Rtt_VPR64[40,8] = tmpv; + Rtt_VPR64[48,8] = tmpv; + Rtt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3631,26 +2834,17 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; - local tmpa:4 = 0; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - * [register]:2 tmpa = tmpv; + Rtt_VPR64[0,16] = tmpv; + Rtt_VPR64[16,16] = tmpv; + Rtt_VPR64[32,16] = tmpv; + Rtt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - * [register]:2 tmpa = tmpv; + Rtt_VPR64[0,16] = tmpv; + Rtt_VPR64[16,16] = tmpv; + Rtt_VPR64[32,16] = tmpv; + Rtt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -3668,18 +2862,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; - local tmpa:4 = 0; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - * [register]:4 tmpa = tmpv; + Rtt_VPR64[0,32] = tmpv; + Rtt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - * 
[register]:4 tmpa = tmpv; + Rtt_VPR64[0,32] = tmpv; + Rtt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -3697,14 +2886,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; - local tmpa:4 = 0; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR64, 0, 8, 8); - * [register]:8 tmpa = tmpv; + Rtt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR64, 0, 8, 8); - * [register]:8 tmpa = tmpv; + Rtt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -3722,74 +2908,41 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; - local tmpa:4 = 0; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - * [register]:1 tmpa = tmpv; + Rtt_VPR128[0,8] = tmpv; + Rtt_VPR128[8,8] = tmpv; + Rtt_VPR128[16,8] = tmpv; + Rtt_VPR128[24,8] = tmpv; + Rtt_VPR128[32,8] = tmpv; + Rtt_VPR128[40,8] = tmpv; + Rtt_VPR128[48,8] = tmpv; + Rtt_VPR128[56,8] = tmpv; + Rtt_VPR128[64,8] = tmpv; + Rtt_VPR128[72,8] = tmpv; + Rtt_VPR128[80,8] = tmpv; + Rtt_VPR128[88,8] = tmpv; + Rtt_VPR128[96,8] = tmpv; + Rtt_VPR128[104,8] = tmpv; + Rtt_VPR128[112,8] = tmpv; + Rtt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - * [register]:1 tmpa = tmpv; - 
simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - * [register]:1 tmpa = tmpv; + Rtt_VPR128[0,8] = tmpv; + Rtt_VPR128[8,8] = tmpv; + Rtt_VPR128[16,8] = tmpv; + Rtt_VPR128[24,8] = tmpv; + Rtt_VPR128[32,8] = tmpv; + Rtt_VPR128[40,8] = tmpv; + Rtt_VPR128[48,8] = tmpv; + Rtt_VPR128[56,8] = tmpv; + Rtt_VPR128[64,8] = tmpv; + Rtt_VPR128[72,8] = tmpv; + Rtt_VPR128[80,8] = tmpv; + Rtt_VPR128[88,8] = tmpv; + Rtt_VPR128[96,8] = tmpv; + Rtt_VPR128[104,8] = tmpv; + Rtt_VPR128[112,8] = tmpv; + Rtt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -3807,42 +2960,25 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; - local tmpa:4 = 0; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - * [register]:2 tmpa = tmpv; + Rtt_VPR128[0,16] = tmpv; + Rtt_VPR128[16,16] = tmpv; + Rtt_VPR128[32,16] = tmpv; + Rtt_VPR128[48,16] = tmpv; + Rtt_VPR128[64,16] = tmpv; + Rtt_VPR128[80,16] = tmpv; + Rtt_VPR128[96,16] = tmpv; + Rtt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - * [register]:2 tmpa = tmpv; + Rtt_VPR128[0,16] = tmpv; + Rtt_VPR128[16,16] = tmpv; + Rtt_VPR128[32,16] = tmpv; + Rtt_VPR128[48,16] = tmpv; + Rtt_VPR128[64,16] = tmpv; + Rtt_VPR128[80,16] = tmpv; + Rtt_VPR128[96,16] = tmpv; + Rtt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -3860,26 +2996,17 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; - local tmpa:4 = 0; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - * [register]:4 tmpa = tmpv; + Rtt_VPR128[0,32] = tmpv; + Rtt_VPR128[32,32] = tmpv; + Rtt_VPR128[64,32] = tmpv; + Rtt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 
16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - * [register]:4 tmpa = tmpv; + Rtt_VPR128[0,32] = tmpv; + Rtt_VPR128[32,32] = tmpv; + Rtt_VPR128[64,32] = tmpv; + Rtt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -3897,18 +3024,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; - local tmpa:4 = 0; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - * [register]:8 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - * [register]:8 tmpa = tmpv; + Rtt_VPR128[0,64] = tmpv; + Rtt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - * [register]:8 tmpa = tmpv; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - * [register]:8 tmpa = tmpv; + Rtt_VPR128[0,64] = tmpv; + Rtt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -3923,78 +3045,53 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; 
+ Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4009,42 +3106,29 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -4059,24 +3143,17 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & 
Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -4091,150 +3168,101 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - 
simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - * [register]:1 
tmpa = *:1 tmp_ldXn; + Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4249,78 +3277,53 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - 
simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -4335,42 +3338,29 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + 
Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -4385,24 +3375,17 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -4417,15 +3400,11 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4440,15 +3419,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4463,15 +3438,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 
2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4486,15 +3457,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4509,15 +3476,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4532,15 +3495,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4555,15 +3514,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; 
   tmp_ldXn = tmp_ldXn + 1;
-  simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rttt_VPR128[48,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
   # neglected zexts
   build ldst_wback;
@@ -4578,15 +3533,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 &
 is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
 {
   tmp_ldXn = Rn_GPR64xsp;
-  local tmpa:4 = 0;
-  simd_address_at(tmpa, Rt_VPR128, 7, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rt_VPR128[56,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
-  simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rtt_VPR128[56,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
-  simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rttt_VPR128[56,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
   # neglected zexts
   build ldst_wback;
@@ -4601,15 +3552,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 &
 is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
 {
   tmp_ldXn = Rn_GPR64xsp;
-  local tmpa:4 = 0;
-  simd_address_at(tmpa, Rt_VPR128, 8, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rt_VPR128[64,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
-  simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rtt_VPR128[64,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
-  simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rttt_VPR128[64,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
   # neglected zexts
   build ldst_wback;
@@ -4624,15 +3571,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 &
 is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
 {
   tmp_ldXn = Rn_GPR64xsp;
-  local tmpa:4 = 0;
-  simd_address_at(tmpa, Rt_VPR128, 9, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rt_VPR128[72,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
-  simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rtt_VPR128[72,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
-  simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rttt_VPR128[72,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
   # neglected zexts
   build ldst_wback;
@@ -4647,15 +3590,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 &
 is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
 {
   tmp_ldXn = Rn_GPR64xsp;
-  local tmpa:4 = 0;
-  simd_address_at(tmpa, Rt_VPR128, 10, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rt_VPR128[80,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
-  simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rtt_VPR128[80,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
-  simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16);
-  * [register]:1 tmpa = *:1 tmp_ldXn;
+  Rttt_VPR128[80,8] = *:1 tmp_ldXn;
   tmp_ldXn = tmp_ldXn + 1;
   # neglected zexts
   build ldst_wback;
@@ -4670,15
+3609,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4693,15 +3628,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4716,15 +3647,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4739,15 +3666,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4762,15 +3685,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & 
vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -4785,15 +3704,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -4808,15 +3723,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -4831,15 +3742,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -4854,15 +3761,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 
tmp_ldXn; + Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -4877,15 +3780,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -4900,15 +3799,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -4923,15 +3818,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -4946,15 +3837,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; 
- simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -4969,15 +3856,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -4992,15 +3875,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -5015,15 +3894,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -5038,15 +3913,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -5061,15 +3932,11 @@ is b_31=0 & 
b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -5084,15 +3951,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -5110,60 +3973,35 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; - local tmpa:4 = 0; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 3, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = tmpv; + Rttt_VPR64[0,8] = tmpv; + Rttt_VPR64[8,8] = tmpv; + Rttt_VPR64[16,8] = tmpv; + Rttt_VPR64[24,8] = tmpv; + Rttt_VPR64[32,8] = tmpv; + Rttt_VPR64[40,8] = tmpv; + Rttt_VPR64[48,8] = tmpv; + Rttt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 3, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = tmpv; + Rttt_VPR64[0,8] = tmpv; + Rttt_VPR64[8,8] = tmpv; + Rttt_VPR64[16,8] = tmpv; + Rttt_VPR64[24,8] = tmpv; + Rttt_VPR64[32,8] = tmpv; + Rttt_VPR64[40,8] = tmpv; + Rttt_VPR64[48,8] = tmpv; + Rttt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; 
- simd_address_at(tmpa, Rttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 3, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = tmpv; + Rttt_VPR64[0,8] = tmpv; + Rttt_VPR64[8,8] = tmpv; + Rttt_VPR64[16,8] = tmpv; + Rttt_VPR64[24,8] = tmpv; + Rttt_VPR64[32,8] = tmpv; + Rttt_VPR64[40,8] = tmpv; + Rttt_VPR64[48,8] = tmpv; + Rttt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -5181,36 +4019,23 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; - local tmpa:4 = 0; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = tmpv; + Rttt_VPR64[0,16] = tmpv; + Rttt_VPR64[16,16] = tmpv; + Rttt_VPR64[32,16] = tmpv; + Rttt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = tmpv; + Rttt_VPR64[0,16] = tmpv; + Rttt_VPR64[16,16] = tmpv; + Rttt_VPR64[32,16] = tmpv; + Rttt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = tmpv; + Rttt_VPR64[0,16] = tmpv; + Rttt_VPR64[16,16] = tmpv; + Rttt_VPR64[32,16] = tmpv; + Rttt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -5228,24 +4053,17 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; - local tmpa:4 = 0; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = tmpv; + Rttt_VPR64[0,32] = tmpv; + Rttt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = tmpv; + Rttt_VPR64[0,32] = tmpv; + Rttt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = tmpv; + Rttt_VPR64[0,32] = tmpv; + Rttt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -5263,18 +4081,14 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & { tmp_ldXn = 
Rn_GPR64xsp; local tmpv:8 = 0; - local tmpa:4 = 0; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR64, 0, 8, 8); - * [register]:8 tmpa = tmpv; + Rttt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR64, 0, 8, 8); - * [register]:8 tmpa = tmpv; + Rttt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR64, 0, 8, 8); - * [register]:8 tmpa = tmpv; + Rttt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -5292,108 +4106,59 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; - local tmpa:4 = 0; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = tmpv; + Rttt_VPR128[0,8] = tmpv; + Rttt_VPR128[8,8] = tmpv; + Rttt_VPR128[16,8] = tmpv; + Rttt_VPR128[24,8] = tmpv; + Rttt_VPR128[32,8] = tmpv; + Rttt_VPR128[40,8] = tmpv; + Rttt_VPR128[48,8] = tmpv; + Rttt_VPR128[56,8] = tmpv; + Rttt_VPR128[64,8] = tmpv; + Rttt_VPR128[72,8] = tmpv; + Rttt_VPR128[80,8] = tmpv; + Rttt_VPR128[88,8] = tmpv; + Rttt_VPR128[96,8] = tmpv; + Rttt_VPR128[104,8] = tmpv; + Rttt_VPR128[112,8] = tmpv; + Rttt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - * 
[register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = tmpv; + Rttt_VPR128[0,8] = tmpv; + Rttt_VPR128[8,8] = tmpv; + Rttt_VPR128[16,8] = tmpv; + Rttt_VPR128[24,8] = tmpv; + Rttt_VPR128[32,8] = tmpv; + Rttt_VPR128[40,8] = tmpv; + Rttt_VPR128[48,8] = tmpv; + Rttt_VPR128[56,8] = tmpv; + Rttt_VPR128[64,8] = tmpv; + Rttt_VPR128[72,8] = tmpv; + Rttt_VPR128[80,8] = tmpv; + Rttt_VPR128[88,8] = tmpv; + Rttt_VPR128[96,8] = tmpv; + Rttt_VPR128[104,8] = tmpv; + Rttt_VPR128[112,8] = tmpv; + Rttt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = tmpv; + Rttt_VPR128[0,8] = tmpv; + Rttt_VPR128[8,8] = tmpv; + Rttt_VPR128[16,8] = tmpv; + Rttt_VPR128[24,8] = tmpv; + Rttt_VPR128[32,8] = tmpv; + Rttt_VPR128[40,8] = tmpv; + Rttt_VPR128[48,8] = tmpv; + Rttt_VPR128[56,8] = tmpv; + Rttt_VPR128[64,8] = tmpv; + Rttt_VPR128[72,8] = tmpv; + Rttt_VPR128[80,8] = tmpv; + Rttt_VPR128[88,8] = tmpv; + Rttt_VPR128[96,8] = tmpv; + Rttt_VPR128[104,8] = tmpv; + Rttt_VPR128[112,8] = tmpv; + Rttt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -5411,60 +4176,35 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; - local tmpa:4 = 0; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = tmpv; + Rttt_VPR128[0,16] = tmpv; + Rttt_VPR128[16,16] = tmpv; + Rttt_VPR128[32,16] = tmpv; + Rttt_VPR128[48,16] = tmpv; + Rttt_VPR128[64,16] = tmpv; + Rttt_VPR128[80,16] = tmpv; + Rttt_VPR128[96,16] = tmpv; + Rttt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; - 
simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = tmpv; + Rttt_VPR128[0,16] = tmpv; + Rttt_VPR128[16,16] = tmpv; + Rttt_VPR128[32,16] = tmpv; + Rttt_VPR128[48,16] = tmpv; + Rttt_VPR128[64,16] = tmpv; + Rttt_VPR128[80,16] = tmpv; + Rttt_VPR128[96,16] = tmpv; + Rttt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = tmpv; + Rttt_VPR128[0,16] = tmpv; + Rttt_VPR128[16,16] = tmpv; + Rttt_VPR128[32,16] = tmpv; + Rttt_VPR128[48,16] = tmpv; + Rttt_VPR128[64,16] = tmpv; + Rttt_VPR128[80,16] = tmpv; + Rttt_VPR128[96,16] = tmpv; + Rttt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -5482,36 +4222,23 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; - local tmpa:4 = 0; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = tmpv; + Rttt_VPR128[0,32] = tmpv; + Rttt_VPR128[32,32] = tmpv; + Rttt_VPR128[64,32] = tmpv; + Rttt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = tmpv; + Rttt_VPR128[0,32] = tmpv; + Rttt_VPR128[32,32] = tmpv; + Rttt_VPR128[64,32] = tmpv; + Rttt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = tmpv; + Rttt_VPR128[0,32] = tmpv; + Rttt_VPR128[32,32] = tmpv; + Rttt_VPR128[64,32] = tmpv; + Rttt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -5529,24 +4256,17 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & { 
tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; - local tmpa:4 = 0; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = tmpv; + Rttt_VPR128[0,64] = tmpv; + Rttt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = tmpv; + Rttt_VPR128[0,64] = tmpv; + Rttt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = tmpv; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = tmpv; + Rttt_VPR128[0,64] = tmpv; + Rttt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -5561,102 +4281,69 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 3, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - 
simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -5671,54 +4358,37 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 
2; - simd_address_at(tmpa, Rttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -5733,30 +4403,21 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -5771,198 +4432,133 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[0,8] = *:1 tmp_ldXn; 
tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, 
Rtt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - * [register]:1 
tmpa = *:1 tmp_ldXn; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -5977,102 +4573,69 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + 
Rttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[112,16] = 
*:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -6087,54 +4650,37 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -6149,30 +4695,21 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, 
Rtt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -6187,18 +4724,13 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6213,18 +4745,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6239,18 +4766,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[16,8] = 
*:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6265,18 +4787,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6291,18 +4808,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6317,18 +4829,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ 
-6343,18 +4850,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6369,18 +4871,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6395,18 +4892,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6421,18 +4913,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = 
tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6447,18 +4934,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6473,18 +4955,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6499,18 +4976,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6525,18 
+4997,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6551,18 +5018,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6577,18 +5039,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = *:1 tmp_ldXn; + Rtttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -6603,18 +5060,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[0,16] = *:2 
tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -6629,18 +5081,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -6655,18 +5102,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -6681,18 +5123,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build 
ldst_wback; @@ -6707,18 +5144,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -6733,18 +5165,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -6759,18 +5186,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -6785,18 +5207,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + 
Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = *:2 tmp_ldXn; + Rtttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -6811,18 +5228,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -6837,18 +5249,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -6863,18 +5270,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # 
neglected zexts build ldst_wback; @@ -6889,18 +5291,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = *:4 tmp_ldXn; + Rtttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -6915,18 +5312,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -6941,18 +5333,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = *:8 tmp_ldXn; + Rtttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -6970,78 +5357,45 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; - local tmpa:4 = 0; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 3, 1, 8); - * 
[register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = tmpv; + Rtttt_VPR64[0,8] = tmpv; + Rtttt_VPR64[8,8] = tmpv; + Rtttt_VPR64[16,8] = tmpv; + Rtttt_VPR64[24,8] = tmpv; + Rtttt_VPR64[32,8] = tmpv; + Rtttt_VPR64[40,8] = tmpv; + Rtttt_VPR64[48,8] = tmpv; + Rtttt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 3, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = tmpv; + Rtttt_VPR64[0,8] = tmpv; + Rtttt_VPR64[8,8] = tmpv; + Rtttt_VPR64[16,8] = tmpv; + Rtttt_VPR64[24,8] = tmpv; + Rtttt_VPR64[32,8] = tmpv; + Rtttt_VPR64[40,8] = tmpv; + Rtttt_VPR64[48,8] = tmpv; + Rtttt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 3, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = tmpv; + Rtttt_VPR64[0,8] = tmpv; + Rtttt_VPR64[8,8] = tmpv; + Rtttt_VPR64[16,8] = tmpv; + Rtttt_VPR64[24,8] = tmpv; + Rtttt_VPR64[32,8] = tmpv; + Rtttt_VPR64[40,8] = tmpv; + Rtttt_VPR64[48,8] = tmpv; + Rtttt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 1, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 2, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 3, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 4, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 5, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 6, 1, 8); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 7, 1, 8); - * [register]:1 tmpa = tmpv; + Rtttt_VPR64[0,8] = tmpv; + Rtttt_VPR64[8,8] = tmpv; + Rtttt_VPR64[16,8] = tmpv; + Rtttt_VPR64[24,8] = tmpv; + Rtttt_VPR64[32,8] = tmpv; + Rtttt_VPR64[40,8] = tmpv; + Rtttt_VPR64[48,8] = tmpv; + Rtttt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -7059,46 +5413,29 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; - local tmpa:4 = 0; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = tmpv; - 
simd_address_at(tmpa, Rtttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = tmpv; + Rtttt_VPR64[0,16] = tmpv; + Rtttt_VPR64[16,16] = tmpv; + Rtttt_VPR64[32,16] = tmpv; + Rtttt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = tmpv; + Rtttt_VPR64[0,16] = tmpv; + Rtttt_VPR64[16,16] = tmpv; + Rtttt_VPR64[32,16] = tmpv; + Rtttt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = tmpv; + Rtttt_VPR64[0,16] = tmpv; + Rtttt_VPR64[16,16] = tmpv; + Rtttt_VPR64[32,16] = tmpv; + Rtttt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 1, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 2, 2, 8); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 3, 2, 8); - * [register]:2 tmpa = tmpv; + Rtttt_VPR64[0,16] = tmpv; + Rtttt_VPR64[16,16] = tmpv; + Rtttt_VPR64[32,16] = tmpv; + Rtttt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -7116,30 +5453,21 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; - local tmpa:4 = 0; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = tmpv; + Rtttt_VPR64[0,32] = tmpv; + Rtttt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = tmpv; + Rtttt_VPR64[0,32] = tmpv; + Rtttt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = tmpv; + Rtttt_VPR64[0,32] = tmpv; + Rtttt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 4, 8); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR64, 1, 4, 8); - * [register]:4 tmpa = tmpv; + Rtttt_VPR64[0,32] = tmpv; + Rtttt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -7157,22 +5485,17 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; - local tmpa:4 = 0; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 8, 8); - * [register]:8 tmpa = tmpv; + Rtttt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 8, 8); - * [register]:8 tmpa = tmpv; + Rtttt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; - 
simd_address_at(tmpa, Rtttt_VPR64, 0, 8, 8); - * [register]:8 tmpa = tmpv; + Rtttt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR64, 0, 8, 8); - * [register]:8 tmpa = tmpv; + Rtttt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -7190,142 +5513,77 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; - local tmpa:4 = 0; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = tmpv; + Rtttt_VPR128[0,8] = tmpv; + Rtttt_VPR128[8,8] = tmpv; + Rtttt_VPR128[16,8] = tmpv; + Rtttt_VPR128[24,8] = tmpv; + Rtttt_VPR128[32,8] = tmpv; + Rtttt_VPR128[40,8] = tmpv; + Rtttt_VPR128[48,8] = tmpv; + Rtttt_VPR128[56,8] = tmpv; + Rtttt_VPR128[64,8] = tmpv; + Rtttt_VPR128[72,8] = tmpv; + Rtttt_VPR128[80,8] = tmpv; + Rtttt_VPR128[88,8] = tmpv; + Rtttt_VPR128[96,8] = tmpv; + Rtttt_VPR128[104,8] = tmpv; + Rtttt_VPR128[112,8] = tmpv; + Rtttt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = 
tmpv; + Rtttt_VPR128[0,8] = tmpv; + Rtttt_VPR128[8,8] = tmpv; + Rtttt_VPR128[16,8] = tmpv; + Rtttt_VPR128[24,8] = tmpv; + Rtttt_VPR128[32,8] = tmpv; + Rtttt_VPR128[40,8] = tmpv; + Rtttt_VPR128[48,8] = tmpv; + Rtttt_VPR128[56,8] = tmpv; + Rtttt_VPR128[64,8] = tmpv; + Rtttt_VPR128[72,8] = tmpv; + Rtttt_VPR128[80,8] = tmpv; + Rtttt_VPR128[88,8] = tmpv; + Rtttt_VPR128[96,8] = tmpv; + Rtttt_VPR128[104,8] = tmpv; + Rtttt_VPR128[112,8] = tmpv; + Rtttt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 15, 1, 16); - * [register]:1 tmpa = tmpv; + Rtttt_VPR128[0,8] = tmpv; + Rtttt_VPR128[8,8] = tmpv; + Rtttt_VPR128[16,8] = tmpv; + Rtttt_VPR128[24,8] = tmpv; + Rtttt_VPR128[32,8] = tmpv; + Rtttt_VPR128[40,8] = tmpv; + Rtttt_VPR128[48,8] = tmpv; + Rtttt_VPR128[56,8] = tmpv; + Rtttt_VPR128[64,8] = tmpv; + Rtttt_VPR128[72,8] = tmpv; + Rtttt_VPR128[80,8] = tmpv; + Rtttt_VPR128[88,8] = tmpv; + Rtttt_VPR128[96,8] = tmpv; + Rtttt_VPR128[104,8] = tmpv; + Rtttt_VPR128[112,8] = tmpv; + Rtttt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 2, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 3, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 4, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 5, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 6, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 7, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 8, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 9, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 10, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 11, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 12, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 13, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 14, 1, 16); - * [register]:1 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 15, 1, 16); - * [register]:1 
tmpa = tmpv; + Rtttt_VPR128[0,8] = tmpv; + Rtttt_VPR128[8,8] = tmpv; + Rtttt_VPR128[16,8] = tmpv; + Rtttt_VPR128[24,8] = tmpv; + Rtttt_VPR128[32,8] = tmpv; + Rtttt_VPR128[40,8] = tmpv; + Rtttt_VPR128[48,8] = tmpv; + Rtttt_VPR128[56,8] = tmpv; + Rtttt_VPR128[64,8] = tmpv; + Rtttt_VPR128[72,8] = tmpv; + Rtttt_VPR128[80,8] = tmpv; + Rtttt_VPR128[88,8] = tmpv; + Rtttt_VPR128[96,8] = tmpv; + Rtttt_VPR128[104,8] = tmpv; + Rtttt_VPR128[112,8] = tmpv; + Rtttt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -7343,78 +5601,45 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; - local tmpa:4 = 0; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = tmpv; + Rtttt_VPR128[0,16] = tmpv; + Rtttt_VPR128[16,16] = tmpv; + Rtttt_VPR128[32,16] = tmpv; + Rtttt_VPR128[48,16] = tmpv; + Rtttt_VPR128[64,16] = tmpv; + Rtttt_VPR128[80,16] = tmpv; + Rtttt_VPR128[96,16] = tmpv; + Rtttt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = tmpv; + Rtttt_VPR128[0,16] = tmpv; + Rtttt_VPR128[16,16] = tmpv; + Rtttt_VPR128[32,16] = tmpv; + Rtttt_VPR128[48,16] = tmpv; + Rtttt_VPR128[64,16] = tmpv; + Rtttt_VPR128[80,16] = tmpv; + Rtttt_VPR128[96,16] = tmpv; + Rtttt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = tmpv; + Rtttt_VPR128[0,16] = tmpv; + Rtttt_VPR128[16,16] = tmpv; + Rtttt_VPR128[32,16] = tmpv; + Rtttt_VPR128[48,16] = tmpv; + Rtttt_VPR128[64,16] = tmpv; + Rtttt_VPR128[80,16] = tmpv; + Rtttt_VPR128[96,16] = tmpv; + Rtttt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 2, 16); - * [register]:2 tmpa 
= tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 2, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 3, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 4, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 5, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 6, 2, 16); - * [register]:2 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 7, 2, 16); - * [register]:2 tmpa = tmpv; + Rtttt_VPR128[0,16] = tmpv; + Rtttt_VPR128[16,16] = tmpv; + Rtttt_VPR128[32,16] = tmpv; + Rtttt_VPR128[48,16] = tmpv; + Rtttt_VPR128[64,16] = tmpv; + Rtttt_VPR128[80,16] = tmpv; + Rtttt_VPR128[96,16] = tmpv; + Rtttt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -7432,46 +5657,29 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; - local tmpa:4 = 0; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = tmpv; + Rtttt_VPR128[0,32] = tmpv; + Rtttt_VPR128[32,32] = tmpv; + Rtttt_VPR128[64,32] = tmpv; + Rtttt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = tmpv; + Rtttt_VPR128[0,32] = tmpv; + Rtttt_VPR128[32,32] = tmpv; + Rtttt_VPR128[64,32] = tmpv; + Rtttt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = tmpv; + Rtttt_VPR128[0,32] = tmpv; + Rtttt_VPR128[32,32] = tmpv; + Rtttt_VPR128[64,32] = tmpv; + Rtttt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 2, 4, 16); - * [register]:4 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 3, 4, 16); - * [register]:4 tmpa = tmpv; + Rtttt_VPR128[0,32] = tmpv; + Rtttt_VPR128[32,32] = tmpv; + Rtttt_VPR128[64,32] = tmpv; + Rtttt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -7489,30 +5697,21 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; - local tmpa:4 = 0; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = tmpv; + Rtttt_VPR128[0,64] = tmpv; + Rtttt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = tmpv; - simd_address_at(tmpa, 
Rtttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = tmpv; + Rtttt_VPR128[0,64] = tmpv; + Rtttt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = tmpv; + Rtttt_VPR128[0,64] = tmpv; + Rtttt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; - simd_address_at(tmpa, Rtttt_VPR128, 0, 8, 16); - * [register]:8 tmpa = tmpv; - simd_address_at(tmpa, Rtttt_VPR128, 1, 8, 16); - * [register]:8 tmpa = tmpv; + Rtttt_VPR128[0,64] = tmpv; + Rtttt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -7526,102 +5725,69 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = 
Rttt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -7636,54 +5802,37 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = 
Rtt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -7698,30 +5847,21 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -7736,18 +5876,13 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b11 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 8, 8); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR64, 0, 8, 8); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR64, 0, 8, 8); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rttt_VPR64[0,64]; tmp_ldXn = 
tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR64, 0, 8, 8); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtttt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -7762,198 +5897,133 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; 
tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - 
simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -7968,102 +6038,69 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 
2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * 
[register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -8078,54 +6115,37 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR128[96,32]; 
tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -8140,30 +6160,21 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -8178,78 +6189,53 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - *:1 
tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -8264,42 +6250,29 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = 
Rttt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -8314,24 +6287,17 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -8346,15 +6312,11 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b11 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 8, 8); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR64, 0, 8, 8); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR64, 0, 8, 8); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rttt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -8369,150 +6331,101 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[40,8]; 
tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - *:1 
tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -8527,78 +6440,53 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - 
simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -8613,42 +6501,29 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = 
Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -8663,24 +6538,17 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -8695,30 +6563,21 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b00 & vVt & Rt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = 
* [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -8733,18 +6592,13 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b01 & vVt & Rt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -8759,12 +6613,9 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b10 & vVt & Rt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -8779,9 +6630,7 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b11 & vVt & Rt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 8, 8); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -8796,54 +6645,37 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - *:1 
tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -8858,30 +6690,21 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -8896,18 +6719,13 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * 
[register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -8922,12 +6740,9 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -8942,54 +6757,37 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[48,8]; 
tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9004,30 +6802,21 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -9042,18 +6831,13 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -9068,12 +6852,9 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b11 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 8, 8); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR64, 0, 8, 8); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -9088,102 +6869,69 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - 
simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = 
Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9198,54 +6946,37 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = 
Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -9260,30 +6991,21 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -9298,18 +7020,13 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -9324,9 +7041,7 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9341,9 +7056,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[8,8]; 
tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9358,9 +7071,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9375,9 +7086,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9392,9 +7101,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9409,9 +7116,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9426,9 +7131,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9443,9 +7146,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9460,9 +7161,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9477,9 +7176,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & 
b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9494,9 +7191,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9511,9 +7206,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9528,9 +7221,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9545,9 +7236,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9562,9 +7251,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9579,9 +7266,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9596,9 +7281,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - *:2 
tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -9613,9 +7296,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -9630,9 +7311,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -9647,9 +7326,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -9664,9 +7341,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -9681,9 +7356,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -9698,9 +7371,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -9715,9 +7386,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -9732,9 +7401,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & 
b_1315=0b010 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -9749,9 +7416,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -9766,9 +7431,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -9783,9 +7446,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -9800,9 +7461,7 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -9817,9 +7476,7 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -9834,54 +7491,37 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 1, 
1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -9896,30 +7536,21 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -9934,18 +7565,13 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & 
ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -9960,102 +7586,69 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn 
= tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10070,54 +7663,37 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 
2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -10132,30 +7708,21 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -10170,18 +7737,13 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = 
Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -10196,12 +7758,9 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10216,12 +7775,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10236,12 +7792,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10256,12 +7809,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10276,12 +7826,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10296,12 +7843,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & 
ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10316,12 +7860,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10336,12 +7877,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10356,12 +7894,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10376,12 +7911,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10396,12 +7928,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; 
tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10416,12 +7945,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10436,12 +7962,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10456,12 +7979,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10476,12 +7996,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10496,12 +8013,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10516,12 +8030,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & 
ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -10536,12 +8047,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -10556,12 +8064,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -10576,12 +8081,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -10596,12 +8098,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -10616,12 +8115,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = 
Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -10636,12 +8132,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -10656,12 +8149,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -10676,12 +8166,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -10696,12 +8183,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -10716,12 +8200,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -10736,12 +8217,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & 
Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -10756,12 +8234,9 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -10776,12 +8251,9 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -10796,78 +8268,53 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = 
Rtt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -10882,42 +8329,29 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - 
simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -10932,24 +8366,17 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -10964,150 +8391,101 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + 
*:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - 
simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11122,78 +8500,53 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = 
Rttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -11208,42 +8561,29 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * 
[register]:4 tmpa;
+ *:4 tmp_ldXn = Rt_VPR128[64,32];
tmp_ldXn = tmp_ldXn + 4;
- simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16);
- *:4 tmp_ldXn = * [register]:4 tmpa;
+ *:4 tmp_ldXn = Rtt_VPR128[64,32];
tmp_ldXn = tmp_ldXn + 4;
- simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16);
- *:4 tmp_ldXn = * [register]:4 tmpa;
+ *:4 tmp_ldXn = Rttt_VPR128[64,32];
tmp_ldXn = tmp_ldXn + 4;
- simd_address_at(tmpa, Rt_VPR128, 3, 4, 16);
- *:4 tmp_ldXn = * [register]:4 tmpa;
+ *:4 tmp_ldXn = Rt_VPR128[96,32];
tmp_ldXn = tmp_ldXn + 4;
- simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16);
- *:4 tmp_ldXn = * [register]:4 tmpa;
+ *:4 tmp_ldXn = Rtt_VPR128[96,32];
tmp_ldXn = tmp_ldXn + 4;
- simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16);
- *:4 tmp_ldXn = * [register]:4 tmpa;
+ *:4 tmp_ldXn = Rttt_VPR128[96,32];
tmp_ldXn = tmp_ldXn + 4;
# neglected zexts
build ldst_wback;
@@ -11258,24 +8598,17 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=
is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
tmp_ldXn = Rn_GPR64xsp;
- local tmpa:4 = 0;
- simd_address_at(tmpa, Rt_VPR128, 0, 8, 16);
- *:8 tmp_ldXn = * [register]:8 tmpa;
+ *:8 tmp_ldXn = Rt_VPR128[0,64];
tmp_ldXn = tmp_ldXn + 8;
- simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16);
- *:8 tmp_ldXn = * [register]:8 tmpa;
+ *:8 tmp_ldXn = Rtt_VPR128[0,64];
tmp_ldXn = tmp_ldXn + 8;
- simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16);
- *:8 tmp_ldXn = * [register]:8 tmpa;
+ *:8 tmp_ldXn = Rttt_VPR128[0,64];
tmp_ldXn = tmp_ldXn + 8;
- simd_address_at(tmpa, Rt_VPR128, 1, 8, 16);
- *:8 tmp_ldXn = * [register]:8 tmpa;
+ *:8 tmp_ldXn = Rt_VPR128[64,64];
tmp_ldXn = tmp_ldXn + 8;
- simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16);
- *:8 tmp_ldXn = * [register]:8 tmpa;
+ *:8 tmp_ldXn = Rtt_VPR128[64,64];
tmp_ldXn = tmp_ldXn + 8;
- simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16);
- *:8 tmp_ldXn = * [register]:8 tmpa;
+ *:8 tmp_ldXn = Rttt_VPR128[64,64];
tmp_ldXn = tmp_ldXn + 8;
# neglected zexts
build ldst_wback;
@@ -11290,15 +8623,11 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=
is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
tmp_ldXn = Rn_GPR64xsp;
- local tmpa:4 = 0;
- simd_address_at(tmpa, Rt_VPR128, 0, 1, 16);
- *:1 tmp_ldXn = * [register]:1 tmpa;
+ *:1 tmp_ldXn = Rt_VPR128[0,8];
tmp_ldXn = tmp_ldXn + 1;
- simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16);
- *:1 tmp_ldXn = * [register]:1 tmpa;
+ *:1 tmp_ldXn = Rtt_VPR128[0,8];
tmp_ldXn = tmp_ldXn + 1;
- simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16);
- *:1 tmp_ldXn = * [register]:1 tmpa;
+ *:1 tmp_ldXn = Rttt_VPR128[0,8];
tmp_ldXn = tmp_ldXn + 1;
# neglected zexts
build ldst_wback;
@@ -11313,15 +8642,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 &
is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
tmp_ldXn = Rn_GPR64xsp;
- local tmpa:4 = 0;
- simd_address_at(tmpa, Rt_VPR128, 1, 1, 16);
- *:1 tmp_ldXn = * [register]:1 tmpa;
+ *:1 tmp_ldXn = Rt_VPR128[8,8];
tmp_ldXn = tmp_ldXn + 1;
- simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16);
- *:1 tmp_ldXn = * [register]:1 tmpa;
+ *:1 tmp_ldXn = Rtt_VPR128[8,8];
tmp_ldXn = tmp_ldXn + 1;
- simd_address_at(tmpa,
Rttt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11336,15 +8661,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11359,15 +8680,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11382,15 +8699,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11405,15 +8718,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11428,15 +8737,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & 
b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11451,15 +8756,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11474,15 +8775,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11497,15 +8794,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11520,15 +8813,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 
tmpa; + *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11543,15 +8832,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11566,15 +8851,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11589,15 +8870,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11612,15 +8889,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * 
[register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11635,15 +8908,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -11658,15 +8927,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -11681,15 +8946,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -11704,15 +8965,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -11727,15 +8984,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & 
b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -11750,15 +9003,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -11773,15 +9022,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -11796,15 +9041,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -11819,15 +9060,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = 
Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -11842,15 +9079,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -11865,15 +9098,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -11888,15 +9117,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -11911,15 +9136,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 
tmp_ldXn = Rttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -11934,15 +9155,11 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -11957,15 +9174,11 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -11980,102 +9193,69 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 0, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 1, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 2, 1, 8); - *:1 
tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 2, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 3, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 4, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 5, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 6, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR64, 7, 1, 8); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12090,54 +9270,37 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * 
[register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 0, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 1, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 2, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR64, 3, 2, 8); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -12152,30 +9315,21 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR64, 0, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * 
[register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR64, 1, 4, 8); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -12190,198 +9344,133 @@ is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - 
simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - 
*:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12396,102 +9485,69 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = 
Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = 
tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -12506,54 +9562,37 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 3, 4, 16); - *:4 
tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -12568,30 +9607,21 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -12606,18 +9636,13 @@ is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011= is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 0, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12632,18 +9657,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 1, 1, 16); - *:1 tmp_ldXn = * 
[register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12658,18 +9678,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 2, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12684,18 +9699,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 3, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12710,18 +9720,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 4, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12736,18 +9741,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = 
Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 5, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12762,18 +9762,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 6, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12788,18 +9783,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 7, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12814,18 +9804,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 8, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12840,18 +9825,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & 
b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 9, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12866,18 +9846,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 10, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12892,18 +9867,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 11, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12918,18 +9888,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - 
simd_address_at(tmpa, Rttt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 12, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12944,18 +9909,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 13, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12970,18 +9930,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 14, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -12996,18 +9951,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rttt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; - simd_address_at(tmpa, Rtttt_VPR128, 15, 1, 16); - *:1 tmp_ldXn = * [register]:1 tmpa; + *:1 tmp_ldXn = Rtttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; @@ -13022,18 +9972,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 
& vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 0, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -13048,18 +9993,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 1, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -13074,18 +10014,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 2, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -13100,18 +10035,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; - 
simd_address_at(tmpa, Rtttt_VPR128, 3, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -13126,18 +10056,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 4, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -13152,18 +10077,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 5, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -13178,18 +10098,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 6, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -13204,18 +10119,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, 
Rt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rttt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; - simd_address_at(tmpa, Rtttt_VPR128, 7, 2, 16); - *:2 tmp_ldXn = * [register]:2 tmpa; + *:2 tmp_ldXn = Rtttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; @@ -13230,18 +10140,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 0, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -13256,18 +10161,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 1, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -13282,18 +10182,13 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=1 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 2, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; # neglected 
zexts build ldst_wback; @@ -13308,18 +10203,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rttt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; - simd_address_at(tmpa, Rtttt_VPR128, 3, 4, 16); - *:4 tmp_ldXn = * [register]:4 tmpa; + *:4 tmp_ldXn = Rtttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; @@ -13334,18 +10224,13 @@ is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=1 & is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR128, 0, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; @@ -13360,20 +10245,16 @@ is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; - local tmpa:4 = 0; - simd_address_at(tmpa, Rt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rttt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; - simd_address_at(tmpa, Rtttt_VPR128, 1, 8, 16); - *:8 tmp_ldXn = * [register]:8 tmpa; + *:8 tmp_ldXn = Rtttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } + From 797fb6900501c44d35ee40714e3463bee085b39c Mon Sep 17 00:00:00 2001 From: caheckman <48068198+caheckman@users.noreply.github.com> Date: Wed, 21 Oct 2020 11:39:09 -0400 Subject: [PATCH 4/5] Disable SLEIGH endian check for mixed endian architectures --- .../app/plugin/processors/sleigh/SleighLanguage.java | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/app/plugin/processors/sleigh/SleighLanguage.java 
b/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/app/plugin/processors/sleigh/SleighLanguage.java
index c900152a38..f45f183e38 100644
--- a/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/app/plugin/processors/sleigh/SleighLanguage.java
+++ b/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/app/plugin/processors/sleigh/SleighLanguage.java
@@ -888,11 +888,13 @@ public class SleighLanguage implements Language {
 			throw new SleighException(".sla file for " + getLanguageID() + " has the wrong format");
 		}
 		boolean isBigEndian = SpecXmlUtils.decodeBoolean(el.getAttribute("bigendian"));
-		// check the instruction endianess, not the program data endianess
 		if (isBigEndian ^ description.getEndian().isBigEndian()) {
-			throw new SleighException(
-				".ldefs says " + getLanguageID() + " is " + description.getEndian() +
-					" but .sla says " + el.getAttribute("bigendian"));
+			if (description.getInstructionEndian().isBigEndian() == description.getEndian()
+					.isBigEndian()) {
+				throw new SleighException(
+					".ldefs says " + getLanguageID() + " is " + description.getEndian() +
+						" but .sla says " + el.getAttribute("bigendian"));
+			}
 		}
 		uniqueBase = SpecXmlUtils.decodeLong(el.getAttribute("uniqbase"));
 		alignment = SpecXmlUtils.decodeInt(el.getAttribute("align"));

From 017b92ac2e25c638e13e5a40a403903e3129cc89 Mon Sep 17 00:00:00 2001
From: caheckman <48068198+caheckman@users.noreply.github.com>
Date: Wed, 28 Oct 2020 10:11:18 -0400
Subject: [PATCH 5/5] Updating documentation to include token endianess override

---
 .../Decompiler/src/main/doc/sleigh.xml        | 45 +++++++++-----
 GhidraDocs/languages/html/sleigh.html         | 58 +++++++++----------
 .../languages/html/sleigh_constructors.html   | 56 +++++++++---------
 GhidraDocs/languages/html/sleigh_context.html |  6 +-
 .../languages/html/sleigh_definitions.html    | 23 ++++----
 GhidraDocs/languages/html/sleigh_layout.html  | 10 ++--
 .../languages/html/sleigh_preprocessing.html  | 10 ++--
 GhidraDocs/languages/html/sleigh_symbols.html |  4 +-
 GhidraDocs/languages/html/sleigh_tokens.html  | 28 ++++++---
 9 files changed, 135 insertions(+), 105 deletions(-)

diff --git a/Ghidra/Features/Decompiler/src/main/doc/sleigh.xml b/Ghidra/Features/Decompiler/src/main/doc/sleigh.xml
index c6b1b624c2..c60683a52f 100644
--- a/Ghidra/Features/Decompiler/src/main/doc/sleigh.xml
+++ b/Ghidra/Features/Decompiler/src/main/doc/sleigh.xml
@@ -4,7 +4,7 @@
 SLEIGH
 A Language for Rapid Processor Specification
 Originally published December 16, 2005
-Last updated September 5, 2019
+Last updated October 28, 2020
@@ -573,13 +573,14 @@ define endian=little;
 This defines how the processor interprets contiguous sequences of
-bytes as integers. It effects how integer fields within an instruction
-are interpreted (see ), and
-it also effects the details of how the processor is supposed to
-implement atomic operations like integer addition and integer
-compare. The specification designer should only need to worry about
-these details when labeling instruction fields, otherwise the
-specification language will hide endianess issues.
+bytes as integers or other values and globally affects values across
+all address spaces. It also affects how integer fields
+within an instruction are interpreted, (see ),
+although it is possible to override this setting in the rare case that endianess is
+different for data versus instruction encoding.
+The specification designer generally only needs to worry about
+endianess when labeling instruction fields and when defining overlapping registers,
+otherwise the specification language hides endianess issues.
@@ -966,7 +967,7 @@ individual constructor (defined in
@@ -1057,8 +1058,22 @@
 there are one or more field declarations specifying the name of the field and the range of bits within the token making up the field. The size of a field does not need to be a multiple of 8. The range is inclusive where the least significant bit in the token
-is labeled 0. The endianess of the processor will effect this labeling
-when defining tokens that are bigger than 1 byte. After each field
+is labeled 0. When defining tokens that are bigger than 1 byte, the
+global endianess setting (See )
+will affect this labeling. Although it is rarely required, it is possible to override
+the global endianess setting for a specific token by appending either the qualifier
+endian=little or endian=big
+immediately after the token name and size. For instance:
+
+
+  define token instr ( 32 ) endian=little op0=(0,15) ...
+
+
+The token instr is overridden to be little endian.
+This override applies to all fields defined for the token but affects no other tokens.
+
+
+After each field
 declaration, there can be zero or more of the following attribute keywords:
@@ -2023,7 +2038,7 @@
 assignment to such a variable changes the context in which the current instruction is being disassembled and can potentially have a drastic effect on how the rest of the instruction is disassembled. An assignment of this form is considered local to the instruction and
-will not effect how other instructions are parsed. The context
+will not affect how other instructions are parsed. The context
 variable is reset to its original value before parsing other instructions. The disassembly action may also contain one or more globalset directives, which
@@ -2547,7 +2562,7 @@
 the table symbol mode. When this constructor is matched, as part of a more complicated instruction, the symbol mode will represent the original semantic value of reg but with the standard post-increment
-side effect.
+side-effect.
 The table symbol associated with the constructor becomes
@@ -3724,7 +3739,7 @@ blr is opcode=35 & reg=15 & LRset=1 { return [lr]; }
 An alternative to the noflow attribute is to simply issue multiple directives within a single constructor, so an explicit end to a context change can be given. The value of the variable exported to the global state
-is the one in affect at the point where the directive is issued. Thus,
+is the one in effect at the point where the directive is issued. Thus,
 after one globalset, the same context variable can be assigned a different value, followed by another globalset for a different
@@ -3735,7 +3750,7 @@
 Because context in SLEIGH is controlled by a disassembly process, there are some basic caveats to the use of the globalset directive. With flowing context changes,
-there is no guarantee of what global state will be in affect at a
+there is no guarantee of what global state will be in effect at a
 particular address. During disassembly, at any given point, the process may not have uncovered all the relevant directives, and the known directives may not necessarily be consistent. In
diff --git a/GhidraDocs/languages/html/sleigh.html b/GhidraDocs/languages/html/sleigh.html
index cc532db93e..a02b507fe4 100644
--- a/GhidraDocs/languages/html/sleigh.html
+++ b/GhidraDocs/languages/html/sleigh.html
@@ -25,9 +25,9 @@

-SLEIGH

+SLEIGH

A Language for Rapid Processor Specification

-

Last updated September 5, 2019

+

Last updated October 28, 2020

Originally published December 16, 2005


@@ -35,51 +35,51 @@

Table of Contents

-
1. Introduction to P-Code
+
1. Introduction to P-Code
-
1.1. Address Spaces
+
1.1. Address Spaces
1.2. Varnodes
-
1.3. Operations
+
1.3. Operations
2. Basic Specification Layout
-
2.1. Comments
-
2.2. Identifiers
-
2.3. Strings
-
2.4. Integers
-
2.5. White Space
+
2.1. Comments
+
2.2. Identifiers
+
2.3. Strings
+
2.4. Integers
+
2.5. White Space
3. Preprocessing
3.1. Including Files
-
3.2. Preprocessor Macros
-
3.3. Conditional Compilation
+
3.2. Preprocessor Macros
+
3.3. Conditional Compilation
4. Basic Definitions
4.1. Endianess Definition
-
4.2. Alignment Definition
-
4.3. Space Definitions
+
4.2. Alignment Definition
+
4.3. Space Definitions
4.4. Naming Registers
-
4.5. Bit Range Registers
-
4.6. User-Defined Operations
+
4.5. Bit Range Registers
+
4.6. User-Defined Operations
5. Introduction to Symbols
-
5.1. Notes on Namespaces
+
5.1. Notes on Namespaces
5.2. Predefined Symbols
6. Tokens and Fields
6.1. Defining Tokens and Fields
-
6.2. Fields as Family Symbols
-
6.3. Attaching Alternate Meanings to Fields
+
6.2. Fields as Family Symbols
+
6.3. Attaching Alternate Meanings to Fields
6.4. Context Variables
7. Constructors
-
7.1. The Five Sections of a Constructor
-
7.2. The Table Header
+
7.1. The Five Sections of a Constructor
+
7.2. The Table Header
7.3. The Display Section
7.4. The Bit Pattern Section
7.5. Disassembly Actions Section
@@ -87,12 +87,12 @@
7.7. The Semantic Section
7.8. Tables
7.9. P-code Macros
-
7.10. Build Directives
-
7.11. Delay Slot Directives
+
7.10. Build Directives
+
7.11. Delay Slot Directives
8. Using Context
-
8.1. Basic Use of Context Variables
+
8.1. Basic Use of Context Variables
8.2. Local Context Change
8.3. Global Context Change
@@ -101,7 +101,7 @@

-History

+History

This document describes the syntax for the SLEIGH processor specification language, which was developed for the GHIDRA @@ -129,7 +129,7 @@

-Overview

+Overview

SLEIGH is a language for describing the instruction sets of general purpose microprocessors, in order to facilitate the reverse @@ -162,7 +162,7 @@ Italics are used when defining terms and for named entities. Bold is used for SL

-1. Introduction to P-Code

+1. Introduction to P-Code

Although p-code is a distinct language from SLEIGH, because a major purpose of SLEIGH is to specify the translation from machine code to @@ -221,7 +221,7 @@ respectively.

-1.1. Address Spaces

+1.1. Address Spaces

An address space for p-code is a generalization of the indexed memory (RAM) that a typical processor has access to, and @@ -322,7 +322,7 @@ must be provided and enforced by the specification designer.

-1.3. Operations

+1.3. Operations

P-code is intended to emulate a target processor by substituting a sequence of p-code operations for each machine instruction. Thus every diff --git a/GhidraDocs/languages/html/sleigh_constructors.html b/GhidraDocs/languages/html/sleigh_constructors.html index 0a0b7f8e7d..21c175f0fd 100644 --- a/GhidraDocs/languages/html/sleigh_constructors.html +++ b/GhidraDocs/languages/html/sleigh_constructors.html @@ -60,7 +60,7 @@ multiple constructors into a single table are addressed in

-7.1. The Five Sections of a Constructor

+7.1. The Five Sections of a Constructor

A single complex statement in the specification file describes a constructor. This statement is always made up of five distinct @@ -92,7 +92,7 @@ in turn.

-7.2. The Table Header

+7.2. The Table Header

Every constructor must be part of a table, which is the element with an actual family symbol identifier associated with it. So each @@ -230,7 +230,7 @@ no such requirement.

-7.3.2. The '^' character

+7.3.2. The '^' character

The ‘^’ character in the display section is used to separate identifiers from other characters where there shouldn’t be white space @@ -278,7 +278,7 @@ to match the constructor being defined.

-7.4.1. Constraints

+7.4.1. Constraints

The patterns required for processor specifications can almost always be described as a mask and value pair. Given a specific instruction @@ -337,7 +337,7 @@ requires two or more mask/value style checks to correctly implement.

-7.4.3. Defining Operands and Invoking Subtables

+7.4.3. Defining Operands and Invoking Subtables

The principle way of defining a constructor operand, left undefined from the display section, is done in the bit pattern section. If an @@ -396,7 +396,7 @@ statement of the grouping of old symbols into the new constructor.

-7.4.4. Variable Length Instructions

+7.4.4. Variable Length Instructions

There are some additional complexities to designing a specification for a processor with variable length instructions. Some initial @@ -419,7 +419,7 @@ designer control over how tokens fit together.

-7.4.4.1. The ';' Operator
+7.4.4.1. The ';' Operator

The most important operator for patterns defining variable length instructions is the concatenation operator ‘;’. When building a @@ -481,7 +481,7 @@ operator, so parentheses may be necessary to get the intended meaning.

-7.4.4.2. The '...' Operator
+7.4.4.2. The '...' Operator

The ellipsis operator ‘...’ is used to satisfy the token matching requirements of the ‘&’ and ‘|’ operators (described in the previous @@ -557,7 +557,7 @@ don’t quite match the assembly.

-7.4.6. Empty Patterns

+7.4.6. Empty Patterns

Occasionally there is a need for an empty pattern when building tables. An empty pattern matches everything. There is a predefined @@ -567,7 +567,7 @@ to indicate an empty pattern.

-7.4.7. Advanced Constraints

+7.4.7. Advanced Constraints

A constraint does not have to be of the form “field = constant”, although this is almost always what is needed. In certain situations, @@ -821,7 +821,7 @@ assignment to such a variable changes the context in which the current instruction is being disassembled and can potentially have a drastic effect on how the rest of the instruction is disassembled. An assignment of this form is considered local to the instruction and -will not effect how other instructions are parsed. The context +will not affect how other instructions are parsed. The context variable is reset to its original value before parsing other instructions. The disassembly action may also contain one or more globalset directives, which @@ -939,7 +939,7 @@ varnode is r1.

-7.7.1. Expressions

+7.7.1. Expressions

Expressions are built out of symbols and the binary and unary operators listed in Table 5, “Semantic Expression Operators and Syntax” in the @@ -954,7 +954,7 @@ within expressions to affect this order.

-7.7.1.1. Arithmetic, Logical and Boolean Operators
+7.7.1.1. Arithmetic, Logical and Boolean Operators

For the most part these operators should be familiar to software developers. The only real differences arise from the fact that @@ -1017,7 +1017,7 @@ set to something other than one.

-7.7.1.3. Extension
+7.7.1.3. Extension

Most processors have instructions that extend small values into big values, and many instructions do these minor data manipulations @@ -1039,7 +1039,7 @@ the sext operator.

-7.7.1.4. Truncation
+7.7.1.4. Truncation

There are two forms of syntax indicating a truncation of the input varnode. In one the varnode is followed by a colon ‘:’ and an integer @@ -1169,7 +1169,7 @@ the offset portion of the address, and to copy the desired value, the

-7.7.1.7. Managed Code Operations
+7.7.1.7. Managed Code Operations

SLEIGH provides basic support for instructions where encoding and context don't provide a complete description of the semantics. This is the case @@ -1231,7 +1231,7 @@ define pcodeop arctan;

-7.7.2. Statements

+7.7.2. Statements

We describe the types of semantic statements that are allowed in SLEIGH.

@@ -1305,7 +1305,7 @@ and may be enforced in future compiler versions.
-7.7.2.2. Storage Statements
+7.7.2.2. Storage Statements

SLEIGH supports fairly standard storage statement syntax to complement the load operator. The left-hand side of an @@ -1336,7 +1336,7 @@ attribute is set to something other than one.

-7.7.2.3. Exports
+7.7.2.3. Exports

The semantic section doesn’t just specify how to generate p-code for a constructor. Except for those constructors in the root table, this @@ -1366,7 +1366,7 @@ the table symbol mode. When this construc matched, as part of a more complicated instruction, the symbol mode will represent the original semantic value of reg but with the standard post-increment -side effect. +side-effect.

The table symbol associated with the constructor becomes @@ -1388,7 +1388,7 @@ varnode being modified to be exported as an integer constant.

-7.7.2.4. Dynamic References
+7.7.2.4. Dynamic References

The only other operator allowed as part of an export statement, is the ‘*’ @@ -1447,7 +1447,7 @@ levels.

-7.7.2.5. Branching Statements
+7.7.2.5. Branching Statements

This section discusses statements that generate p-code branching operations. These are listed in Table 7, “Branching Statements”, in the Appendix. @@ -1802,7 +1802,7 @@ each followed by a variation which corrects the error.

-7.7.4. Unimplemented Semantics

+7.7.4. Unimplemented Semantics

The semantic section must be present for every constructor in the specification. But the designer can leave the semantics explicitly @@ -1962,7 +1962,7 @@ should generally be avoided.

-7.8.2. Specific Symbol Trees

+7.8.2. Specific Symbol Trees

When the SLEIGH parser analyzes an instruction, it starts with the root symbol instruction, and decides which of the @@ -2045,7 +2045,7 @@ and p-code for these encodings by walking the trees.

-7.8.2.1. Disassembly Trees
+7.8.2.1. Disassembly Trees

If the nodes of each tree are replaced with the display information of the corresponding specific symbol, we see how the disassembly @@ -2068,7 +2068,7 @@ statements corresponding to the original instruction encodings.

-7.8.2.2. P-code Trees
+7.8.2.2. P-code Trees

A similar procedure produces the resulting p-code translation of the instruction. If each node in the specific symbol tree is replaced with @@ -2147,7 +2147,7 @@ directive however should not be used in a macro.

-7.10. Build Directives

+7.10. Build Directives

Because the nodes of a specific symbol tree are traversed in a depth-first order, the p-code for a child node in general comes before @@ -2202,7 +2202,7 @@ normal action of the instruction.

-7.11. Delay Slot Directives

+7.11. Delay Slot Directives

For processors with a pipe-lined architecture, multiple instructions are typically executing simultaneously. This can lead to processor diff --git a/GhidraDocs/languages/html/sleigh_context.html b/GhidraDocs/languages/html/sleigh_context.html index fa762bb070..f8c72d7a6e 100644 --- a/GhidraDocs/languages/html/sleigh_context.html +++ b/GhidraDocs/languages/html/sleigh_context.html @@ -85,7 +85,7 @@ whose encodings are otherwise the same.

-8.1. Basic Use of Context Variables

+8.1. Basic Use of Context Variables

Suppose a processor supports the use of two different sets of registers in its main addressing mode, based on the setting of a @@ -317,7 +317,7 @@ blr is opcode=35 & reg=15 & LRset=1 { return [lr]; } An alternative to the noflow attribute is to simply issue multiple directives within a single constructor, so an explicit end to a context change can be given. The value of the variable exported to the global state -is the one in affect at the point where the directive is issued. Thus, +is the one in effect at the point where the directive is issued. Thus, after one globalset, the same context variable can be assigned a different value, followed by another globalset for a different @@ -328,7 +328,7 @@ Because context in SLEIGH is controlled by a disassembly process, there are some basic caveats to the use of the globalset directive. With flowing context changes, -there is no guarantee of what global state will be in affect at a +there is no guarantee of what global state will be in effect at a particular address. During disassembly, at any given point, the process may not have uncovered all the relevant directives, and the known directives may not necessarily be consistent. In diff --git a/GhidraDocs/languages/html/sleigh_definitions.html b/GhidraDocs/languages/html/sleigh_definitions.html index 49f23dd0a8..96265a83e0 100644 --- a/GhidraDocs/languages/html/sleigh_definitions.html +++ b/GhidraDocs/languages/html/sleigh_definitions.html @@ -44,18 +44,19 @@ define endian=little;

This defines how the processor interprets contiguous sequences of -bytes as integers. It effects how integer fields within an instruction -are interpreted (see Section 6.1, “Defining Tokens and Fields”), and -it also effects the details of how the processor is supposed to -implement atomic operations like integer addition and integer -compare. The specification designer should only need to worry about -these details when labeling instruction fields, otherwise the -specification language will hide endianess issues. +bytes as integers or other values and globally affects values across +all address spaces. It also affects how integer fields +within an instruction are interpreted, (see Section 6.1, “Defining Tokens and Fields”), +although it is possible to override this setting in the rare case that endianess is +different for data versus instruction encoding. +The specification designer generally only needs to worry about +endianess when labeling instruction fields and when defining overlapping registers, +otherwise the specification language hides endianess issues.
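A minimal sketch makes the overlapping-register point concrete (the register names and offsets below are hypothetical, not taken from any shipped processor module):

   define endian=big;
   define space register type=register_space size=4;

   # An 8-byte register and its 4-byte low half.  Because this spec is big
   # endian, the least significant bytes sit at the HIGHER offset, so the
   # low half is declared 4 bytes further into the register space.
   define register offset=0x1000 size=8 [ r0 ];
   define register offset=0x1004 size=4 [ r0_lo ];

Under define endian=little; the same r0_lo would instead be declared at offset 0x1000, directly over the first byte of r0.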

-4.2. Alignment Definition

+4.2. Alignment Definition

An alignment definition looks like

@@ -72,7 +73,7 @@ instruction as an error.

-4.3. Space Definitions

+4.3. Space Definitions

The definition of an address space looks like

@@ -227,7 +228,7 @@ define register offset=0 size=1

-4.5. Bit Range Registers

+4.5. Bit Range Registers

Many processors define registers that either consist of a single bit or otherwise don't use an integral number of bytes. A recurring @@ -298,7 +299,7 @@ used as an alternate syntax for defining overlapping registers.

-4.6. User-Defined Operations

+4.6. User-Defined Operations

The specification designer can define new p-code operations using a define pcodeop statement. This diff --git a/GhidraDocs/languages/html/sleigh_layout.html b/GhidraDocs/languages/html/sleigh_layout.html index 1f312277df..8b641bacdf 100644 --- a/GhidraDocs/languages/html/sleigh_layout.html +++ b/GhidraDocs/languages/html/sleigh_layout.html @@ -36,7 +36,7 @@ by the compiler.

-2.1. Comments

+2.1. Comments

Comments start with the ‘#’ character and continue to the end of the line. Comments can appear anywhere except the display section of a @@ -46,7 +46,7 @@ interpreted as something that should be printed in disassembly.

-2.2. Identifiers

+2.2. Identifiers

Identifiers are made up of letters a-z, capitals A-Z, digits 0-9 and the characters ‘.’ and ‘_’. An identifier can use these characters in @@ -55,7 +55,7 @@ any order and for any length, but it must not start with a digit.

-2.3. Strings

+2.3. Strings

String literals can be used, when specifying names and when specifying how disassembly should be printed, so that special characters are @@ -66,7 +66,7 @@ meaning.

-2.4. Integers

+2.4. Integers

Integers are specified either in a decimal format or in a standard C-style hexadecimal format by prepending the @@ -92,7 +92,7 @@ integers internally with 64 bits of precision.

-2.5. White Space

+2.5. White Space

White space characters include space, tab, line-feed, vertical line-feed, and carriage-return (‘ ‘, ‘\t’, ‘\r’, ‘\v’, diff --git a/GhidraDocs/languages/html/sleigh_preprocessing.html b/GhidraDocs/languages/html/sleigh_preprocessing.html index 1eb1e45f3a..5f47bc64bc 100644 --- a/GhidraDocs/languages/html/sleigh_preprocessing.html +++ b/GhidraDocs/languages/html/sleigh_preprocessing.html @@ -54,7 +54,7 @@ own @include directives.

-3.2. Preprocessor Macros

+3.2. Preprocessor Macros

SLEIGH allows simple (unparameterized) macro definitions and expansions. A macro definition occurs on one line and starts with @@ -85,7 +85,7 @@ definition of a macro from that point on in the file.

-3.3. Conditional Compilation

+3.3. Conditional Compilation

SLEIGH supports several directives that allow conditional inclusion of parts of a specification, based on the existence of a macro, or its @@ -103,7 +103,7 @@ and @endif.

-3.3.1. @ifdef and @ifndef

+3.3.1. @ifdef and @ifndef

The @ifdef directive is followed by a macro identifier and evaluates to true if the macro is defined. @@ -129,7 +129,7 @@ or @elif directive (See below).

-3.3.2. @if

+3.3.2. @if

The @if directive is followed by a boolean expression with macros as the variables and strings as the @@ -158,7 +158,7 @@ is defined.

-3.3.3. @else and @elif

+3.3.3. @else and @elif

An @else directive splits the lines bounded by an @if directive and diff --git a/GhidraDocs/languages/html/sleigh_symbols.html b/GhidraDocs/languages/html/sleigh_symbols.html index 6eb2b83374..a3ab15203c 100644 --- a/GhidraDocs/languages/html/sleigh_symbols.html +++ b/GhidraDocs/languages/html/sleigh_symbols.html @@ -105,7 +105,7 @@ the predefined identifier instruction.

-5.1. Notes on Namespaces

+5.1. Notes on Namespaces

Almost all identifiers live in the same global "scope". The global scope includes

@@ -138,7 +138,7 @@ individual constructor (defined in hides the global symbol while that scope -is in affect. +is in effect.

diff --git a/GhidraDocs/languages/html/sleigh_tokens.html b/GhidraDocs/languages/html/sleigh_tokens.html index 79521cfd7b..dc572dfdb4 100644 --- a/GhidraDocs/languages/html/sleigh_tokens.html +++ b/GhidraDocs/languages/html/sleigh_tokens.html @@ -56,8 +56,22 @@ there are one or more field declarations specifying the name of the field and the range of bits within the token making up the field. The size of a field does not need to be a multiple of 8. The range is inclusive where the least significant bit in the token -is labeled 0. The endianess of the processor will effect this labeling -when defining tokens that are bigger than 1 byte. After each field +is labeled 0. When defining tokens that are bigger than 1 byte, the +global endianess setting (See Section 4.1, “Endianess Definition”) +will affect this labeling. Although it is rarely required, it is possible to override +the global endianess setting for a specific token by appending either the qualifier +endian=little or endian=big +immediately after the token name and size. For instance: +

+
+  define token instr ( 32 ) endian=little op0=(0,15) ...
+
+

+The token instr is overridden to be little endian. +This override applies to all fields defined for the token but affects no other tokens. +
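A slightly fuller sketch of the override in use (the field names below are hypothetical, and the example above deliberately elides its remaining fields):

   # Instructions are fetched little endian even when the data endianess is big.
   define token instr ( 32 ) endian=little
      opcode = (26,31)
      rn     = (16,20)
      imm16  = (0,15) signed
   ;

Bit 0 still refers to the least significant bit of the assembled 32-bit token; the override only changes which instruction bytes fill which end of that token.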

+

+After each field declaration, there can be zero or more of the following attribute keywords:

@@ -74,7 +88,7 @@ different names.

-6.2. Fields as Family Symbols

+6.2. Fields as Family Symbols

Fields are the most basic form of family symbol; they define a natural map from instruction bits to a specific symbol as follows. We take the @@ -99,7 +113,7 @@ the dec attribute is not supported]

-6.3. Attaching Alternate Meanings to Fields

+6.3. Attaching Alternate Meanings to Fields

The default interpretation of a field is probably the most natural but of course processors interpret fields within an instruction in a wide @@ -110,7 +124,7 @@ interpretations must be built up out of tables.

-6.3.1. Attaching Registers

+6.3.1. Attaching Registers

Probably the most common processor interpretation of a field is as an encoding of a particular register. In SLEIGH this @@ -149,7 +163,7 @@ of the instruction.

-6.3.2. Attaching Other Integers

+6.3.2. Attaching Other Integers

Sometimes a processor interprets a field as an integer but not the integer given by the default interpretation. A different integer @@ -171,7 +185,7 @@ unspecified positions in the list using a ‘_’]

-6.3.3. Attaching Names

+6.3.3. Attaching Names

It is possible to just modify the display characteristics of a field without changing the semantic meaning. The need for this is rare, but