@@ -378,7 +378,7 @@ class FunctionType;
378378 F (I64X2ExtmulHighI32X4S, (simdExtmulOperation<int32_t , int64_t , false >)) \
379379 F (I64X2ExtmulLowI32X4U, (simdExtmulOperation<uint32_t , uint64_t , true >)) \
380380 F (I64X2ExtmulHighI32X4U, (simdExtmulOperation<uint32_t , uint64_t , false >)) \
381- F (I32X4DotI16X8S, (simdDotOperation)) \
381+ F (I32X4DotI16X8S, (simdDotOperation< int16_t , uint32_t >)) \
382382 F (I8X16NarrowI16X8S, (simdNarrowOperation<int16_t , int8_t >)) \
383383 F (I8X16NarrowI16X8U, (simdNarrowOperation<int16_t , uint8_t >)) \
384384 F (I16X8NarrowI32X4S, (simdNarrowOperation<int32_t , int16_t >)) \
@@ -588,30 +588,65 @@ class FunctionType;
588588 F (MemoryAtomicWait64) \
589589 F (AtomicFence)
590590
591- #define FOR_EACH_BYTECODE (F ) \
592- FOR_EACH_BYTECODE_OP (F) \
593- FOR_EACH_BYTECODE_BINARY_OP (F) \
594- FOR_EACH_BYTECODE_UNARY_OP (F) \
595- FOR_EACH_BYTECODE_UNARY_OP_2 (F) \
596- FOR_EACH_BYTECODE_LOAD_OP (F) \
597- FOR_EACH_BYTECODE_STORE_OP (F) \
598- FOR_EACH_BYTECODE_SIMD_BINARY_OP (F) \
599- FOR_EACH_BYTECODE_SIMD_BINARY_SHIFT_OP (F) \
600- FOR_EACH_BYTECODE_SIMD_BINARY_OTHER (F) \
601- FOR_EACH_BYTECODE_SIMD_UNARY_OP (F) \
602- FOR_EACH_BYTECODE_SIMD_UNARY_CONVERT_OP (F) \
603- FOR_EACH_BYTECODE_SIMD_UNARY_OTHER (F) \
604- FOR_EACH_BYTECODE_SIMD_LOAD_SPLAT_OP (F) \
605- FOR_EACH_BYTECODE_SIMD_LOAD_EXTEND_OP (F) \
606- FOR_EACH_BYTECODE_SIMD_LOAD_LANE_OP (F) \
607- FOR_EACH_BYTECODE_SIMD_STORE_LANE_OP (F) \
608- FOR_EACH_BYTECODE_SIMD_EXTRACT_LANE_OP (F) \
609- FOR_EACH_BYTECODE_SIMD_REPLACE_LANE_OP (F) \
610- FOR_EACH_BYTECODE_SIMD_ETC_OP (F) \
611- FOR_EACH_BYTECODE_ATOMIC_LOAD_OP (F) \
612- FOR_EACH_BYTECODE_ATOMIC_STORE_OP (F) \
613- FOR_EACH_BYTECODE_ATOMIC_RMW_OP (F) \
614- FOR_EACH_BYTECODE_ATOMIC_RMW_CMPXCHG_OP (F) \
// Relaxed SIMD unary operations: relaxed float -> int truncations.
// F(name, handler) — handler is the templated operation implementing the
// byte code (source lane type, destination lane type).
#define FOR_EACH_BYTECODE_RELAXED_SIMD_UNARY_OTHER(F)                                \
    F(I32X4RelaxedTruncF32X4S, (simdTruncSatOperation<float, int32_t>))              \
    F(I32X4RelaxedTruncF32X4U, (simdTruncSatOperation<float, uint32_t>))             \
    F(I32X4RelaxedTruncF64X2SZero, (simdTruncSatZeroOperation<double, int32_t>))     \
    F(I32X4RelaxedTruncF64X2UZero, (simdTruncSatZeroOperation<double, uint32_t>))
596+
// Relaxed SIMD binary operations.
// F(name, op, typeA, typeB) — op is the scalar per-lane operation; the two
// type arguments are the lane types (presumably source/result lane type —
// TODO(review): confirm against DEFINE_BINARY_BYTECODE's parameter roles).
#define FOR_EACH_BYTECODE_RELAXED_SIMD_BINARY_OP(F)            \
    F(F32X4RelaxedMin, floatMin, float, float)                 \
    F(F32X4RelaxedMax, floatMax, float, float)                 \
    F(F64X2RelaxedMin, floatMin, double, double)               \
    F(F64X2RelaxedMax, floatMax, double, double)               \
    F(I16X8RelaxedQ15mulrS, saturatingRoundingQMul, int16_t, int16_t)
603+
// Relaxed SIMD binary operations with bespoke (non per-lane-op) handlers.
// F(name, handler) — handler is the full templated implementation.
#define FOR_EACH_BYTECODE_RELAXED_SIMD_BINARY_OTHER(F)         \
    F(I8X16RelaxedSwizzle, (simdSwizzleOperation<uint8_t>))    \
    F(I16X8DotI8X16I7X16S, (simdDotOperation<int8_t, uint16_t>))
607+
// Relaxed SIMD ternary operations (fused multiply-add family).
// F(name, op, typeA, typeB) — same argument shape as the binary-op table;
// expanded with DEFINE_TERNARY_BYTECODE, which ignores the trailing args.
#define FOR_EACH_BYTECODE_RELAXED_SIMD_TERNARY_OP(F)           \
    F(F32X4RelaxedMadd, floatMulAdd, float, float)             \
    F(F32X4RelaxedNmadd, floatNegMulAdd, float, float)         \
    F(F64X2RelaxedMadd, floatMulAdd, double, double)           \
    F(F64X2RelaxedNmadd, floatNegMulAdd, double, double)
613+
// Relaxed SIMD ternary operations with bespoke handlers
// (dot-product-accumulate and the per-width lane selects).
// F(name, handler) — handler is the full implementation.
#define FOR_EACH_BYTECODE_RELAXED_SIMD_TERNARY_OTHER(F)        \
    F(I32X4DotI8X16I7X16AddS, (simdDotAddOperation))           \
    F(I8X16RelaxedLaneSelect, (simdBitSelectOperation))        \
    F(I16X8RelaxedLaneSelect, (simdBitSelectOperation))        \
    F(I32X4RelaxedLaneSelect, (simdBitSelectOperation))        \
    F(I64X2RelaxedLaneSelect, (simdBitSelectOperation))
620+
// Master X-macro: applies F to every byte-code group, now including the
// relaxed SIMD groups.
// NOTE(review): the expansion order presumably determines the Opcode
// enumeration order (see Opcode::name##Opcode usage) — do not reorder
// entries without verifying serialized/JIT'd opcode values are unaffected.
#define FOR_EACH_BYTECODE(F)                          \
    FOR_EACH_BYTECODE_OP(F)                           \
    FOR_EACH_BYTECODE_BINARY_OP(F)                    \
    FOR_EACH_BYTECODE_UNARY_OP(F)                     \
    FOR_EACH_BYTECODE_UNARY_OP_2(F)                   \
    FOR_EACH_BYTECODE_LOAD_OP(F)                      \
    FOR_EACH_BYTECODE_STORE_OP(F)                     \
    FOR_EACH_BYTECODE_SIMD_BINARY_OP(F)               \
    FOR_EACH_BYTECODE_SIMD_BINARY_SHIFT_OP(F)         \
    FOR_EACH_BYTECODE_SIMD_BINARY_OTHER(F)            \
    FOR_EACH_BYTECODE_RELAXED_SIMD_BINARY_OP(F)       \
    FOR_EACH_BYTECODE_RELAXED_SIMD_BINARY_OTHER(F)    \
    FOR_EACH_BYTECODE_SIMD_UNARY_OP(F)                \
    FOR_EACH_BYTECODE_SIMD_UNARY_CONVERT_OP(F)        \
    FOR_EACH_BYTECODE_RELAXED_SIMD_UNARY_OTHER(F)     \
    FOR_EACH_BYTECODE_RELAXED_SIMD_TERNARY_OP(F)      \
    FOR_EACH_BYTECODE_RELAXED_SIMD_TERNARY_OTHER(F)   \
    FOR_EACH_BYTECODE_SIMD_UNARY_OTHER(F)             \
    FOR_EACH_BYTECODE_SIMD_LOAD_SPLAT_OP(F)           \
    FOR_EACH_BYTECODE_SIMD_LOAD_EXTEND_OP(F)          \
    FOR_EACH_BYTECODE_SIMD_LOAD_LANE_OP(F)            \
    FOR_EACH_BYTECODE_SIMD_STORE_LANE_OP(F)           \
    FOR_EACH_BYTECODE_SIMD_EXTRACT_LANE_OP(F)         \
    FOR_EACH_BYTECODE_SIMD_REPLACE_LANE_OP(F)         \
    FOR_EACH_BYTECODE_SIMD_ETC_OP(F)                  \
    FOR_EACH_BYTECODE_ATOMIC_LOAD_OP(F)               \
    FOR_EACH_BYTECODE_ATOMIC_STORE_OP(F)              \
    FOR_EACH_BYTECODE_ATOMIC_RMW_OP(F)                \
    FOR_EACH_BYTECODE_ATOMIC_RMW_CMPXCHG_OP(F)        \
    FOR_EACH_BYTECODE_ATOMIC_OTHER(F)
616651
617652class ByteCode {
@@ -726,6 +761,24 @@ class ByteCodeOffset2Value : public ByteCode {
726761 uint32_t m_value;
727762};
728763
764+ class ByteCodeOffset4 : public ByteCode {
765+ public:
766+ ByteCodeOffset4 (Opcode opcode, ByteCodeStackOffset src0Offset, ByteCodeStackOffset src1Offset, ByteCodeStackOffset src2Offset, ByteCodeStackOffset dstOffset)
767+ : ByteCode(opcode)
768+ , m_stackOffsets{ src0Offset, src1Offset, src2Offset, dstOffset }
769+ {
770+ }
771+
772+ const ByteCodeStackOffset* srcOffsets () const { return m_stackOffsets; }
773+ ByteCodeStackOffset src0Offset () const { return m_stackOffsets[0 ]; }
774+ ByteCodeStackOffset src1Offset () const { return m_stackOffsets[1 ]; }
775+ ByteCodeStackOffset src2Offset () const { return m_stackOffsets[2 ]; }
776+ ByteCodeStackOffset dstOffset () const { return m_stackOffsets[3 ]; }
777+
778+ protected:
779+ ByteCodeStackOffset m_stackOffsets[4 ];
780+ };
781+
729782class ByteCodeOffset4Value : public ByteCode {
730783public:
731784 ByteCodeOffset4Value (Opcode opcode, ByteCodeStackOffset src0Offset, ByteCodeStackOffset src1Offset, ByteCodeStackOffset src2Offset, ByteCodeStackOffset dstOffset, uint32_t value)
@@ -923,15 +976,56 @@ class UnaryOperation : public ByteCodeOffset2 {
923976 DEFINE_UNARY_BYTECODE_DUMP (name) \
924977 };
925978
// dummy ByteCode for ternary operation
// Common base for the three-source/one-destination byte codes generated by
// DEFINE_TERNARY_BYTECODE; concrete subclasses supply the real dump().
class TernaryOperation : public ByteCodeOffset4 {
public:
    TernaryOperation(Opcode code, ByteCodeStackOffset src0Offset, ByteCodeStackOffset src1Offset, ByteCodeStackOffset src2Offset, ByteCodeStackOffset dstOffset)
        : ByteCodeOffset4(code, src0Offset, src1Offset, src2Offset, dstOffset)
    {
    }

#if !defined(NDEBUG)
    // Intentionally empty: each generated subclass shadows this with
    // DEFINE_TERNARY_BYTECODE_DUMP.
    void dump(size_t pos)
    {
    }
#endif
};
993+
#if !defined(NDEBUG)
// Generates the debug dump() for a ternary byte code: prints the three
// source offsets and the destination offset from m_stackOffsets.
// NOTE(review): labels are 1-based ("src1".."src3") while the accessors are
// 0-based (src0Offset..src2Offset) — confirm this matches the labeling
// convention of the other byte-code dump macros.
#define DEFINE_TERNARY_BYTECODE_DUMP(name)   \
    void dump(size_t pos)                    \
    {                                        \
        printf(#name " src1: %" PRIu32 " src2: %" PRIu32 " src3: %" PRIu32 " dst: %" PRIu32, (uint32_t)m_stackOffsets[0], (uint32_t)m_stackOffsets[1], (uint32_t)m_stackOffsets[2], (uint32_t)m_stackOffsets[3]); \
    }
#else
// Release builds: dump() is omitted entirely.
#define DEFINE_TERNARY_BYTECODE_DUMP(name)
#endif
1003+
// Defines a concrete ternary byte-code class `name` deriving from
// TernaryOperation. The trailing variadic arguments (operation, lane types
// from the X-macro tables) are intentionally ignored here; they are consumed
// by other expansions of the same tables (e.g. the interpreter dispatch).
#define DEFINE_TERNARY_BYTECODE(name, ...)                                                                                                  \
    class name : public TernaryOperation {                                                                                                  \
    public:                                                                                                                                 \
        name(ByteCodeStackOffset src0Offset, ByteCodeStackOffset src1Offset, ByteCodeStackOffset src2Offset, ByteCodeStackOffset dstOffset) \
            : TernaryOperation(Opcode::name##Opcode, src0Offset, src1Offset, src2Offset, dstOffset)                                         \
        {                                                                                                                                   \
        }                                                                                                                                   \
        DEFINE_TERNARY_BYTECODE_DUMP(name)                                                                                                  \
    };
1013+
1014+
// Instantiate one byte-code class per table entry, including the new
// relaxed SIMD groups (binary/unary via the existing DEFINE_* macros,
// ternary via DEFINE_TERNARY_BYTECODE above).
FOR_EACH_BYTECODE_BINARY_OP(DEFINE_BINARY_BYTECODE)
FOR_EACH_BYTECODE_UNARY_OP(DEFINE_UNARY_BYTECODE)
FOR_EACH_BYTECODE_UNARY_OP_2(DEFINE_UNARY_BYTECODE)
FOR_EACH_BYTECODE_SIMD_BINARY_OP(DEFINE_BINARY_BYTECODE)
FOR_EACH_BYTECODE_SIMD_BINARY_SHIFT_OP(DEFINE_BINARY_BYTECODE)
FOR_EACH_BYTECODE_SIMD_BINARY_OTHER(DEFINE_BINARY_BYTECODE)
FOR_EACH_BYTECODE_RELAXED_SIMD_BINARY_OP(DEFINE_BINARY_BYTECODE)
FOR_EACH_BYTECODE_RELAXED_SIMD_BINARY_OTHER(DEFINE_BINARY_BYTECODE)
FOR_EACH_BYTECODE_SIMD_UNARY_OP(DEFINE_UNARY_BYTECODE)
FOR_EACH_BYTECODE_SIMD_UNARY_CONVERT_OP(DEFINE_UNARY_BYTECODE)
FOR_EACH_BYTECODE_SIMD_UNARY_OTHER(DEFINE_UNARY_BYTECODE)
FOR_EACH_BYTECODE_RELAXED_SIMD_UNARY_OTHER(DEFINE_UNARY_BYTECODE)
FOR_EACH_BYTECODE_RELAXED_SIMD_TERNARY_OP(DEFINE_TERNARY_BYTECODE)
FOR_EACH_BYTECODE_RELAXED_SIMD_TERNARY_OTHER(DEFINE_TERNARY_BYTECODE)
9351029
9361030#define DEFINE_MOVE_BYTECODE (name ) \
9371031 class name : public ByteCodeOffset2 { \
@@ -1910,31 +2004,19 @@ FOR_EACH_BYTECODE_ATOMIC_RMW_CMPXCHG_OP(DEFINE_RMW_CMPXCHG_BYTECODE)
19102004#undef DEFINE_RMW_BYTECODE
19112005
19122006// FOR_EACH_BYTECODE_SIMD_ETC_OP
// Byte code for SIMD v128.bitselect: three sources (lhs, rhs, control mask
// c) and one destination. Rebased onto ByteCodeOffset4 so the offsets live
// in the shared m_stackOffsets array instead of bespoke members.
class V128BitSelect : public ByteCodeOffset4 {
public:
    V128BitSelect(ByteCodeStackOffset lhs, ByteCodeStackOffset rhs, ByteCodeStackOffset c, ByteCodeStackOffset dst)
        : ByteCodeOffset4(Opcode::V128BitSelectOpcode, lhs, rhs, c, dst)
    {
    }

#if !defined(NDEBUG)
    void dump(size_t pos)
    {
        // Slots 0..2: lhs, rhs, c; slot 3: destination.
        printf("v128.bitselect lhs: %" PRIu32 " rhs: %" PRIu32 " c: %" PRIu32 " dst: %" PRIu32, (uint32_t)m_stackOffsets[0], (uint32_t)m_stackOffsets[1], (uint32_t)m_stackOffsets[2], (uint32_t)m_stackOffsets[3]);
    }
#endif
};
19392021
19402022class V128Load32Zero : public MemoryLoad {
0 commit comments