vex: r3290 - /trunk/priv/guest_arm_toIR.c


Author: sewardj
Date: Thu Jan 12 11:21:08 2017
New Revision: 3290

Log:
Implement ARMv8 VSEL<c>.F64 d_d_d, VSEL<c>.F32 s_s_s.

Modified:
    trunk/priv/guest_arm_toIR.c

Modified: trunk/priv/guest_arm_toIR.c
==============================================================================
--- trunk/priv/guest_arm_toIR.c (original)
+++ trunk/priv/guest_arm_toIR.c Thu Jan 12 11:21:08 2017
@@ -13462,6 +13462,54 @@
      /* else fall through */
    }
 
+   /* ----------- VSEL<c>.F64 d_d_d, VSEL<c>.F32 s_s_s ----------- */
+   /*        31   27    22 21 19 15 11  8 7 6 5 4 3
+      T1/A1: 1111 11100 D  cc n  d  101 1 N 0 M 0 m  VSEL<c>.F64 Dd, Dn, Dm
+      T1/A1: 1111 11100 D  cc n  d  101 0 N 0 M 0 m  VSEL<c>.F32 Sd, Sn, Sm
+
+      ARM encoding is in NV space.
+      In Thumb mode, we must not be in an IT block.
+   */
+   if (INSN(31,23) == BITS9(1,1,1,1,1,1,1,0,0) && INSN(11,9) == BITS3(1,0,1)
+       && INSN(6,6) == 0 && INSN(4,4) == 0) {
+      UInt bit_D  = INSN(22,22);
+      UInt fld_cc = INSN(21,20);
+      UInt fld_n  = INSN(19,16);
+      UInt fld_d  = INSN(15,12);
+      Bool isF64  = INSN(8,8) == 1;
+      UInt bit_N  = INSN(7,7);
+      UInt bit_M  = INSN(5,5);
+      UInt fld_m  = INSN(3,0);
+
+      UInt dd = isF64 ? ((bit_D << 4) | fld_d) : ((fld_d << 1) | bit_D);
+      UInt nn = isF64 ? ((bit_N << 4) | fld_n) : ((fld_n << 1) | bit_N);
+      UInt mm = isF64 ? ((bit_M << 4) | fld_m) : ((fld_m << 1) | bit_M);
+
+      UInt cc_1 = (fld_cc >> 1) & 1;
+      UInt cc_0 = (fld_cc >> 0) & 1;
+      UInt cond = (fld_cc << 2) | ((cc_1 ^ cc_0) << 1) | 0;
+
+      if (isT) {
+         gen_SIGILL_T_if_in_ITBlock(old_itstate, new_itstate);
+      }
+      /* In ARM mode, this is statically unconditional.  In Thumb mode,
+         this must be dynamically unconditional, and we've SIGILLd if not.
+         In either case we can create unconditional IR. */
+
+      IRTemp guard = newTemp(Ity_I32);
+      assign(guard, mk_armg_calculate_condition(cond));
+      IRExpr* srcN = (isF64 ? llGetDReg : llGetFReg)(nn);
+      IRExpr* srcM = (isF64 ? llGetDReg : llGetFReg)(mm);
+      IRExpr* res  = IRExpr_ITE(unop(Iop_32to1, mkexpr(guard)), srcN, srcM);
+      (isF64 ? llPutDReg : llPutFReg)(dd, res);
+
+      UChar rch = isF64 ? 'd' : 'f';
+      DIP("vsel%s.%s %c%u, %c%u, %c%u\n",
+          nCC(cond), isF64 ? "f64" : "f32", rch, dd, rch, nn, rch, mm);
+      return True;
+   }
+   /* fall through */
+
    /* ---------- Doesn't match anything. ---------- */
    return False;
 


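Note on the cond computation in the patch: VSEL is a conditional select, Dd = cond ? Dn : Dm, and the instruction encodes only the four conditions EQ, VS, GE and GT in its 2-bit cc field. The expression (fld_cc << 2) | ((cc_1 ^ cc_0) << 1) expands that field into a full 4-bit ARM condition number, which is then fed to mk_armg_calculate_condition and nCC. A minimal standalone sketch (not part of the patch; condName and its table are illustrative only) that prints the mapping for all four encodings:

   #include <stdio.h>

   /* Illustrative only: replicate the patch's cond computation for each
      value of the 2-bit cc field and show which ARM condition it selects.
      VSEL encodes only EQ, VS, GE and GT. */
   static const char* condName ( unsigned int cond )
   {
      static const char* names[16]
         = { "eq","ne","cs","cc","mi","pl","vs","vc",
             "hi","ls","ge","lt","gt","le","al","nv" };
      return names[cond & 15];
   }

   int main ( void )
   {
      unsigned int fld_cc;
      for (fld_cc = 0; fld_cc < 4; fld_cc++) {
         unsigned int cc_1 = (fld_cc >> 1) & 1;
         unsigned int cc_0 = (fld_cc >> 0) & 1;
         unsigned int cond = (fld_cc << 2) | ((cc_1 ^ cc_0) << 1) | 0;
         printf("cc=%u%u -> cond=%u (%s)\n", cc_1, cc_0, cond,
                condName(cond));
      }
      return 0;   /* prints eq, vs, ge, gt */
   }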