@@ -702,22 +702,22 @@ static u32 vfp_single_ftosiz(int sd, int unused, s32 m, u32 fpscr)
 	return vfp_single_ftosi(sd, unused, m, FPSCR_ROUND_TOZERO);
 }
 
-static u32 (* const fop_extfns[32])(int sd, int unused, s32 m, u32 fpscr) = {
-	[FEXT_TO_IDX(FEXT_FCPY)] = vfp_single_fcpy,
-	[FEXT_TO_IDX(FEXT_FABS)] = vfp_single_fabs,
-	[FEXT_TO_IDX(FEXT_FNEG)] = vfp_single_fneg,
-	[FEXT_TO_IDX(FEXT_FSQRT)] = vfp_single_fsqrt,
-	[FEXT_TO_IDX(FEXT_FCMP)] = vfp_single_fcmp,
-	[FEXT_TO_IDX(FEXT_FCMPE)] = vfp_single_fcmpe,
-	[FEXT_TO_IDX(FEXT_FCMPZ)] = vfp_single_fcmpz,
-	[FEXT_TO_IDX(FEXT_FCMPEZ)] = vfp_single_fcmpez,
-	[FEXT_TO_IDX(FEXT_FCVT)] = vfp_single_fcvtd,
-	[FEXT_TO_IDX(FEXT_FUITO)] = vfp_single_fuito,
-	[FEXT_TO_IDX(FEXT_FSITO)] = vfp_single_fsito,
-	[FEXT_TO_IDX(FEXT_FTOUI)] = vfp_single_ftoui,
-	[FEXT_TO_IDX(FEXT_FTOUIZ)] = vfp_single_ftouiz,
-	[FEXT_TO_IDX(FEXT_FTOSI)] = vfp_single_ftosi,
-	[FEXT_TO_IDX(FEXT_FTOSIZ)] = vfp_single_ftosiz,
+static struct op fops_ext[32] = {
+	[FEXT_TO_IDX(FEXT_FCPY)] = {vfp_single_fcpy, 0},
+	[FEXT_TO_IDX(FEXT_FABS)] = {vfp_single_fabs, 0},
+	[FEXT_TO_IDX(FEXT_FNEG)] = {vfp_single_fneg, 0},
+	[FEXT_TO_IDX(FEXT_FSQRT)] = {vfp_single_fsqrt, 0},
+	[FEXT_TO_IDX(FEXT_FCMP)] = {vfp_single_fcmp, OP_SCALAR},
+	[FEXT_TO_IDX(FEXT_FCMPE)] = {vfp_single_fcmpe, OP_SCALAR},
+	[FEXT_TO_IDX(FEXT_FCMPZ)] = {vfp_single_fcmpz, OP_SCALAR},
+	[FEXT_TO_IDX(FEXT_FCMPEZ)] = {vfp_single_fcmpez, OP_SCALAR},
+	[FEXT_TO_IDX(FEXT_FCVT)] = {vfp_single_fcvtd, (OP_SD|OP_SCALAR)},
+	[FEXT_TO_IDX(FEXT_FUITO)] = {vfp_single_fuito, OP_SCALAR},
+	[FEXT_TO_IDX(FEXT_FSITO)] = {vfp_single_fsito, OP_SCALAR},
+	[FEXT_TO_IDX(FEXT_FTOUI)] = {vfp_single_ftoui, OP_SCALAR},
+	[FEXT_TO_IDX(FEXT_FTOUIZ)] = {vfp_single_ftouiz, OP_SCALAR},
+	[FEXT_TO_IDX(FEXT_FTOSI)] = {vfp_single_ftosi, OP_SCALAR},
+	[FEXT_TO_IDX(FEXT_FTOSIZ)] = {vfp_single_ftosiz, OP_SCALAR},
 };
 
@@ -1151,16 +1151,16 @@ static u32 vfp_single_fdiv(int sd, int sn, s32 m, u32 fpscr)
 	return FPSCR_IOC;
 }
 
-static u32 (* const fop_fns[16])(int sd, int sn, s32 m, u32 fpscr) = {
-	[FOP_TO_IDX(FOP_FMAC)] = vfp_single_fmac,
-	[FOP_TO_IDX(FOP_FNMAC)] = vfp_single_fnmac,
-	[FOP_TO_IDX(FOP_FMSC)] = vfp_single_fmsc,
-	[FOP_TO_IDX(FOP_FNMSC)] = vfp_single_fnmsc,
-	[FOP_TO_IDX(FOP_FMUL)] = vfp_single_fmul,
-	[FOP_TO_IDX(FOP_FNMUL)] = vfp_single_fnmul,
-	[FOP_TO_IDX(FOP_FADD)] = vfp_single_fadd,
-	[FOP_TO_IDX(FOP_FSUB)] = vfp_single_fsub,
-	[FOP_TO_IDX(FOP_FDIV)] = vfp_single_fdiv,
+static struct op fops[16] = {
+	[FOP_TO_IDX(FOP_FMAC)] = {vfp_single_fmac, 0},
+	[FOP_TO_IDX(FOP_FNMAC)] = {vfp_single_fnmac, 0},
+	[FOP_TO_IDX(FOP_FMSC)] = {vfp_single_fmsc, 0},
+	[FOP_TO_IDX(FOP_FNMSC)] = {vfp_single_fnmsc, 0},
+	[FOP_TO_IDX(FOP_FMUL)] = {vfp_single_fmul, 0},
+	[FOP_TO_IDX(FOP_FNMUL)] = {vfp_single_fnmul, 0},
+	[FOP_TO_IDX(FOP_FADD)] = {vfp_single_fadd, 0},
+	[FOP_TO_IDX(FOP_FSUB)] = {vfp_single_fsub, 0},
+	[FOP_TO_IDX(FOP_FDIV)] = {vfp_single_fdiv, 0},
 };
 
 #define FREG_BANK(x) ((x) & 0x18)
@@ -1174,19 +1174,18 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
 	unsigned int sn = vfp_get_sn(inst);
 	unsigned int sm = vfp_get_sm(inst);
 	unsigned int vecitr, veclen, vecstride;
-	u32 (*fop)(int, int, s32, u32);
+	struct op *fop;
 
-	veclen = fpscr & FPSCR_LENGTH_MASK;
 	vecstride = 1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK);
 
+	fop = (op == FOP_EXT) ? &fops_ext[FEXT_TO_IDX(inst)] : &fops[FOP_TO_IDX(op)];
 	/*
 	 * fcvtsd takes a dN register number as destination, not sN.
 	 * Technically, if bit 0 of dd is set, this is an invalid
 	 * instruction. However, we ignore this for efficiency.
 	 * It also only operates on scalars.
 	 */
-	if ((inst & FEXT_MASK) == FEXT_FCVT) {
-		veclen = 0;
+	if (fop->flags & OP_SD) {
 		dest = vfp_get_dd(inst);
 	} else
 		dest = vfp_get_sd(inst);
@@ -1195,21 +1194,22 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
 	 * If destination bank is zero, vector length is always '1'.
 	 * ARM DDI0100F C5.1.3, C5.3.2.
 	 */
-	if (FREG_BANK(dest) == 0)
+	if ((fop->flags & OP_SCALAR) || (FREG_BANK(dest) == 0))
 		veclen = 0;
+	else
+		veclen = fpscr & FPSCR_LENGTH_MASK;
 
 	pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride,
 		 (veclen >> FPSCR_LENGTH_BIT) + 1);
 
-	fop = (op == FOP_EXT) ? fop_extfns[FEXT_TO_IDX(inst)] : fop_fns[FOP_TO_IDX(op)];
-	if (!fop)
+	if (!fop->fn)
 		goto invalid;
 
 	for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) {
 		s32 m = vfp_get_float(sm);
 		u32 except;
 
-		if (op == FOP_EXT && (inst & FEXT_MASK) == FEXT_FCVT)
+		if (op == FOP_EXT && (fop->flags & OP_SD))
 			pr_debug("VFP: itr%d (d%u) = op[%u] (s%u=%08x)\n",
 				 vecitr >> FPSCR_LENGTH_BIT, dest, sn, sm, m);
 		else if (op == FOP_EXT)
@@ -1220,7 +1220,7 @@ u32 vfp_single_cpdo(u32 inst, u32 fpscr)
 				 vecitr >> FPSCR_LENGTH_BIT, dest, sn,
 				 FOP_TO_IDX(op), sm, m);
 
-		except = fop(dest, sn, m, fpscr);
+		except = fop->fn(dest, sn, m, fpscr);
 		pr_debug("VFP: itr%d: exceptions=%08x\n",
 			 vecitr >> FPSCR_LENGTH_BIT, except);
 
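
The hunks above replace the bare function-pointer tables (fop_extfns[], fop_fns[]) with struct op entries that pair each handler with a flags word, so vfp_single_cpdo() can derive "force scalar" (OP_SCALAR) and "dN destination" (OP_SD) behaviour from the table instead of special-casing FEXT_FCVT. The following stand-alone C sketch shows that dispatch pattern in isolation; the struct op layout, the flag values and the helper names are assumptions modelled on this diff, not the kernel's actual VFP header definitions.

/*
 * Illustrative sketch only: struct op, OP_SCALAR and OP_SD mirror what the
 * patch relies on, but the real definitions live in the kernel's VFP
 * headers; the flag values and helper names here are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef int32_t s32;

#define OP_SCALAR (1 << 0)	/* assumed: op always executes as a scalar */
#define OP_SD     (1 << 1)	/* assumed: destination is a double (dN) register */

struct op {
	u32 (*fn)(int dest, int sn, s32 m, u32 fpscr);
	u32 flags;
};

/* Dummy handlers standing in for the real vfp_single_* routines. */
static u32 do_copy(int dest, int sn, s32 m, u32 fpscr) { return 0; }
static u32 do_cvtd(int dest, int sn, s32 m, u32 fpscr) { return 0; }

static const struct op ops[] = {
	{ do_copy, 0 },			/* vector-capable op */
	{ do_cvtd, OP_SCALAR | OP_SD },	/* scalar-only, writes a dN register */
};

/*
 * Miniature version of the veclen decision in the patched vfp_single_cpdo():
 * a flag on the table entry, not a per-opcode test, forces scalar execution.
 */
static unsigned int pick_veclen(const struct op *fop, unsigned int dest_bank,
				u32 fpscr_len)
{
	if ((fop->flags & OP_SCALAR) || dest_bank == 0)
		return 0;		/* single iteration */
	return fpscr_len;		/* honour the FPSCR vector length */
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
		const struct op *fop = &ops[i];
		unsigned int veclen = pick_veclen(fop, 0x8, 3);

		printf("op %u: veclen=%u dest=%s except=%u\n", i, veclen,
		       (fop->flags & OP_SD) ? "dN" : "sN",
		       fop->fn(0, 1, 0, 0));
	}
	return 0;
}

Built with any C compiler, the sketch reports veclen=0 for the scalar-only entry regardless of the FPSCR length field, which is the same decision the patched vfp_single_cpdo() now takes from fop->flags rather than from the instruction encoding.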