Improve assertions: replace bare lua_assert() calls with lj_assertA()/lj_assertJ() variants that take an explicit context argument (ASMState/jit_State) and a descriptive failure message.

This commit is contained in:
Mike Pall
2020-06-13 00:52:54 +02:00
parent 8b55054473
commit 8ae5170cdc
71 changed files with 1363 additions and 927 deletions

View File

@@ -23,7 +23,7 @@ static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow)
{
Reg r = IR(ref)->r;
if (ra_noreg(r)) {
if (!(allow & RSET_FPR) && irref_isk(ref) && get_kval(IR(ref)) == 0)
if (!(allow & RSET_FPR) && irref_isk(ref) && get_kval(as, ref) == 0)
return RID_ZERO;
r = ra_allocref(as, ref, allow);
} else {
@@ -66,10 +66,10 @@ static void asm_sparejump_setup(ASMState *as)
{
MCode *mxp = as->mcbot;
if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == sizeof(MCLink)) {
lua_assert(MIPSI_NOP == 0);
lj_assertA(MIPSI_NOP == 0, "bad NOP");
memset(mxp, 0, MIPS_SPAREJUMP*2*sizeof(MCode));
mxp += MIPS_SPAREJUMP*2;
lua_assert(mxp < as->mctop);
lj_assertA(mxp < as->mctop, "MIPS_SPAREJUMP too big");
lj_mcode_sync(as->mcbot, mxp);
lj_mcode_commitbot(as->J, mxp);
as->mcbot = mxp;
@@ -84,7 +84,8 @@ static void asm_exitstub_setup(ASMState *as)
/* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */
*--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno;
*--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu);
lua_assert(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0);
lj_assertA(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0,
"branch target out of range");
*--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0;
as->mctop = mxp;
}
@@ -195,20 +196,20 @@ static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
if (ra_noreg(ir->r) && canfuse(as, ir)) {
if (ir->o == IR_ADD) {
intptr_t ofs2;
if (irref_isk(ir->op2) && (ofs2 = ofs + get_kval(IR(ir->op2)),
if (irref_isk(ir->op2) && (ofs2 = ofs + get_kval(as, ir->op2),
checki16(ofs2))) {
ref = ir->op1;
ofs = (int32_t)ofs2;
}
} else if (ir->o == IR_STRREF) {
intptr_t ofs2 = 65536;
lua_assert(ofs == 0);
lj_assertA(ofs == 0, "bad usage");
ofs = (int32_t)sizeof(GCstr);
if (irref_isk(ir->op2)) {
ofs2 = ofs + get_kval(IR(ir->op2));
ofs2 = ofs + get_kval(as, ir->op2);
ref = ir->op1;
} else if (irref_isk(ir->op1)) {
ofs2 = ofs + get_kval(IR(ir->op1));
ofs2 = ofs + get_kval(as, ir->op1);
ref = ir->op2;
}
if (!checki16(ofs2)) {
@@ -252,7 +253,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
#if !LJ_SOFTFP
if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR &&
!(ci->flags & CCI_VARARG)) {
lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */
lj_assertA(rset_test(as->freeset, fpr),
"reg %d not free", fpr); /* Already evicted. */
ra_leftov(as, fpr, ref);
fpr += LJ_32 ? 2 : 1;
gpr += (LJ_32 && irt_isnum(ir->t)) ? 2 : 1;
@@ -264,7 +266,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
#endif
if (LJ_32 && irt_isnum(ir->t)) gpr = (gpr+1) & ~1;
if (gpr <= REGARG_LASTGPR) {
lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */
lj_assertA(rset_test(as->freeset, gpr),
"reg %d not free", gpr); /* Already evicted. */
#if !LJ_SOFTFP
if (irt_isfp(ir->t)) {
RegSet of = as->freeset;
@@ -277,7 +280,8 @@ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
#if LJ_32
emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1);
emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r);
lua_assert(rset_test(as->freeset, gpr+1)); /* Already evicted. */
lj_assertA(rset_test(as->freeset, gpr+1),
"reg %d not free", gpr+1); /* Already evicted. */
gpr += 2;
#else
emit_tg(as, MIPSI_DMFC1, gpr, r);
@@ -347,7 +351,7 @@ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
#endif
ra_evictset(as, drop); /* Evictions must be performed first. */
if (ra_used(ir)) {
lua_assert(!irt_ispri(ir->t));
lj_assertA(!irt_ispri(ir->t), "PRI dest");
if (!LJ_SOFTFP && irt_isfp(ir->t)) {
if ((ci->flags & CCI_CASTU64)) {
int32_t ofs = sps_scale(ir->s);
@@ -395,7 +399,7 @@ static void asm_callx(ASMState *as, IRIns *ir)
func = ir->op2; irf = IR(func);
if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
if (irref_isk(func)) { /* Call to constant address. */
ci.func = (ASMFunction)(void *)get_kval(irf);
ci.func = (ASMFunction)(void *)get_kval(as, func);
} else { /* Need specific register for indirect calls. */
Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR));
MCode *p = as->mcp;
@@ -512,15 +516,19 @@ static void asm_conv(ASMState *as, IRIns *ir)
#endif
IRRef lref = ir->op1;
#if LJ_32
lua_assert(!(irt_isint64(ir->t) ||
(st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */
/* 64 bit integer conversions are handled by SPLIT. */
lj_assertA(!(irt_isint64(ir->t) || (st == IRT_I64 || st == IRT_U64)),
"IR %04d has unsplit 64 bit type",
(int)(ir - as->ir) - REF_BIAS);
#endif
#if LJ_SOFTFP32
/* FP conversions are handled by SPLIT. */
lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT));
lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
"IR %04d has FP type",
(int)(ir - as->ir) - REF_BIAS);
/* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
#else
lua_assert(irt_type(ir->t) != st);
lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
#if !LJ_SOFTFP
if (irt_isfp(ir->t)) {
Reg dest = ra_dest(as, ir, RSET_FPR);
@@ -579,7 +587,8 @@ static void asm_conv(ASMState *as, IRIns *ir)
} else if (stfp) { /* FP to integer conversion. */
if (irt_isguard(ir->t)) {
/* Checked conversions are only supported from number to int. */
lua_assert(irt_isint(ir->t) && st == IRT_NUM);
lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
"bad type for checked CONV");
asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
} else {
Reg dest = ra_dest(as, ir, RSET_GPR);
@@ -679,7 +688,8 @@ static void asm_conv(ASMState *as, IRIns *ir)
} else if (stfp) { /* FP to integer conversion. */
if (irt_isguard(ir->t)) {
/* Checked conversions are only supported from number to int. */
lua_assert(irt_isint(ir->t) && st == IRT_NUM);
lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
"bad type for checked CONV");
asm_tointg(as, ir, RID_NONE);
} else {
IRCallID cid = irt_is64(ir->t) ?
@@ -698,7 +708,7 @@ static void asm_conv(ASMState *as, IRIns *ir)
Reg dest = ra_dest(as, ir, RSET_GPR);
if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
if ((ir->op2 & IRCONV_SEXT)) {
if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) {
emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left);
@@ -795,7 +805,8 @@ static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref)
{
RegSet allow = rset_exclude(RSET_GPR, base);
IRIns *ir = IR(ref);
lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
"store of IR type %d", irt_type(ir->t));
if (irref_isk(ref)) {
TValue k;
lj_ir_kvalue(as->J->L, &k, ir);
@@ -944,7 +955,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge)
if (isk && irt_isaddr(kt)) {
k = ((int64_t)irt_toitype(irkey->t) << 47) | irkey[1].tv.u64;
} else {
lua_assert(irt_ispri(kt) && !irt_isnil(kt));
lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
k = ~((int64_t)~irt_toitype(ir->t) << 47);
}
cmp64 = ra_allock(as, k, allow);
@@ -1012,7 +1023,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge)
#endif
/* Load main position relative to tab->node into dest. */
khash = isk ? ir_khash(irkey) : 1;
khash = isk ? ir_khash(as, irkey) : 1;
if (khash == 0) {
emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node));
} else {
@@ -1020,7 +1031,7 @@ static void asm_href(ASMState *as, IRIns *ir, IROp merge)
if (isk)
tmphash = ra_allock(as, khash, allow);
emit_dst(as, MIPSI_AADDU, dest, dest, tmp1);
lua_assert(sizeof(Node) == 24);
lj_assertA(sizeof(Node) == 24, "bad Node size");
emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1);
emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3);
emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5);
@@ -1098,7 +1109,7 @@ static void asm_hrefk(ASMState *as, IRIns *ir)
Reg key = ra_scratch(as, allow);
int64_t k;
#endif
lua_assert(ofs % sizeof(Node) == 0);
lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
if (ofs > 32736) {
idx = dest;
rset_clear(allow, dest);
@@ -1127,7 +1138,7 @@ nolo:
emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4));
#else
if (irt_ispri(irkey->t)) {
lua_assert(!irt_isnil(irkey->t));
lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
k = ~((int64_t)~irt_toitype(irkey->t) << 47);
} else if (irt_isnum(irkey->t)) {
k = (int64_t)ir_knum(irkey)->u64;
@@ -1166,7 +1177,7 @@ static void asm_uref(ASMState *as, IRIns *ir)
/* Emit code for an FREF (field reference) IR instruction.
** No machine code is emitted: the assert message "unfused FREF" indicates
** an FREF is expected to have been fused into its consuming instruction,
** so this handler only asserts that the result register is unused.
** NOTE(review): diff rendering — the lua_assert line is the pre-commit
** form and the lj_assertA line its post-commit replacement; only one of
** the two exists in the actual source file.
*/
static void asm_fref(ASMState *as, IRIns *ir)
{
UNUSED(as); UNUSED(ir); /* Parameters intentionally unused: nothing to emit. */
lua_assert(!ra_used(ir));
lj_assertA(!ra_used(ir), "unfused FREF");
}
static void asm_strref(ASMState *as, IRIns *ir)
@@ -1221,14 +1232,17 @@ static void asm_strref(ASMState *as, IRIns *ir)
/* -- Loads and stores ---------------------------------------------------- */
static MIPSIns asm_fxloadins(IRIns *ir)
static MIPSIns asm_fxloadins(ASMState *as, IRIns *ir)
{
UNUSED(as);
switch (irt_type(ir->t)) {
case IRT_I8: return MIPSI_LB;
case IRT_U8: return MIPSI_LBU;
case IRT_I16: return MIPSI_LH;
case IRT_U16: return MIPSI_LHU;
case IRT_NUM: lua_assert(!LJ_SOFTFP32); if (!LJ_SOFTFP) return MIPSI_LDC1;
case IRT_NUM:
lj_assertA(!LJ_SOFTFP32, "unsplit FP op");
if (!LJ_SOFTFP) return MIPSI_LDC1;
/* fallthrough */
case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_LWC1;
/* fallthrough */
@@ -1236,12 +1250,15 @@ static MIPSIns asm_fxloadins(IRIns *ir)
}
}
static MIPSIns asm_fxstoreins(IRIns *ir)
static MIPSIns asm_fxstoreins(ASMState *as, IRIns *ir)
{
UNUSED(as);
switch (irt_type(ir->t)) {
case IRT_I8: case IRT_U8: return MIPSI_SB;
case IRT_I16: case IRT_U16: return MIPSI_SH;
case IRT_NUM: lua_assert(!LJ_SOFTFP32); if (!LJ_SOFTFP) return MIPSI_SDC1;
case IRT_NUM:
lj_assertA(!LJ_SOFTFP32, "unsplit FP op");
if (!LJ_SOFTFP) return MIPSI_SDC1;
/* fallthrough */
case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_SWC1;
/* fallthrough */
@@ -1252,10 +1269,10 @@ static MIPSIns asm_fxstoreins(IRIns *ir)
static void asm_fload(ASMState *as, IRIns *ir)
{
Reg dest = ra_dest(as, ir, RSET_GPR);
MIPSIns mi = asm_fxloadins(ir);
MIPSIns mi = asm_fxloadins(as, ir);
Reg idx;
int32_t ofs;
if (ir->op1 == REF_NIL) {
if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
idx = RID_JGL;
ofs = (ir->op2 << 2) - 32768 - GG_OFS(g);
} else {
@@ -1269,7 +1286,7 @@ static void asm_fload(ASMState *as, IRIns *ir)
}
ofs = field_ofs[ir->op2];
}
lua_assert(!irt_isfp(ir->t));
lj_assertA(!irt_isfp(ir->t), "bad FP FLOAD");
emit_tsi(as, mi, dest, idx, ofs);
}
@@ -1280,8 +1297,8 @@ static void asm_fstore(ASMState *as, IRIns *ir)
IRIns *irf = IR(ir->op1);
Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
int32_t ofs = field_ofs[irf->op2];
MIPSIns mi = asm_fxstoreins(ir);
lua_assert(!irt_isfp(ir->t));
MIPSIns mi = asm_fxstoreins(as, ir);
lj_assertA(!irt_isfp(ir->t), "bad FP FSTORE");
emit_tsi(as, mi, src, idx, ofs);
}
}
@@ -1290,8 +1307,9 @@ static void asm_xload(ASMState *as, IRIns *ir)
{
Reg dest = ra_dest(as, ir,
(!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
lua_assert(LJ_TARGET_UNALIGNED || !(ir->op2 & IRXLOAD_UNALIGNED));
asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
lj_assertA(LJ_TARGET_UNALIGNED || !(ir->op2 & IRXLOAD_UNALIGNED),
"unaligned XLOAD");
asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
}
static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
@@ -1299,7 +1317,7 @@ static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
if (ir->r != RID_SINK) {
Reg src = ra_alloc1z(as, ir->op2,
(!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
rset_exclude(RSET_GPR, src), ofs);
}
}
@@ -1321,8 +1339,9 @@ static void asm_ahuvload(ASMState *as, IRIns *ir)
}
}
if (ra_used(ir)) {
lua_assert((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) ||
irt_isint(ir->t) || irt_isaddr(ir->t));
lj_assertA((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) ||
irt_isint(ir->t) || irt_isaddr(ir->t),
"bad load type %d", irt_type(ir->t));
dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
rset_clear(allow, dest);
#if LJ_64
@@ -1427,10 +1446,13 @@ static void asm_sload(ASMState *as, IRIns *ir)
#else
int32_t ofs = 8*((int32_t)ir->op1-2);
#endif
lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK));
lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
"bad parent SLOAD"); /* Handled by asm_head_side(). */
lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
"inconsistent SLOAD variant");
#if LJ_SOFTFP32
lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. */
lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
"unsplit SLOAD convert"); /* Handled by LJ_SOFTFP SPLIT. */
if (hiop && ra_used(ir+1)) {
type = ra_dest(as, ir+1, allow);
rset_clear(allow, type);
@@ -1443,8 +1465,9 @@ static void asm_sload(ASMState *as, IRIns *ir)
} else
#endif
if (ra_used(ir)) {
lua_assert((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) ||
irt_isint(ir->t) || irt_isaddr(ir->t));
lj_assertA((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) ||
irt_isint(ir->t) || irt_isaddr(ir->t),
"bad SLOAD type %d", irt_type(ir->t));
dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
rset_clear(allow, dest);
base = ra_alloc1(as, REF_BASE, allow);
@@ -1554,7 +1577,8 @@ static void asm_cnew(ASMState *as, IRIns *ir)
const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
IRRef args[4];
RegSet drop = RSET_SCRATCH;
lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL));
lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
"bad CNEW/CNEWI operands");
as->gcsteps++;
if (ra_hasreg(ir->r))
@@ -1570,7 +1594,7 @@ static void asm_cnew(ASMState *as, IRIns *ir)
int32_t ofs = sizeof(GCcdata);
if (sz == 8) {
ofs += 4;
lua_assert((ir+1)->o == IR_HIOP);
lj_assertA((ir+1)->o == IR_HIOP, "expected HIOP for CNEWI");
if (LJ_LE) ir++;
}
for (;;) {
@@ -1584,7 +1608,7 @@ static void asm_cnew(ASMState *as, IRIns *ir)
emit_tsi(as, sz == 8 ? MIPSI_SD : MIPSI_SW, ra_alloc1(as, ir->op2, allow),
RID_RET, sizeof(GCcdata));
#endif
lua_assert(sz == 4 || sz == 8);
lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
} else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
args[0] = ASMREF_L; /* lua_State *L */
@@ -1634,7 +1658,7 @@ static void asm_obar(ASMState *as, IRIns *ir)
MCLabel l_end;
Reg obj, val, tmp;
/* No need for other object barriers (yet). */
lua_assert(IR(ir->op1)->o == IR_UREFC);
lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
ra_evictset(as, RSET_SCRATCH);
l_end = emit_label(as);
args[0] = ASMREF_TMP1; /* global_State *g */
@@ -1709,7 +1733,7 @@ static void asm_add(ASMState *as, IRIns *ir)
Reg dest = ra_dest(as, ir, RSET_GPR);
Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
if (irref_isk(ir->op2)) {
intptr_t k = get_kval(IR(ir->op2));
intptr_t k = get_kval(as, ir->op2);
if (checki16(k)) {
emit_tsi(as, (LJ_64 && irt_is64(t)) ? MIPSI_DADDIU : MIPSI_ADDIU, dest,
left, k);
@@ -1810,7 +1834,7 @@ static void asm_arithov(ASMState *as, IRIns *ir)
{
/* TODO MIPSR6: bovc/bnvc. Caveat: no delay slot to load RID_TMP. */
Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR);
lua_assert(!irt_is64(ir->t));
lj_assertA(!irt_is64(ir->t), "bad usage");
if (irref_isk(ir->op2)) {
int k = IR(ir->op2)->i;
if (ir->o == IR_SUBOV) k = -k;
@@ -1997,7 +2021,7 @@ static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
Reg dest = ra_dest(as, ir, RSET_GPR);
Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
if (irref_isk(ir->op2)) {
intptr_t k = get_kval(IR(ir->op2));
intptr_t k = get_kval(as, ir->op2);
if (checku16(k)) {
emit_tsi(as, mik, dest, left, k);
return;
@@ -2030,7 +2054,7 @@ static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
#define asm_bshl(as, ir) asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL)
#define asm_bshr(as, ir) asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL)
#define asm_bsar(as, ir) asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA)
#define asm_brol(as, ir) lua_assert(0)
#define asm_brol(as, ir) lj_assertA(0, "unexpected BROL")
static void asm_bror(ASMState *as, IRIns *ir)
{
@@ -2222,13 +2246,13 @@ static void asm_comp(ASMState *as, IRIns *ir)
} else {
Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
if (op == IR_ABC) op = IR_UGT;
if ((op&4) == 0 && irref_isk(ir->op2) && get_kval(IR(ir->op2)) == 0) {
if ((op&4) == 0 && irref_isk(ir->op2) && get_kval(as, ir->op2) == 0) {
MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) :
((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ);
asm_guard(as, mi, left, 0);
} else {
if (irref_isk(ir->op2)) {
intptr_t k = get_kval(IR(ir->op2));
intptr_t k = get_kval(as, ir->op2);
if ((op&2)) k++;
if (checki16(k)) {
asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
@@ -2384,10 +2408,11 @@ static void asm_hiop(ASMState *as, IRIns *ir)
case IR_CNEWI:
/* Nothing to do here. Handled by lo op itself. */
break;
default: lua_assert(0); break;
default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
}
#else
UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */
/* Unused on MIPS64 or without SOFTFP or FFI. */
UNUSED(as); UNUSED(ir); lj_assertA(0, "unexpected HIOP");
#endif
}
@@ -2456,7 +2481,8 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
#if LJ_SOFTFP32
Reg tmp;
RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. */
/* LJ_SOFTFP: must be a number constant. */
lj_assertA(irref_isk(ref), "unsplit FP op");
tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow);
emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?4:0));
if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1);
@@ -2473,7 +2499,8 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
#if LJ_32
RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
Reg type;
lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
"restore of IR type %d", irt_type(ir->t));
if (!irt_ispri(ir->t)) {
Reg src = ra_alloc1(as, ref, allow);
rset_clear(allow, src);
@@ -2496,7 +2523,7 @@ static void asm_stack_restore(ASMState *as, SnapShot *snap)
}
checkmclim(as);
}
lua_assert(map + nent == flinks);
lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
}
/* -- GC handling --------------------------------------------------------- */
@@ -2694,7 +2721,7 @@ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
}
} else if (p+1 == pe) {
/* Patch NOP after code for inverted loop branch. Use of J is ok. */
lua_assert(p[1] == MIPSI_NOP);
lj_assertJ(p[1] == MIPSI_NOP, "expected NOP");
p[1] = tjump;
*p = MIPSI_NOP; /* Replace the load of the exit number. */
cstop = p+2;