Files
Run-Sun3-SunOS-4.1.1/tme-0.8_up/ic/sparc/sparc-insns-auto.c
Amberelle Mason ac30ff9032 Initial import
Initial import of SunOS 4.1.1 and TME 0.8
2023-05-01 12:16:40 -04:00

11649 lines
403 KiB
C

/* automatically generated by sparc-insns-auto.sh, do not edit! */
_TME_RCSID("$Id: sparc-insns-auto.sh,v 1.10 2010/06/05 16:13:41 fredette Exp $");
#include "sparc-impl.h"
/* an all-bits-zero float for use with _tme_sparc*_fpu_mem_fpreg(): */
#if TME_FLOAT_FORMAT_NULL != 0
#error "TME_FLOAT_FORMAT_NULL changed"
#endif
static struct tme_float _tme_sparc_float_null;
#undef TME_SPARC_VERSION
#define TME_SPARC_VERSION(ic) (8)
/* decodes the alternate-space ASI field of a sparc32 load/store
   alternate instruction and returns the ASI mask to use for the
   access.  alternate instructions are privileged and have no
   immediate (i == 1) form: */
static tme_uint32_t
_tme_sparc32_alternate_asi_mask(struct tme_sparc *ic)
{
unsigned int asi_data;
unsigned int asi_mask_flags;
tme_uint32_t asi_mask_data;
/* get the ASI, assuming that the i bit is zero: */
asi_data = TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, (0xff << 5));
/* this is a privileged instruction: */
/* NOTE(review): TME_SPARC_INSN_PRIV presumably traps when the
   processor is not in supervisor mode -- confirm in sparc-impl.h: */
TME_SPARC_INSN_PRIV;
/* if the i bit is one, this is an illegal instruction: */
if (__tme_predict_false(TME_SPARC_INSN & TME_BIT(13))) {
TME_SPARC_INSN_ILL(ic);
}
/* get the flags for this ASI: */
asi_mask_flags = ic->tme_sparc_asis[asi_data].tme_sparc_asi_mask_flags;
/* make the ASI mask: */
if (asi_mask_flags & TME_SPARC32_ASI_MASK_FLAG_SPECIAL) {
asi_mask_data
= TME_SPARC_ASI_MASK_SPECIAL(asi_data, TRUE);
}
else {
asi_mask_data = TME_SPARC32_ASI_MASK(asi_data, asi_data);
}
/* if this ASI has a special handler: */
if (__tme_predict_false(ic->tme_sparc_asis[TME_SPARC_ASI_MASK_WHICH(asi_mask_data)].tme_sparc_asi_handler != 0)) {
/* force a slow load or store, which will call the special handler: */
asi_mask_data |= TME_SPARC_ASI_MASK_FLAG_UNDEF;
}
return (asi_mask_data);
}
/* common preamble for the sparc32 floating-point load and store
   instructions.  checks FPU-related traps in priority order and
   returns the struct tme_float to transfer: either the decoded rd
   floating-point register, or the caller's float_buffer when the
   address is misaligned (the eventual slow load or store raises
   mem_address_not_aligned) or when the instruction transfers no
   floating-point register (float_buffer format NULL): */
static struct tme_float *
_tme_sparc32_fpu_mem_fpreg(struct tme_sparc *ic,
tme_uint32_t misaligned,
struct tme_float *float_buffer)
{
unsigned int float_format;
unsigned int fpreg_format;
tme_uint32_t fp_store;
unsigned int fpu_mode;
unsigned int fpreg_number;
/* NB: this checks for various traps by their priority order: */
TME_SPARC_INSN_FPU_ENABLED;
/* get the floating-point format: */
float_format = float_buffer->tme_float_format;
/* convert the floating-point format into the ieee754
floating-point register file format: */
#if (TME_FLOAT_FORMAT_NULL | TME_IEEE754_FPREG_FORMAT_NULL) != 0
#error "TME_FLOAT_FORMAT_ or TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
#if TME_FLOAT_FORMAT_IEEE754_SINGLE < TME_IEEE754_FPREG_FORMAT_SINGLE
#error "TME_FLOAT_FORMAT_ or TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
#if (TME_FLOAT_FORMAT_IEEE754_SINGLE / TME_IEEE754_FPREG_FORMAT_SINGLE) != (TME_FLOAT_FORMAT_IEEE754_DOUBLE / TME_IEEE754_FPREG_FORMAT_DOUBLE)
#error "TME_FLOAT_FORMAT_ or TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
assert (float_format == TME_FLOAT_FORMAT_NULL
|| float_format == TME_FLOAT_FORMAT_IEEE754_SINGLE
|| float_format == TME_FLOAT_FORMAT_IEEE754_DOUBLE);
/* NB: a plain division maps SINGLE->SINGLE and DOUBLE->DOUBLE
   (and NULL->NULL) because the two constant sets are proportional,
   which the preprocessor tests above guarantee: */
fpreg_format = float_format / (TME_FLOAT_FORMAT_IEEE754_SINGLE / TME_IEEE754_FPREG_FORMAT_SINGLE);
/* if the memory address is misaligned, return the
float buffer now. the eventual load or store will
cause the mem_address_not_aligned trap: */
/* if the memory address is misaligned: */
#if TME_IEEE754_FPREG_FORMAT_NULL != 0 || TME_IEEE754_FPREG_FORMAT_SINGLE != 1 || TME_IEEE754_FPREG_FORMAT_DOUBLE != 2 || TME_IEEE754_FPREG_FORMAT_QUAD != 4
#error "TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
assert (fpreg_format == TME_IEEE754_FPREG_FORMAT_NULL
|| fpreg_format == TME_IEEE754_FPREG_FORMAT_SINGLE
|| fpreg_format == TME_IEEE754_FPREG_FORMAT_DOUBLE
|| fpreg_format == TME_IEEE754_FPREG_FORMAT_QUAD);
/* NB: the format code doubles as the operand size in 32-bit words
   (checked above), so this masks the address bits below the
   operand size: */
misaligned &= ((sizeof(tme_uint32_t) * fpreg_format) - 1);
if (__tme_predict_false(misaligned)) {
return (float_buffer);
}
/* see if this is a floating-point load or store: */
/* NB: all of the floating-point instructions that use
this preamble have bit two of op3 clear for a load,
and set for a store: */
fp_store = (TME_SPARC_INSN & TME_BIT(19 + 2));
/* if the FPU isn't in execute mode: */
fpu_mode = ic->tme_sparc_fpu_mode;
if (__tme_predict_false(fpu_mode != TME_SPARC_FPU_MODE_EXECUTE)) {
/* if this is a floating-point load, or if this is a
floating-point store and a floating-point exception
is pending: */
if (!fp_store
|| fpu_mode == TME_SPARC_FPU_MODE_EXCEPTION_PENDING) {
/* do an FPU exception check: */
tme_sparc_fpu_exception_check(ic);
}
}
/* if this is not a load or store of a floating-point register: */
if (fpreg_format == TME_IEEE754_FPREG_FORMAT_NULL) {
return (float_buffer);
}
/* decode rd: */
fpreg_number
= tme_sparc_fpu_fpreg_decode(ic,
TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN,
TME_SPARC_FORMAT3_MASK_RD),
fpreg_format);
/* make sure this floating-point register has the right precision: */
tme_sparc_fpu_fpreg_format(ic, fpreg_number, fpreg_format | TME_IEEE754_FPREG_FORMAT_BUILTIN);
/* if this is a floating-point load: */
if (!fp_store) {
/* mark rd as dirty: */
TME_SPARC_FPU_DIRTY(ic, fpreg_number);
}
/* return the floating-point register: */
return (&ic->tme_sparc_fpu_fpregs[fpreg_number]);
}
/* runs the FPU load/store trap checks above for an instruction
   that transfers no floating-point register: the all-bits-zero
   float buffer has format NULL, so only the enable/mode checks
   are performed and the returned pointer is ignored: */
#define _tme_sparc32_fpu_mem(ic) \
do { _tme_sparc32_fpu_mem_fpreg(ic, 0, &_tme_sparc_float_null); } while (/* CONSTCOND */ 0)
/* this does a sparc32 "add SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_add, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 + src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "addcc SRC1, SRC2, DST": an add that also
   sets the PSR integer condition codes N, Z, V and C: */
TME_SPARC_FORMAT3(tme_sparc32_addcc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 + src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* if the operands are the same sign, and the destination has
a different sign, set V: */
/* NB: (src1 ^ (src2 ^ ~0)) is src1 ^ ~src2, whose sign bit is set
   when src1 and src2 have the same sign; ANDed with (src2 ^ dst)
   the whole expression is negative exactly on signed overflow: */
cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC32_PSR_ICC_V);
/* if src1 and src2 both have the high bit set, or if dst does
not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC32_PSR_ICC_C;
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "sub SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_sub, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 - src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "subcc SRC1, SRC2, DST": a subtract that
   also sets the PSR integer condition codes N, Z, V and C: */
TME_SPARC_FORMAT3(tme_sparc32_subcc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 - src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* if the operands are different signs, and the destination has
a different sign from the first operand, set V: */
cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC32_PSR_ICC_V);
/* if src2 is greater than src1, set C: */
/* NB: C here is the unsigned borrow out of the subtraction: */
cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1))) * TME_SPARC32_PSR_ICC_C;
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "or SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_or, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 | src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "orcc SRC1, SRC2, DST": a bitwise OR that
   also sets the PSR integer condition codes.  only N and Z can be
   set; V and C end up cleared because the whole icc field is
   cleared before cc is ORed in: */
TME_SPARC_FORMAT3(tme_sparc32_orcc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 | src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "orn SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_orn, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 | ~src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "orncc SRC1, SRC2, DST": OR-not that also
   sets the PSR integer condition codes.  only N and Z can be set;
   V and C end up cleared because the whole icc field is cleared
   before cc is ORed in: */
TME_SPARC_FORMAT3(tme_sparc32_orncc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 | ~src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "and SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_and, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 & src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "andcc SRC1, SRC2, DST": a bitwise AND that
   also sets the PSR integer condition codes.  only N and Z can be
   set; V and C end up cleared because the whole icc field is
   cleared before cc is ORed in: */
TME_SPARC_FORMAT3(tme_sparc32_andcc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 & src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "andn SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_andn, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 & ~src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "andncc SRC1, SRC2, DST": AND-not that also
   sets the PSR integer condition codes.  only N and Z can be set;
   V and C end up cleared because the whole icc field is cleared
   before cc is ORed in: */
TME_SPARC_FORMAT3(tme_sparc32_andncc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 & ~src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "xor SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_xor, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 ^ src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "xorcc SRC1, SRC2, DST": a bitwise XOR that
   also sets the PSR integer condition codes.  only N and Z can be
   set; V and C end up cleared because the whole icc field is
   cleared before cc is ORed in: */
TME_SPARC_FORMAT3(tme_sparc32_xorcc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 ^ src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "xnor SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_xnor, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 ^ ~src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "xnorcc SRC1, SRC2, DST": XOR-not that also
   sets the PSR integer condition codes.  only N and Z can be set;
   V and C end up cleared because the whole icc field is cleared
   before cc is ORed in: */
TME_SPARC_FORMAT3(tme_sparc32_xnorcc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 ^ ~src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "addx SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_addx, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 + src2;
dst += ((ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_C) != 0);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "addxcc SRC1, SRC2, DST": add with the PSR
   icc.C flag as carry-in, also setting the condition codes
   N, Z, V and C from the final sum: */
TME_SPARC_FORMAT3(tme_sparc32_addxcc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 + src2;
/* add in the carry (icc.C as zero or one): */
dst += ((ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_C) != 0);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* if the operands are the same sign, and the destination has
a different sign, set V: */
/* NB: (src1 ^ (src2 ^ ~0)) is src1 ^ ~src2; the whole expression
   is negative exactly on signed overflow of the carry-in sum: */
cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC32_PSR_ICC_V);
/* if src1 and src2 both have the high bit set, or if dst does
not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC32_PSR_ICC_C;
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "subx SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_subx, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 - src2;
dst -= ((ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_C) != 0);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "subxcc SRC1, SRC2, DST": subtract with the
   PSR icc.C flag as borrow-in, also setting the condition codes
   N, Z, V and C from the final difference: */
TME_SPARC_FORMAT3(tme_sparc32_subxcc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 - src2;
/* subtract the borrow (icc.C as zero or one): */
dst -= ((ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_C) != 0);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* if the operands are different signs, and the destination has
a different sign from the first operand, set V: */
cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC32_PSR_ICC_V);
/* if src2 is greater than src1, set C: */
/* NB: the borrow out is (src2 + borrow_in) > src1, i.e. src2 > src1
   or src2 == src1 with the incoming carry set: */
cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1)) || (((tme_uint32_t) src2) == ((tme_uint32_t) src1) && (ic->tme_sparc32_ireg_psr & TME_SPARC32_PSR_ICC_C))) * TME_SPARC32_PSR_ICC_C;
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "taddcc SRC1, SRC2, DST": tagged add.  like
   addcc, but V is additionally set when either operand has nonzero
   tag bits (its two low bits): */
TME_SPARC_FORMAT3(tme_sparc32_taddcc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 + src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* if the operands are the same sign, and the destination has
a different sign, set V: */
cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC32_PSR_ICC_V);
/* if src1 and src2 both have the high bit set, or if dst does
not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC32_PSR_ICC_C;
/* set V if bits zero or one of src1 or src2 are set: */
/* NB: |= (not +=) because V may already be set by the
   signed-overflow test above: */
cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC32_PSR_ICC_V);
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "taddcctv SRC1, SRC2, DST": tagged add with
   trap on overflow.  like taddcc, but if V would be set the
   instruction traps with tag_overflow instead of writing rd or
   the condition codes: */
TME_SPARC_FORMAT3(tme_sparc32_taddcctv, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 + src2;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* if the operands are the same sign, and the destination has
a different sign, set V: */
cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC32_PSR_ICC_V);
/* if src1 and src2 both have the high bit set, or if dst does
not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC32_PSR_ICC_C;
/* set V if bits zero or one of src1 or src2 are set: */
cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC32_PSR_ICC_V);
/* trap on a tagged overflow: */
/* NOTE(review): tme_sparc32_trap() presumably does not return, so
   rd and the psr below stay unmodified on a trap -- confirm: */
if (cc & TME_SPARC32_PSR_ICC_V) {
tme_sparc32_trap(ic, TME_SPARC32_TRAP_tag_overflow);
}
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "tsubcc SRC1, SRC2, DST": tagged subtract.
   like subcc, but V is additionally set when either operand has
   nonzero tag bits (its two low bits): */
TME_SPARC_FORMAT3(tme_sparc32_tsubcc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 - src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* if the operands are different signs, and the destination has
a different sign from the first operand, set V: */
cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC32_PSR_ICC_V);
/* if src2 is greater than src1, set C: */
cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1))) * TME_SPARC32_PSR_ICC_C;
/* set V if bits zero or one of src1 or src2 are set: */
/* NB: |= (not +=) because V may already be set by the
   signed-overflow test above: */
cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC32_PSR_ICC_V);
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "tsubcctv SRC1, SRC2, DST": tagged subtract
   with trap on overflow.  like tsubcc, but if V would be set the
   instruction traps with tag_overflow instead of writing rd or
   the condition codes: */
TME_SPARC_FORMAT3(tme_sparc32_tsubcctv, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 - src2;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* if the operands are different signs, and the destination has
a different sign from the first operand, set V: */
cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC32_PSR_ICC_V);
/* if src2 is greater than src1, set C: */
cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1))) * TME_SPARC32_PSR_ICC_C;
/* set V if bits zero or one of src1 or src2 are set: */
cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC32_PSR_ICC_V);
/* trap on a tagged overflow: */
/* NOTE(review): tme_sparc32_trap() presumably does not return, so
   rd and the psr below stay unmodified on a trap -- confirm: */
if (cc & TME_SPARC32_PSR_ICC_V) {
tme_sparc32_trap(ic, TME_SPARC32_TRAP_tag_overflow);
}
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "umul SRC1, SRC2, DST": unsigned 32x32->64
   multiply.  the high 32 bits of the product go into %y and the
   low 32 bits into rd: */
TME_SPARC_FORMAT3(tme_sparc32_umul, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint64_t val64;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
val64 = (((tme_uint64_t) src1) * src2);
/* the high half of the product goes into the Y register: */
ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y) = (((tme_uint64_t) val64) >> 32);
/* the low half (implicit truncation on assignment) is the result: */
dst = ((tme_uint64_t) val64);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "umulcc SRC1, SRC2, DST": unsigned
   32x32->64 multiply (high half to %y, low half to rd) that also
   sets the condition codes.  only N and Z can be set, from the
   low 32 bits; V and C end up cleared: */
TME_SPARC_FORMAT3(tme_sparc32_umulcc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint64_t val64;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
val64 = (((tme_uint64_t) src1) * src2);
/* the high half of the product goes into the Y register: */
ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y) = (((tme_uint64_t) val64) >> 32);
/* the low half (implicit truncation on assignment) is the result: */
dst = ((tme_uint64_t) val64);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "smul SRC1, SRC2, DST": signed 32x32->64
   multiply.  the high 32 bits of the product go into %y and the
   low 32 bits into rd: */
TME_SPARC_FORMAT3(tme_sparc32_smul, tme_uint32_t)
{
tme_int32_t src1;
tme_int32_t src2;
tme_int32_t dst;
tme_int64_t val64;
/* get the operands: */
src1 = (tme_int32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_int32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
val64 = (((tme_int64_t) src1) * src2);
/* the high half of the product goes into the Y register: */
ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y) = (((tme_uint64_t) val64) >> 32);
/* the low half (implicit truncation on assignment) is the result: */
dst = ((tme_int64_t) val64);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_int32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "smulcc SRC1, SRC2, DST": signed 32x32->64
   multiply (high half to %y, low half to rd) that also sets the
   condition codes.  only N and Z can be set, from the low 32
   bits; V and C end up cleared: */
TME_SPARC_FORMAT3(tme_sparc32_smulcc, tme_uint32_t)
{
tme_int32_t src1;
tme_int32_t src2;
tme_int32_t dst;
tme_int64_t val64;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_int32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_int32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
val64 = (((tme_int64_t) src1) * src2);
/* the high half of the product goes into the Y register: */
ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y) = (((tme_uint64_t) val64) >> 32);
/* the low half (implicit truncation on assignment) is the result: */
dst = ((tme_int64_t) val64);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_int32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "udiv SRC1, SRC2, DST": unsigned divide of
   the 64-bit value (%y:SRC1) by SRC2.  traps on a zero divisor;
   the quotient is clamped to 0xffffffff on overflow: */
TME_SPARC_FORMAT3(tme_sparc32_udiv, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint64_t val64;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
/* form the 64-bit dividend from %y (high half) and src1 (low half): */
val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y);
val64 = (val64 << 32) + (tme_uint32_t) src1;
/* NOTE(review): tme_sparc32_trap() presumably does not return here,
   otherwise the division below would divide by zero -- confirm: */
if (__tme_predict_false(src2 == 0)) {
tme_sparc32_trap(ic, TME_SPARC32_TRAP_division_by_zero);
}
val64 /= src2;
dst = (tme_uint32_t) val64;
/* if the division overflowed: */
if (dst != val64) {
/* return the largest appropriate value: */
dst = 0xffffffff;
}
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "udivcc SRC1, SRC2, DST": unsigned divide
   of (%y:SRC1) by SRC2 that also sets the condition codes.  N and
   Z come from the (possibly clamped) quotient, V reflects
   overflow, and C ends up cleared: */
TME_SPARC_FORMAT3(tme_sparc32_udivcc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint64_t val64;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
/* form the 64-bit dividend from %y (high half) and src1 (low half): */
val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y);
val64 = (val64 << 32) + (tme_uint32_t) src1;
/* NOTE(review): tme_sparc32_trap() presumably does not return here,
   otherwise the division below would divide by zero -- confirm: */
if (__tme_predict_false(src2 == 0)) {
tme_sparc32_trap(ic, TME_SPARC32_TRAP_division_by_zero);
}
val64 /= src2;
dst = (tme_uint32_t) val64;
/* if the division overflowed: */
if (dst != val64) {
/* return the largest appropriate value: */
dst = 0xffffffff;
/* set V: */
cc = TME_SPARC32_PSR_ICC_V;
}
/* otherwise, the division didn't overflow: */
else {
/* clear V: */
/* NB: !TME_SPARC32_PSR_ICC_V is simply zero (the flag is a
   nonzero constant); this starts cc with no bits set: */
cc = !TME_SPARC32_PSR_ICC_V;
}
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc += ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "sdiv SRC1, SRC2, DST": signed divide of
   the 64-bit value (%y:SRC1) by SRC2.  traps on a zero divisor;
   the quotient is clamped to INT32_MAX / INT32_MIN on overflow: */
TME_SPARC_FORMAT3(tme_sparc32_sdiv, tme_uint32_t)
{
tme_int32_t src1;
tme_int32_t src2;
tme_int32_t dst;
tme_int64_t val64;
/* get the operands: */
src1 = (tme_int32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_int32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
/* form the signed 64-bit dividend from %y (high half) and src1
   (low half); %y's top bit gives val64 its sign: */
val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y);
val64 = (val64 << 32) + (tme_uint32_t) src1;
/* NOTE(review): tme_sparc32_trap() presumably does not return here,
   otherwise the division below would divide by zero -- confirm: */
if (__tme_predict_false(src2 == 0)) {
tme_sparc32_trap(ic, TME_SPARC32_TRAP_division_by_zero);
}
val64 /= src2;
dst = (tme_int32_t) val64;
/* if the division overflowed: */
if (dst != val64) {
/* return the largest appropriate value: */
/* NB: 0x7fffffff + 1 wraps to 0x80000000, so this yields
   INT32_MIN when the true quotient is negative, else INT32_MAX: */
dst = (tme_int32_t) ((val64 < 0) + (tme_uint32_t) 0x7fffffff);
}
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_int32_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 "sdivcc SRC1, SRC2, DST": signed divide of
   (%y:SRC1) by SRC2 that also sets the condition codes.  N and Z
   come from the (possibly clamped) quotient, V reflects overflow,
   and C ends up cleared: */
TME_SPARC_FORMAT3(tme_sparc32_sdivcc, tme_uint32_t)
{
tme_int32_t src1;
tme_int32_t src2;
tme_int32_t dst;
tme_int64_t val64;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_int32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_int32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
/* form the signed 64-bit dividend from %y (high half) and src1
   (low half); %y's top bit gives val64 its sign: */
val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y);
val64 = (val64 << 32) + (tme_uint32_t) src1;
/* NOTE(review): tme_sparc32_trap() presumably does not return here,
   otherwise the division below would divide by zero -- confirm: */
if (__tme_predict_false(src2 == 0)) {
tme_sparc32_trap(ic, TME_SPARC32_TRAP_division_by_zero);
}
val64 /= src2;
dst = (tme_int32_t) val64;
/* if the division overflowed: */
if (dst != val64) {
/* return the largest appropriate value: */
/* NB: yields INT32_MIN when the true quotient is negative,
   else INT32_MAX (0x7fffffff + 1 wraps to 0x80000000): */
dst = (tme_int32_t) ((val64 < 0) + (tme_uint32_t) 0x7fffffff);
/* set V: */
cc = TME_SPARC32_PSR_ICC_V;
}
/* otherwise, the division didn't overflow: */
else {
/* clear V: */
/* NB: !TME_SPARC32_PSR_ICC_V is simply zero (the flag is a
   nonzero constant); this starts cc with no bits set: */
cc = !TME_SPARC32_PSR_ICC_V;
}
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_int32_t) dst;
/* set Z if the destination is zero: */
cc += ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* the sparc32 sll function: */
TME_SPARC_FORMAT3(tme_sparc32_sll, tme_uint32_t)
{
tme_uint32_t dst;
unsigned int count;
/* get the value and the shift count: */
dst = TME_SPARC_FORMAT3_RS1;
count = TME_SPARC_FORMAT3_RS2;
/* limit the count: */
count %= 32;
/* do the shift: */
#if defined(SHIFTMAX_INT32_T) && (SHIFTMAX_INT32_T < (32 - 1))
#error "cannot do full shifts of a tme_int32_t"
#endif /* (SHIFTMAX_INT32_T < (32 - 1)) */
dst <<= count;
/* store the destination: */
TME_SPARC_FORMAT3_RD = dst;
TME_SPARC_INSN_OK;
}
/* the sparc32 srl function: */
TME_SPARC_FORMAT3(tme_sparc32_srl, tme_uint32_t)
{
tme_uint32_t dst;
unsigned int count;
/* get the value and the shift count: */
dst = TME_SPARC_FORMAT3_RS1;
count = TME_SPARC_FORMAT3_RS2;
/* limit the count: */
count %= 32;
/* do the shift: */
#if defined(SHIFTMAX_INT32_T) && (SHIFTMAX_INT32_T < (32 - 1))
#error "cannot do full shifts of a tme_int32_t"
#endif /* (SHIFTMAX_INT32_T < (32 - 1)) */
dst >>= count;
/* store the destination: */
TME_SPARC_FORMAT3_RD = dst;
TME_SPARC_INSN_OK;
}
/* the sparc32 sra function: */
/* arithmetic right shift: shifts rs1 right by rs2 (mod 32) bit
   positions, replicating the sign bit into the vacated positions: */
TME_SPARC_FORMAT3(tme_sparc32_sra, tme_uint32_t)
{
tme_int32_t dst;
unsigned int count;
/* get the value and the shift count: */
dst = TME_SPARC_FORMAT3_RS1;
count = TME_SPARC_FORMAT3_RS2;
/* limit the count: only the low five bits are used: */
count %= 32;
/* do the shift: */
#ifdef SHIFTSIGNED_INT32_T
/* the host shifts signed values arithmetically, so a plain >> works: */
#if defined(SHIFTMAX_INT32_T) && (SHIFTMAX_INT32_T < (32 - 1))
#error "cannot do full shifts of a tme_int32_t"
#endif /* (SHIFTMAX_INT32_T < (32 - 1)) */
dst >>= count;
#else /* !SHIFTSIGNED_INT32_T */
/* portable fallback, one bit at a time: clearing the low bit first
   makes the signed division by two round like an arithmetic shift
   (toward minus infinity) even for negative values: */
for (; count-- > 0; ) {
dst = (dst & ~((tme_int32_t) 1)) / 2;
}
#endif /* !SHIFTSIGNED_INT32_T */
/* store the destination: */
TME_SPARC_FORMAT3_RD = dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc32 ldb: */
/* load (un)signed byte: loads one byte into rd.  bit 22 of the
   instruction selects sign extension (the ldsb variant) over zero
   extension (ldub).  a fast transfer through a busied DTLB entry is
   attempted first, with a fallback to the generic slow load path: */
TME_SPARC_FORMAT3(tme_sparc32_ldb, tme_uint32_t)
{
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint8_t value8;
tme_uint32_t value32;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_8
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((8 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast ldb: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
)) {
/* call the slow load function.  NOTE(review): unlike the store
   path, the return value is not checked against
   TME_EMULATOR_OFF_UNDEF here; the slow load presumably returns a
   pointer biased so that (memory + address) below addresses the
   loaded byte -- confirm against tme_sparc32_ls: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| (8 / 8)));
}
/* get the byte order of this transfer (unused for a one-byte
   transfer): */
endian_little = FALSE;
/* do the fast transfer: */
memory += address;
value8 = tme_memory_bus_read8((const tme_shared tme_uint8_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint32_t));
/* possibly sign-extend the loaded value (bit 22 set means the
   signed variant): */
value32 = value8;
if (TME_SPARC_INSN & TME_BIT(22)) {
value32 = (tme_uint32_t) (tme_int32_t) (tme_int8_t) value32;
}
/* (the repeated (tme_int32_t) cast is a harmless generator artifact): */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) (tme_int32_t) (tme_int32_t) value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldb 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc32 stb: */
/* store byte: stores the low byte of rd to memory.  a fast transfer
   through a busied DTLB entry is attempted first, with a fallback to
   the generic slow store path: */
TME_SPARC_FORMAT3(tme_sparc32_stb, tme_uint32_t)
{
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint8_t value8;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_8
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("stb 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx8),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint8_t) TME_SPARC_FORMAT3_RD));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((8 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast stb: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
)) {
/* call the slow store function: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| (8 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry; */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* otherwise, the slow path returned a usable pointer and the fast
   transfer below completes the store: */
}
/* get the byte order of this transfer (unused for a one-byte
   transfer): */
endian_little = FALSE;
/* do the fast transfer: */
memory += address;
value8 = TME_SPARC_FORMAT3_RD;
tme_memory_bus_write8((tme_shared tme_uint8_t *) memory, value8, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint32_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc32 ldh: */
/* load (un)signed halfword: loads a big-endian 16-bit value into rd.
   bit 22 of the instruction selects sign extension (ldsh) over zero
   extension (lduh).  a misaligned address forces the slow path: */
TME_SPARC_FORMAT3(tme_sparc32_ldh, tme_uint32_t)
{
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint16_t value16;
tme_uint32_t value32;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_16
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((16 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast ldh: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (16 / 8)) != 0)
)) {
/* call the slow load function.  NOTE(review): the return value is
   not checked here; the slow load presumably returns a pointer
   biased so that (memory + address) below addresses the loaded
   value -- confirm against tme_sparc32_ls: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| (16 / 8)));
}
/* get the byte order of this transfer (always big-endian here): */
endian_little = FALSE;
/* do the fast transfer: */
memory += address;
value16 = tme_memory_bus_read16((const tme_shared tme_uint16_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint32_t));
value16 = (endian_little ? tme_letoh_u16(value16) : tme_betoh_u16(value16));
/* possibly sign-extend the loaded value (bit 22 set means the
   signed variant): */
value32 = value16;
if (TME_SPARC_INSN & TME_BIT(22)) {
value32 = (tme_uint32_t) (tme_int32_t) (tme_int16_t) value32;
}
/* (the repeated (tme_int32_t) cast is a harmless generator artifact): */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) (tme_int32_t) (tme_int32_t) value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldh 0x%02x:0x%08" TME_PRIx32 ": 0x%04" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc32 sth: */
/* store halfword: stores the low 16 bits of rd to memory in big-endian
   byte order.  a misaligned address forces the slow path: */
TME_SPARC_FORMAT3(tme_sparc32_sth, tme_uint32_t)
{
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint16_t value16;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_16
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("sth 0x%02x:0x%08" TME_PRIx32 ": 0x%04" TME_PRIx16),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint16_t) TME_SPARC_FORMAT3_RD));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((16 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast sth: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (16 / 8)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| (16 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry; */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* otherwise, the slow path returned a usable pointer and the fast
   transfer below completes the store: */
}
/* get the byte order of this transfer (always big-endian here): */
endian_little = FALSE;
/* do the fast transfer: */
memory += address;
value16 = TME_SPARC_FORMAT3_RD;
value16 = (endian_little ? tme_htole_u16(value16) : tme_htobe_u16(value16));
tme_memory_bus_write16((tme_shared tme_uint16_t *) memory, value16, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint32_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc32 ld: */
/* load word: loads a big-endian 32-bit value into rd.  a misaligned
   address forces the slow path: */
TME_SPARC_FORMAT3(tme_sparc32_ld, tme_uint32_t)
{
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((32 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast ld: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (32 / 8)) != 0)
)) {
/* call the slow load function.  NOTE(review): the return value is
   not checked here; the slow load presumably returns a pointer
   biased so that (memory + address) below addresses the loaded
   value -- confirm against tme_sparc32_ls: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| (32 / 8)));
}
/* get the byte order of this transfer (always big-endian here): */
endian_little = FALSE;
/* do the fast transfer: */
memory += address;
value32 = tme_memory_bus_read32((const tme_shared tme_uint32_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint32_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
/* (the repeated (tme_int32_t) cast is a harmless generator artifact): */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) (tme_int32_t) (tme_int32_t) value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ld 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc32 st: */
/* store word: stores the 32-bit rd to memory in big-endian byte
   order.  a misaligned address forces the slow path: */
TME_SPARC_FORMAT3(tme_sparc32_st, tme_uint32_t)
{
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("st 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint32_t) TME_SPARC_FORMAT3_RD));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((32 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast st: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (32 / 8)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| (32 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry; */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* otherwise, the slow path returned a usable pointer and the fast
   transfer below completes the store: */
}
/* get the byte order of this transfer (always big-endian here): */
endian_little = FALSE;
/* do the fast transfer: */
memory += address;
value32 = TME_SPARC_FORMAT3_RD;
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
tme_memory_bus_write32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint32_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc32 ldd: */
/* load doubleword: loads two big-endian 32-bit words into the
   even/odd register pair rd and rd|1.  a misaligned (non-8-byte)
   address or an odd rd (instruction bit 25) forces the slow path,
   which is passed the LDD_STD flag: */
TME_SPARC_FORMAT3(tme_sparc32_ldd, tme_uint32_t)
{
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer (both words): */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32),
ic->tme_sparc_asi_mask_data, address + sizeof(tme_uint32_t),
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((64 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast ldd: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (64 / 8)) != 0)
/* the destination register number is odd: */
|| ((TME_SPARC_INSN & TME_BIT(25)) != 0)
)) {
/* call the slow load function.  NOTE(review): the return value is
   not checked here; the slow load presumably returns a pointer
   biased so that (memory + address) below addresses the loaded
   doubleword -- confirm against tme_sparc32_ls: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| TME_SPARC_LSINFO_LDD_STD
| (64 / 8)));
}
/* get the byte order of this transfer (always big-endian here): */
endian_little = FALSE;
/* do the fast transfer: the first word into rd, the second into
   the odd register of the pair: */
memory += address;
value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 0, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint32_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
TME_SPARC_FORMAT3_RD = value32;
value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 1, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint32_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32) = value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the values loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32));
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldd 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32 ""),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD,
TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32)));
TME_SPARC_INSN_OK;
}
/* this does a sparc32 std: */
/* store doubleword: stores the even/odd register pair rd and rd|1
   to memory as two big-endian 32-bit words.  a misaligned
   (non-8-byte) address or an odd rd (instruction bit 25) forces the
   slow path, which is passed the LDD_STD flag: */
TME_SPARC_FORMAT3(tme_sparc32_std, tme_uint32_t)
{
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer (both words): */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32),
ic->tme_sparc_asi_mask_data, address + sizeof(tme_uint32_t),
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the values stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("std 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint32_t) TME_SPARC_FORMAT3_RD,
(tme_uint32_t) TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32)));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((64 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast std: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (64 / 8)) != 0)
/* the destination register number is odd: */
|| ((TME_SPARC_INSN & TME_BIT(25)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| TME_SPARC_LSINFO_LDD_STD
| (64 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry; */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* otherwise, the slow path returned a usable pointer and the fast
   transfer below completes the store: */
}
/* get the byte order of this transfer (always big-endian here): */
endian_little = FALSE;
/* do the fast transfer: the first word from rd, the second from
   the odd register of the pair: */
memory += address;
value32 = TME_SPARC_FORMAT3_RD;
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 0, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint32_t));
value32 = TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32);
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 1, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint32_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc32 ldstub: */
/* atomic load-store unsigned byte: atomically reads a byte into rd
   and writes 0xff to the same location (SPARC's basic spinlock
   primitive).  the fast path additionally requires the DTLB's read
   and write mappings to be the same memory, since the exchange must
   go through a single pointer: */
TME_SPARC_FORMAT3(tme_sparc32_ldstub, tme_uint32_t)
{
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_8
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((8 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast ldstub: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers (for an atomic,
   the read and write mappings must also be identical): */
|| (memory != dtlb->tme_sparc_tlb_emulator_off_read)
|| (memory == TME_EMULATOR_OFF_UNDEF)
)) {
/* call the slow store function with the atomic op code: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ATOMIC
| (8 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry; */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldstub 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer (unused for a one-byte
   transfer): */
endian_little = FALSE;
/* do the fast transfer: atomically exchange the byte with 0xff,
   returning the old value into rd: */
memory += address;
TME_SPARC_FORMAT3_RD = tme_memory_atomic_xchg8(memory, 0xff, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldstub 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc32 ldstuba (atomic load-store unsigned byte into an
   alternate address space): the byte at [rs1 + rs2] in the alternate
   space named by the instruction's asi field is loaded into rd, and
   0xff is atomically stored in its place.  privilege and i-bit checks
   are done inside _tme_sparc32_alternate_asi_mask(): */
TME_SPARC_FORMAT3(tme_sparc32_ldstuba, tme_uint32_t)
{
tme_uint32_t asi_mask_data;
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
/* get the alternate ASI mask (this also traps for a nonprivileged
   context or a set i bit): */
asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer; on a replay this
   instruction is finished: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_8
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((8 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast ldstuba: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers (an atomic needs the
   read and the write mapping to be the same memory): */
|| (memory != dtlb->tme_sparc_tlb_emulator_off_read)
|| (memory == TME_EMULATOR_OFF_UNDEF)
)) {
/* call the slow store function: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ATOMIC
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (8 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldstuba 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer (unused for a byte-wide
   transfer, but emitted uniformly by the generator): */
endian_little = FALSE;
/* do the fast transfer: atomically exchange 0xff with the byte in
   memory and put the old byte in rd: */
memory += address;
TME_SPARC_FORMAT3_RD = tme_memory_atomic_xchg8(memory, 0xff, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldstuba 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc32 swap: the 32-bit value in rd is atomically
   exchanged with the 32-bit word at [rs1 + rs2] in the current
   (non-alternate) address space given by ic->tme_sparc_asi_mask_data: */
TME_SPARC_FORMAT3(tme_sparc32_swap, tme_uint32_t)
{
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer (a swap is both a load and a
   store); on a replay this instruction is finished: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD | TME_SPARC_RECODE_VERIFY_MEM_STORE))
;
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored (the outgoing rd, logged before the
   exchange overwrites it): */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("swap 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint32_t) TME_SPARC_FORMAT3_RD));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((32 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast swap: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers (an atomic needs the
   read and the write mapping to be the same memory): */
|| (memory != dtlb->tme_sparc_tlb_emulator_off_read)
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (32 / 8)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ATOMIC
| (32 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("swap 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer (always big-endian here): */
endian_little = FALSE;
/* do the fast transfer: convert rd to bus byte order, atomically
   exchange it with the word in memory, and convert the old word back
   to host order for rd.  NOTE(review): the alignment hint passed to
   tme_memory_atomic_xchg32 is sizeof(tme_uint8_t) even though the
   guest address is word-aligned; presumably this is conservative
   because the mapping base alignment is unknown -- confirm against
   the tme_memory_atomic_xchg32 contract: */
memory += address;
value32 = TME_SPARC_FORMAT3_RD;
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
value32 = tme_memory_atomic_xchg32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
TME_SPARC_FORMAT3_RD = value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("swap 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc32 swapa: like swap, but the 32-bit exchange is
   done in the alternate address space named by the instruction's asi
   field.  privilege and i-bit checks are done inside
   _tme_sparc32_alternate_asi_mask(): */
TME_SPARC_FORMAT3(tme_sparc32_swapa, tme_uint32_t)
{
tme_uint32_t asi_mask_data;
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the alternate ASI mask (this also traps for a nonprivileged
   context or a set i bit): */
asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer (a swapa is both a load and a
   store); on a replay this instruction is finished: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD | TME_SPARC_RECODE_VERIFY_MEM_STORE))
;
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored (the outgoing rd, logged before the
   exchange overwrites it): */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("swapa 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint32_t) TME_SPARC_FORMAT3_RD));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((32 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast swapa: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers (an atomic needs the
   read and the write mapping to be the same memory): */
|| (memory != dtlb->tme_sparc_tlb_emulator_off_read)
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (32 / 8)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ATOMIC
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (32 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("swapa 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer (always big-endian here): */
endian_little = FALSE;
/* do the fast transfer: convert rd to bus byte order, atomically
   exchange it with the word in memory, and convert the old word back
   to host order for rd.  NOTE(review): the alignment hint passed to
   tme_memory_atomic_xchg32 is sizeof(tme_uint8_t); presumably
   conservative because the mapping base alignment is unknown --
   confirm against the tme_memory_atomic_xchg32 contract: */
memory += address;
value32 = TME_SPARC_FORMAT3_RD;
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
value32 = tme_memory_atomic_xchg32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
TME_SPARC_FORMAT3_RD = value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("swapa 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc32 ldba: load a byte into rd from [rs1 + rs2] in
   the alternate address space named by the instruction's asi field.
   bit 22 of the instruction selects the sign-extending variant
   (presumably ldsba -- the encoding claim should be confirmed against
   the opcode tables; the sign-extension itself is visible below).
   privilege and i-bit checks are done inside
   _tme_sparc32_alternate_asi_mask(): */
TME_SPARC_FORMAT3(tme_sparc32_ldba, tme_uint32_t)
{
tme_uint32_t asi_mask_data;
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint8_t value8;
tme_uint32_t value32;
/* get the alternate ASI mask (this also traps for a nonprivileged
   context or a set i bit): */
asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer; on a replay this
   instruction is finished: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_8
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers (a plain
   load only needs the read mapping): */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((8 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast ldba: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
)) {
/* call the slow load function: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (8 / 8)));
/* if the slow load function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldba 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer (unused for a byte-wide
   transfer, but emitted uniformly by the generator): */
endian_little = FALSE;
/* do the fast transfer: */
memory += address;
value8 = tme_memory_bus_read8((const tme_shared tme_uint8_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint32_t));
/* possibly sign-extend the loaded value: */
value32 = value8;
if (TME_SPARC_INSN & TME_BIT(22)) {
value32 = (tme_uint32_t) (tme_int32_t) (tme_int8_t) value32;
}
/* store the value in rd (the repeated tme_int32_t cast is a no-op
   left by the generator): */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) (tme_int32_t) (tme_int32_t) value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldba 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc32 stba: store the low byte of rd to [rs1 + rs2]
   in the alternate address space named by the instruction's asi
   field.  privilege and i-bit checks are done inside
   _tme_sparc32_alternate_asi_mask(): */
TME_SPARC_FORMAT3(tme_sparc32_stba, tme_uint32_t)
{
tme_uint32_t asi_mask_data;
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint8_t value8;
/* get the alternate ASI mask (this also traps for a nonprivileged
   context or a set i bit): */
asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer; on a replay this
   instruction is finished: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_8
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("stba 0x%02x:0x%08" TME_PRIx32 ": 0x%02" TME_PRIx8),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint8_t) TME_SPARC_FORMAT3_RD));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers (a store
   only needs the write mapping): */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((8 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast stba: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
)) {
/* call the slow store function: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (8 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer (unused for a byte-wide
   transfer, but emitted uniformly by the generator): */
endian_little = FALSE;
/* do the fast transfer: */
memory += address;
value8 = TME_SPARC_FORMAT3_RD;
tme_memory_bus_write8((tme_shared tme_uint8_t *) memory, value8, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint32_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc32 ldha: load a big-endian halfword into rd from
   [rs1 + rs2] in the alternate address space named by the
   instruction's asi field.  the address must be halfword-aligned.
   bit 22 of the instruction selects the sign-extending variant
   (presumably ldsha -- the encoding claim should be confirmed against
   the opcode tables; the sign-extension itself is visible below).
   privilege and i-bit checks are done inside
   _tme_sparc32_alternate_asi_mask(): */
TME_SPARC_FORMAT3(tme_sparc32_ldha, tme_uint32_t)
{
tme_uint32_t asi_mask_data;
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint16_t value16;
tme_uint32_t value32;
/* get the alternate ASI mask (this also traps for a nonprivileged
   context or a set i bit): */
asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer; on a replay this
   instruction is finished: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_16
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers (a plain
   load only needs the read mapping): */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((16 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast ldha: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (16 / 8)) != 0)
)) {
/* call the slow load function: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (16 / 8)));
/* if the slow load function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldha 0x%02x:0x%08" TME_PRIx32 ": 0x%04" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer (always big-endian here): */
endian_little = FALSE;
/* do the fast transfer, converting from bus byte order to host
   order: */
memory += address;
value16 = tme_memory_bus_read16((const tme_shared tme_uint16_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint32_t));
value16 = (endian_little ? tme_letoh_u16(value16) : tme_betoh_u16(value16));
/* possibly sign-extend the loaded value: */
value32 = value16;
if (TME_SPARC_INSN & TME_BIT(22)) {
value32 = (tme_uint32_t) (tme_int32_t) (tme_int16_t) value32;
}
/* store the value in rd (the repeated tme_int32_t cast is a no-op
   left by the generator): */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) (tme_int32_t) (tme_int32_t) value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldha 0x%02x:0x%08" TME_PRIx32 ": 0x%04" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc32 stha: store the low halfword of rd, in
   big-endian bus order, to [rs1 + rs2] in the alternate address space
   named by the instruction's asi field.  the address must be
   halfword-aligned.  privilege and i-bit checks are done inside
   _tme_sparc32_alternate_asi_mask(): */
TME_SPARC_FORMAT3(tme_sparc32_stha, tme_uint32_t)
{
tme_uint32_t asi_mask_data;
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint16_t value16;
/* get the alternate ASI mask (this also traps for a nonprivileged
   context or a set i bit): */
asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer; on a replay this
   instruction is finished: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_16
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("stha 0x%02x:0x%08" TME_PRIx32 ": 0x%04" TME_PRIx16),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint16_t) TME_SPARC_FORMAT3_RD));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers (a store
   only needs the write mapping): */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((16 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast stha: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (16 / 8)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (16 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer (always big-endian here): */
endian_little = FALSE;
/* do the fast transfer, converting from host order to bus byte
   order: */
memory += address;
value16 = TME_SPARC_FORMAT3_RD;
value16 = (endian_little ? tme_htole_u16(value16) : tme_htobe_u16(value16));
tme_memory_bus_write16((tme_shared tme_uint16_t *) memory, value16, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint32_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc32 lda: load a big-endian 32-bit word into rd from
   [rs1 + rs2] in the alternate address space named by the
   instruction's asi field.  the address must be word-aligned.
   privilege and i-bit checks are done inside
   _tme_sparc32_alternate_asi_mask(): */
TME_SPARC_FORMAT3(tme_sparc32_lda, tme_uint32_t)
{
tme_uint32_t asi_mask_data;
tme_uint32_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the alternate ASI mask (this also traps for a nonprivileged
   context or a set i bit): */
asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer; on a replay this
   instruction is finished: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers (a plain
   load only needs the read mapping): */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((32 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast lda: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (32 / 8)) != 0)
)) {
/* call the slow load function: */
memory = tme_sparc32_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (32 / 8)));
/* if the slow load function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("lda 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer (always big-endian here): */
endian_little = FALSE;
/* do the fast transfer, converting from bus byte order to host
   order: */
memory += address;
value32 = tme_memory_bus_read32((const tme_shared tme_uint32_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint32_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
/* store the value in rd (the repeated tme_int32_t cast is a no-op
   left by the generator): */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) (tme_int32_t) (tme_int32_t) value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("lda 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc32 sta (store word into alternate space).  the
   alternate ASI comes from the instruction via
   _tme_sparc32_alternate_asi_mask(), which also raises the
   privileged-instruction and (i bit set) illegal-instruction traps.
   the store goes through a fast path when a busy DTLB entry covers
   the transfer, and otherwise falls back to the slow store path in
   tme_sparc32_ls(): */
TME_SPARC_FORMAT3(tme_sparc32_sta, tme_uint32_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint32_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              asi_mask_data, address,
                              (TME_RECODE_SIZE_32
                               | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("sta 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint32_t) TME_SPARC_FORMAT3_RD));

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

    /* the DTLB entry is invalid: */
    tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

    /* the DTLB entry does not match the context: */
    || dtlb_context != ic->tme_sparc_memory_context_default

    /* the DTLB entry does not cover the needed addresses: */
    || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
    || ((address + ((32 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)

    /* the DTLB entry does not cover the needed address space: */
    || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

    /* the DTLB entry can't be used for a fast sta: */
    || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

    /* the DTLB entry does not allow fast transfers: */
    || (memory == TME_EMULATOR_OFF_UNDEF)

    /* the address is misaligned: */
    || ((address % (32 / 8)) != 0)
    )) {

    /* call the slow store function: */
    memory = tme_sparc32_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ST
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (32 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry: */
      tme_sparc_tlb_unbusy(dtlb);
      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer.  NB: this generated code
     always uses big-endian order here (endian_little is the
     constant FALSE): */
  endian_little = FALSE;

  /* do the fast transfer: */
  memory += address;
  value32 = TME_SPARC_FORMAT3_RD;
  value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
  tme_memory_bus_write32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint32_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);
  TME_SPARC_INSN_OK;
}
/* this does a sparc32 ldda (load doubleword from alternate space).
   the even/odd register pair rd/rd+1 receives the two 32-bit words.
   like lda/sta, the alternate ASI decode in
   _tme_sparc32_alternate_asi_mask() raises the privilege and i-bit
   traps, and the transfer takes a fast DTLB path when possible: */
TME_SPARC_FORMAT3(tme_sparc32_ldda, tme_uint32_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint32_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer (one verify per 32-bit
     half of the doubleword): */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              asi_mask_data, address,
                              (TME_RECODE_SIZE_32
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32),
                              asi_mask_data, address + sizeof(tme_uint32_t),
                              (TME_RECODE_SIZE_32
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  asi_mask_flags_slow = 0;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

    /* the DTLB entry is invalid: */
    tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

    /* the DTLB entry does not match the context: */
    || dtlb_context != ic->tme_sparc_memory_context_default

    /* the DTLB entry does not cover the needed addresses: */
    || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
    || ((address + ((64 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)

    /* the DTLB entry does not cover the needed address space: */
    || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

    /* the DTLB entry can't be used for a fast ldda: */
    || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

    /* the DTLB entry does not allow fast transfers: */
    || (memory == TME_EMULATOR_OFF_UNDEF)

    /* the address is misaligned: */
    || ((address % (64 / 8)) != 0)

    /* the destination register number is odd (bit 25 is the
       least-significant bit of the rd field), so the slow path must
       handle it: */
    || ((TME_SPARC_INSN & TME_BIT(25)) != 0)
    )) {

    /* call the slow load function: */
    memory = tme_sparc32_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_LD
                             | TME_SPARC_LSINFO_LDD_STD
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (64 / 8)));

    /* if the slow load function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry: */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the value loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32));
      tme_sparc_log(ic, 1000, TME_OK,
                    (TME_SPARC_LOG_HANDLE(ic),
                     _("ldda 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32 ""),
                     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                     address,
                     TME_SPARC_FORMAT3_RD,
                     TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32)));
      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer (constant big-endian in this
     generated code): */
  endian_little = FALSE;

  /* do the fast transfer, one 32-bit word at a time: */
  memory += address;
  value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 0, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint32_t));
  value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
  TME_SPARC_FORMAT3_RD = value32;
  value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 1, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint32_t));
  value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
  TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32) = value32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32));
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("ldda 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32 ""),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD,
                 TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32)));
  TME_SPARC_INSN_OK;
}
/* this does a sparc32 stda (store doubleword into alternate space).
   the even/odd register pair rd/rd+1 supplies the two 32-bit words.
   the alternate ASI decode in _tme_sparc32_alternate_asi_mask()
   raises the privilege and i-bit traps, and the transfer takes a
   fast DTLB path when possible: */
TME_SPARC_FORMAT3(tme_sparc32_stda, tme_uint32_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint32_t address;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint32_t value32;

  /* get the alternate ASI mask: */
  asi_mask_data = _tme_sparc32_alternate_asi_mask(ic);

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer (one verify per 32-bit
     half of the doubleword): */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              asi_mask_data, address,
                              (TME_RECODE_SIZE_32
                               | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32),
                              asi_mask_data, address + sizeof(tme_uint32_t),
                              (TME_RECODE_SIZE_32
                               | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the values stored: */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("stda 0x%02x:0x%08" TME_PRIx32 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint32_t) TME_SPARC_FORMAT3_RD,
                 (tme_uint32_t) TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32)));

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = ic->tme_sparc_memory_context_default;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

    /* the DTLB entry is invalid: */
    tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

    /* the DTLB entry does not match the context: */
    || dtlb_context != ic->tme_sparc_memory_context_default

    /* the DTLB entry does not cover the needed addresses: */
    || (address < (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_first)
    || ((address + ((64 / 8) - 1)) > (tme_bus_addr32_t) dtlb->tme_sparc_tlb_addr_last)

    /* the DTLB entry does not cover the needed address space: */
    || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

    /* the DTLB entry can't be used for a fast stda: */
    || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

    /* the DTLB entry does not allow fast transfers: */
    || (memory == TME_EMULATOR_OFF_UNDEF)

    /* the address is misaligned: */
    || ((address % (64 / 8)) != 0)

    /* the destination register number is odd (bit 25 is the
       least-significant bit of the rd field), so the slow path must
       handle it: */
    || ((TME_SPARC_INSN & TME_BIT(25)) != 0)
    )) {

    /* call the slow store function: */
    memory = tme_sparc32_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ST
                             | TME_SPARC_LSINFO_LDD_STD
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (64 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry: */
      tme_sparc_tlb_unbusy(dtlb);
      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer (constant big-endian in this
     generated code): */
  endian_little = FALSE;

  /* do the fast transfer, one 32-bit word at a time: */
  memory += address;
  value32 = TME_SPARC_FORMAT3_RD;
  value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
  tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 0, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint32_t));
  value32 = TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32);
  value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
  tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 1, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint32_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);
  TME_SPARC_INSN_OK;
}
/* this does a sparc32 jmpl (jump and link).  NB: the alignment check
   and any resulting mem_address_not_aligned trap are taken before
   the PC is written into r[rd]: */
TME_SPARC_FORMAT3(tme_sparc32_jmpl, tme_uint32_t)
{
  tme_uint32_t pc_next_next;
  tme_uint32_t ls_faults;

  /* "The JMPL instruction causes a register-indirect delayed control
     transfer to the address given by r[rs1] + r[rs2] if the i field is
     zero, or r[rs1] + sign_ext(simm13) if the i field is one.  The JMPL
     instruction copies the PC, which contains the address of the JMPL
     instruction, into register r[rd].  If either of the low-order two
     bits of the jump address is nonzero, a mem_address_not_aligned
     trap occurs." */

  /* get the target address: */
  pc_next_next = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

  /* set the delayed control transfer: */
  ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC_NEXT_NEXT) = pc_next_next;

  /* check the target address for word alignment: */
  ls_faults = TME_SPARC_LS_FAULT_NONE;
  if (__tme_predict_false((pc_next_next % sizeof(tme_uint32_t)) != 0)) {
    ls_faults += TME_SPARC_LS_FAULT_ADDRESS_NOT_ALIGNED;
  }
  if (__tme_predict_false(ls_faults != TME_SPARC_LS_FAULT_NONE)) {
    tme_sparc_nnpc_trap(ic, ls_faults);
  }

  /* write the PC of the jmpl into r[rd]: */
  TME_SPARC_FORMAT3_RD = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_PC);

  /* log an indirect call instruction, which has 15 (%o7) for rd: */
  if (TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, TME_SPARC_FORMAT3_MASK_RD) == 15) {
    tme_sparc_log(ic, 250, TME_OK,
                  (TME_SPARC_LOG_HANDLE(ic),
                   _("call 0x%08" TME_PRIx32),
                   pc_next_next));
  }

  /* log a ret or retl instruction, which has 0 (%g0) for rd,
     either 31 (%i7) or 15 (%o7) for rs1, and 8 for simm13.
     NB: ORing (16 << 14) into the instruction sets bit 4 of the rs1
     field, mapping rs1 15 (%o7) to 31 (%i7), so one comparison
     matches both forms: */
  else if ((TME_SPARC_INSN | (16 << 14))
           == ((tme_uint32_t) (0x2 << 30) | (0 << 25) | (0x38 << 19) | (31 << 14) | (0x1 << 13) | 8)) {
    tme_sparc_log(ic, 250, TME_OK,
                  (TME_SPARC_LOG_HANDLE(ic),
                   _("retl 0x%08" TME_PRIx32),
                   pc_next_next));
  }
  TME_SPARC_INSN_OK;
}
/* this does a sparc32 ldf (load single-precision floating-point
   register).  the low 32 bits of the effective address are passed to
   _tme_sparc32_fpu_mem_fpreg(), which (per its header comment)
   checks for various traps in priority order and returns the
   destination register; the memory word is then loaded through the
   FPX staging integer register: */
TME_SPARC_FORMAT3(tme_sparc32_ldf, tme_uint32_t)
{
  tme_uint32_t misaligned;
  struct tme_float float_buffer;
  struct tme_float *fpreg;

  /* get the least significant 32 bits of the address: */
  misaligned = TME_SPARC_FORMAT3_RS1;
  misaligned += (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* decode rd: */
  float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
  fpreg
    = _tme_sparc32_fpu_mem_fpreg(ic,
                                 misaligned,
                                 &float_buffer);

  /* do the load: */
  tme_sparc32_ld(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX));

  /* set the floating-point register value (the decode must have
     returned a real register, not the scratch buffer): */
  assert (fpreg != &float_buffer);
  fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
  fpreg->tme_float_value_ieee754_single
    = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX);
  TME_SPARC_INSN_OK;
}
/* this does a sparc32 lddf (load double-precision floating-point
   register).  the doubleword is loaded through the FPX staging
   integer register pair and then split into the high and low 32-bit
   halves of the IEEE754 double value: */
TME_SPARC_FORMAT3(tme_sparc32_lddf, tme_uint32_t)
{
  tme_uint32_t address;
  tme_uint32_t misaligned;
  struct tme_float float_buffer;
  struct tme_float *fpreg;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

  /* get the least significant 32 bits of the address: */
  misaligned = address;

  /* decode rd (this also checks for FPU traps in priority order): */
  float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
  fpreg
    = _tme_sparc32_fpu_mem_fpreg(ic,
                                 misaligned,
                                 &float_buffer);

  /* do the load: */
  tme_sparc32_ldd(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX));

  /* set the double floating-point register value (the decode must
     have returned a real register, not the scratch buffer): */
  assert (fpreg != &float_buffer);
  fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
  fpreg->tme_float_value_ieee754_double.tme_value64_uint32_hi
    = ic->tme_sparc_ireg_uint32((TME_SPARC_IREG_FPX) + 0);
  fpreg->tme_float_value_ieee754_double.tme_value64_uint32_lo
    = ic->tme_sparc_ireg_uint32((TME_SPARC_IREG_FPX) + 1);
  TME_SPARC_INSN_OK;
}
/* this does a sparc32 ldfsr (load floating-point state register).
   the loaded word replaces the FSR except for the ver, ftt, and qne
   fields, which are preserved from the current FSR: */
TME_SPARC_FORMAT3(tme_sparc32_ldfsr, tme_uint32_t)
{
  tme_uint32_t fsr;
  _tme_sparc32_fpu_mem(ic);

  /* do the load (through the FPX staging integer register): */
  tme_sparc32_ld(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX));

  /* update the FSR: */
  fsr = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX);

  /* "An LDFSR instruction does not affect ftt." */
  /* "The LDFSR instruction does not affect qne." */
  /* (ver is a read-only implementation field, also preserved): */
  fsr &= ~(TME_SPARC_FSR_VER | TME_SPARC_FSR_FTT | TME_SPARC_FSR_QNE);
  ic->tme_sparc_fpu_fsr = (ic->tme_sparc_fpu_fsr & (TME_SPARC_FSR_VER | TME_SPARC_FSR_FTT | TME_SPARC_FSR_QNE)) | fsr;
  TME_SPARC_INSN_OK;
}
/* this does a sparc32 stf (store single-precision floating-point
   register).  the register value is converted to IEEE754 single
   format, staged through the FPX integer register, and stored: */
TME_SPARC_FORMAT3(tme_sparc32_stf, tme_uint32_t)
{
  tme_uint32_t misaligned;
  struct tme_float float_buffer;
  const struct tme_float *fpreg;
  const tme_uint32_t *value_single;

  /* get the least significant 32 bits of the address: */
  misaligned = TME_SPARC_FORMAT3_RS1;
  misaligned += (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* decode rd (this also checks for FPU traps in priority order): */
  float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
  fpreg
    = _tme_sparc32_fpu_mem_fpreg(ic,
                                 misaligned,
                                 &float_buffer);

  /* get this single floating-point register in IEEE754 single-precision format: */
  value_single = tme_ieee754_single_value_get(fpreg, &float_buffer.tme_float_value_ieee754_single);

  /* set the floating-point register value: */
  ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX) = *value_single;

  /* do the store: */
  tme_sparc32_st(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX));

  /* the decode must have returned a real register, not the scratch
     buffer: */
  assert (fpreg != &float_buffer);
  TME_SPARC_INSN_OK;
}
/* this does a sparc32 stdf (store double-precision floating-point
   register).  the register value is converted to IEEE754 double
   format, staged through the FPX integer register pair (high word
   first), and stored as a doubleword: */
TME_SPARC_FORMAT3(tme_sparc32_stdf, tme_uint32_t)
{
  tme_uint32_t address;
  tme_uint32_t misaligned;
  struct tme_float float_buffer;
  struct tme_float *fpreg;
  const union tme_value64 *value_double;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

  /* get the least significant 32 bits of the address: */
  misaligned = address;

  /* decode rd (this also checks for FPU traps in priority order): */
  float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
  fpreg
    = _tme_sparc32_fpu_mem_fpreg(ic,
                                 misaligned,
                                 &float_buffer);

  /* get this double floating-point register in IEEE754 double-precision format: */
  value_double = tme_ieee754_double_value_get(fpreg, &float_buffer.tme_float_value_ieee754_double);

  /* set the floating-point register value: */
  ic->tme_sparc_ireg_uint32((TME_SPARC_IREG_FPX) + 0)
    = value_double->tme_value64_uint32_hi;
  ic->tme_sparc_ireg_uint32((TME_SPARC_IREG_FPX) + 1)
    = value_double->tme_value64_uint32_lo;

  /* do the store: */
  tme_sparc32_std(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX));

  /* the decode must have returned a real register, not the scratch
     buffer: */
  assert (fpreg != &float_buffer);
  TME_SPARC_INSN_OK;
}
/* this does a sparc32 stfsr (store floating-point state register).
   the current FSR value is staged through the FPX integer register
   and stored to memory: */
TME_SPARC_FORMAT3(tme_sparc32_stfsr, tme_uint32_t)
{
  _tme_sparc32_fpu_mem(ic);

  /* set the FSR value to store: */
  ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX) = ic->tme_sparc_fpu_fsr;

  /* do the store: */
  tme_sparc32_st(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX));
  TME_SPARC_INSN_OK;
}
/* this does a sparc32 fpop1 (floating-point arithmetic operate):
   checks that the FPU is present and enabled, then dispatches to the
   common FPop1 handler: */
TME_SPARC_FORMAT3(tme_sparc32_fpop1, tme_uint32_t)
{
  TME_SPARC_INSN_FPU;
  tme_sparc_fpu_fpop1(ic);
  TME_SPARC_INSN_OK;
}
/* this does a sparc32 fpop2 (floating-point compare/conditional
   operate): checks that the FPU is present and enabled, then
   dispatches to the common FPop2 handler: */
TME_SPARC_FORMAT3(tme_sparc32_fpop2, tme_uint32_t)
{
  TME_SPARC_INSN_FPU;
  tme_sparc_fpu_fpop2(ic);
  TME_SPARC_INSN_OK;
}
/* this does a sparc32 "mulscc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc32_mulscc, tme_uint32_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t y;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint32_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
/* "(1) The multiplier is established as r[rs2] if the i field is zero, or
sign_ext(simm13) if the i field is one."
"(3) If the least significant bit of the Y register = 1, the shifted
value from step (2) is added to the multiplier. If the LSB of the
Y register = 0, then 0 is added to the shifted value from step (2)." */
y = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y);
if ((y & 1) == 0) {
src2 = 0;
}
/* "(6) The Y register is shifted right by one bit, with the LSB of the
unshifted r[rs1] replacing the MSB of Y." */
y >>= 1;
if (src1 & 1) {
y += 0x80000000;
}
ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y) = y;
/* "(2) A 32-bit value is computed by shifting r[rs1] right by one
bit with (N xor V) from the PSR replacing the high-order bit.
(This is the proper sign for the previous partial product.)" */
src1 >>= 1;
if (((ic->tme_sparc32_ireg_psr ^ (ic->tme_sparc32_ireg_psr * (TME_SPARC32_PSR_ICC_N / TME_SPARC32_PSR_ICC_V))) & TME_SPARC32_PSR_ICC_N) != 0) {
src1 += 0x80000000;
}
/* "(4) The sum from step (3) is written into r[rd]." */
dst = src1 + src2;
/* "(5) The integer condition codes, icc, are updated according to the
addition performed in step (3)." */
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint32_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC32_PSR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC32_PSR_ICC_N);
/* if the operands are the same sign, and the destination has
a different sign, set V: */
cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC32_PSR_ICC_V);
/* if src1 and src2 both have the high bit set, or if dst does
not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC32_PSR_ICC_C;
/* set the condition codes: */
ic->tme_sparc32_ireg_psr = (ic->tme_sparc32_ireg_psr & ~TME_SPARC32_PSR_ICC) | cc;
TME_SPARC_INSN_OK;
}
/* this does a slow atomic operation (ldstub, ldstuba, swap, swapa).
   the operation is split into a slow load part followed by a slow
   store part, with ls->tme_sparc_ls_state tracking progress: zero
   means the operation hasn't started, a plain size means the load
   part is in progress, and (size | TME_BIT(7)) means the store part
   is in progress.  this function returns early - to be called again
   later - whenever a slow cycle transfers only part of the data: */
void
tme_sparc32_atomic(struct tme_sparc *ic, struct tme_sparc_ls *ls)
{
  tme_uint32_t endian_little;
  tme_uint32_t insn;
  tme_uint32_t value32;
  tme_uint32_t value_swap32;
  tme_uint32_t size;

  /* if this is the beginning of the operation: */
  if (ls->tme_sparc_ls_state == 0) {

    /* start the load part of the operation: */
    ls->tme_sparc_ls_state = ls->tme_sparc_ls_size;
    assert (ls->tme_sparc_ls_state != 0
            && (ls->tme_sparc_ls_state & TME_BIT(7)) == 0);

    /* the load must start at the beginning of the buffer: */
    assert (ls->tme_sparc_ls_buffer_offset == 0);
  }

  /* if this is the load part of the operation: */
  if ((ls->tme_sparc_ls_state & TME_BIT(7)) == 0) {

    /* do one slow load cycle: */
    tme_sparc32_load(ic, ls);

    /* if the slow load cycle did not load all of the data: */
    if (__tme_predict_false(ls->tme_sparc_ls_size != 0)) {
      return;
    }

    /* get the byte order of this transfer: */
    endian_little = FALSE;

    /* dispatch on the op3 of the instruction: */
    insn = TME_SPARC_INSN;
    switch ((insn >> 19) & 0x3f) {
    case 0x0d: /* ldstub */
    case 0x1d: /* ldstuba */

      /* finish the load part of the ldstub: */
      assert (ls->tme_sparc_ls_state == sizeof(tme_uint8_t));
      *ls->tme_sparc_ls_rd32 = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[0];

      /* start the store part of the ldstub: */
      ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[0] = 0xff;
      break;

      /* otherwise, this must be swap: */
    default:
      assert (((insn >> 19) & 0x2f) == 0x0f /* swap, swapa */);

      /* finish the load part of the swap: */
      assert (ls->tme_sparc_ls_state == sizeof(tme_uint32_t));
      value32 = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0];
      value_swap32 = *ls->tme_sparc_ls_rd32;

      /* convert the loaded memory word from, and the rd register
         value to, the byte order of the transfer.  NB: this
         previously converted value32 in both assignments, which made
         swap write the just-loaded memory word back to memory
         instead of the rd register value; value_swap32 must be
         converted from its own (host-order) register value: */
      if (endian_little) {
        value32 = tme_letoh_u32(value32);
        value_swap32 = tme_htole_u32(value_swap32);
      }
      else {
        value32 = tme_betoh_u32(value32);
        value_swap32 = tme_htobe_u32(value_swap32);
      }
      *ls->tme_sparc_ls_rd32 = value32;

      /* start the store part of the swap: */
      ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0] = value_swap32;
      break;
    }

    /* start the store part of the operation: rewind the address,
       reset the size and buffer offset, and mark the state as
       storing: */
    size = ls->tme_sparc_ls_state;
    ls->tme_sparc_ls_address32 -= size;
    ls->tme_sparc_ls_size = size;
    ls->tme_sparc_ls_buffer_offset = 0;
    ls->tme_sparc_ls_state = size | TME_BIT(7);
  }

  /* this is the store part of the operation: */

  /* do one slow store cycle: */
  tme_sparc32_store(ic, ls);

  /* if the slow store cycle did not store all of the data: */
  if (__tme_predict_false(ls->tme_sparc_ls_size != 0)) {
    return;
  }
}
/* this does one slow load cycle: it transfers up to the rest of the
   requested data into the memory buffer, limited by the end of the
   TLB entry's address range (and, on the bus-callout path, by the
   next 32-bit boundary).  on return, ls->tme_sparc_ls_size holds the
   number of bytes still to transfer; callers loop until it reaches
   zero: */
void
tme_sparc32_load(struct tme_sparc *ic,
                 struct tme_sparc_ls *ls)
{
  struct tme_sparc_tlb *tlb;
  tme_uint32_t address;
  unsigned int cycle_size;
  tme_bus_addr_t physical_address;
  int shift;
  int err;

  /* get the TLB entry: */
  tlb = ls->tme_sparc_ls_tlb;

  /* the TLB entry must be busy and valid: */
  assert (tme_bus_tlb_is_valid(&tlb->tme_sparc_tlb_bus_tlb));

  /* start the bus cycle structure: */
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_type = TME_BUS_CYCLE_READ;

  /* get the buffer: */
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer = &ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[ls->tme_sparc_ls_buffer_offset];
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer_increment = 1;

  /* get the current address: */
  address = ls->tme_sparc_ls_address32;
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = address;

  /* start the cycle size: clamp the remaining transfer size to the
     number of bytes left before the end of the TLB entry's range
     (the decrement/increment avoids overflowing when the range ends
     at the top of the address space): */
  cycle_size = ls->tme_sparc_ls_size;
  assert (cycle_size > 0);
  cycle_size--;
  cycle_size = TME_MIN(cycle_size, (((tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_last) - address)) + 1;

  /* if this TLB entry allows fast reads: */
  if (__tme_predict_true(tlb->tme_sparc_tlb_emulator_off_read != TME_EMULATOR_OFF_UNDEF)) {

    /* do a read directly from emulator memory: */
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;
    tme_memory_bus_read_buffer((tlb->tme_sparc_tlb_emulator_off_read + (tme_uint32_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address),
                               ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer,
                               ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size,
                               tlb->tme_sparc_tlb_bus_rwlock,
                               sizeof(tme_uint8_t),
                               sizeof(tme_uint32_t));
  }

  /* otherwise, this TLB entry does not allow fast reads: */
  else {

    /* finish the cycle size: also keep the cycle from crossing the
       next 32-bit boundary: */
    cycle_size = TME_MIN(cycle_size, 1 + ((~ (unsigned int) address) % sizeof(tme_uint32_t)));
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;

    /* form the physical address for the bus cycle handler, applying
       the TLB entry's offset and shift: */
    physical_address = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address;
    physical_address += tlb->tme_sparc_tlb_addr_offset;
    shift = tlb->tme_sparc_tlb_addr_shift;
    if (shift < 0) {
      physical_address <<= (0 - shift);
    }
    else if (shift > 0) {
      physical_address >>= shift;
    }
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = physical_address;

    /* finish the bus cycle structure: */
    (*ic->_tme_sparc_ls_bus_cycle)(ic, ls);
    tme_sparc_log(ic, 10000, TME_OK,
                  (TME_SPARC_LOG_HANDLE(ic),
                   _("cycle-load%u 0x%08" TME_PRIx32),
                   (unsigned int) (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size * 8),
                   (tme_bus_addr32_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address));

    /* callout the bus cycle (the TLB must be unbusied and the
       callout lock dropped around the callout, then reacquired): */
    tme_sparc_tlb_unbusy(tlb);
    tme_sparc_callout_unlock(ic);
    err = (*tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle)
      (tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle_private,
       &ls->tme_sparc_ls_bus_cycle);
    tme_sparc_callout_relock(ic);
    tme_sparc_tlb_busy(tlb);

    /* the TLB entry can't have been invalidated before the load: */
    assert (err != EBADF);

    /* if the bus cycle didn't complete normally: */
    if (err != TME_OK) {

      /* if a real bus fault may have happened, instead of
         some synchronous event: */
      if (err != TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {

        /* call the bus fault handlers: */
        err = tme_bus_tlb_fault(&tlb->tme_sparc_tlb_bus_tlb, &ls->tme_sparc_ls_bus_cycle, err);
      }

      /* if some synchronous event has happened: */
      if (err == TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {

        /* after the currently executing instruction finishes, check
           for external resets, halts, or interrupts: */
        ic->_tme_sparc_instruction_burst_remaining = 0;
        ic->_tme_sparc_instruction_burst_other = TRUE;
      }

      /* otherwise, if no real bus fault happened: */
      else if (err == TME_OK) {
        /* nothing to do */
      }

      /* otherwise, a real bus fault happened: */
      else {
        (*ic->_tme_sparc_ls_bus_fault)(ic, ls, err);
        return;
      }
    }
  }

  /* some data must have been transferred: */
  assert (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size > 0);

  /* update the address, buffer offset, and remaining size by the
     number of bytes actually transferred: */
  cycle_size = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size;
  ls->tme_sparc_ls_address32 += cycle_size;
  ls->tme_sparc_ls_buffer_offset += cycle_size;
  ls->tme_sparc_ls_size -= cycle_size;
}
/* this does one slow store cycle: it transfers up to the rest of the
   data in the memory buffer out to the bus, limited by the end of
   the TLB entry's address range (and, on the bus-callout path, by
   the next 32-bit boundary).  on return, ls->tme_sparc_ls_size holds
   the number of bytes still to transfer; callers loop until it
   reaches zero: */
void
tme_sparc32_store(struct tme_sparc *ic,
                  struct tme_sparc_ls *ls)
{
  struct tme_sparc_tlb *tlb;
  tme_uint32_t address;
  unsigned int cycle_size;
  tme_bus_addr_t physical_address;
  int shift;
  int err;

  /* get the TLB entry: */
  tlb = ls->tme_sparc_ls_tlb;

  /* the TLB entry must be busy and valid: */
  assert (tme_bus_tlb_is_valid(&tlb->tme_sparc_tlb_bus_tlb));

  /* start the bus cycle structure: */
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_type = TME_BUS_CYCLE_WRITE;

  /* get the buffer: */
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer = &ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[ls->tme_sparc_ls_buffer_offset];
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer_increment = 1;

  /* get the current address: */
  address = ls->tme_sparc_ls_address32;
  ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = address;

  /* start the cycle size: clamp the remaining transfer size to the
     number of bytes left before the end of the TLB entry's range
     (the decrement/increment avoids overflowing when the range ends
     at the top of the address space): */
  cycle_size = ls->tme_sparc_ls_size;
  assert (cycle_size > 0);
  cycle_size--;
  cycle_size = TME_MIN(cycle_size, (((tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_last) - address)) + 1;

  /* if this TLB entry allows fast writes: */
  if (__tme_predict_true(tlb->tme_sparc_tlb_emulator_off_write != TME_EMULATOR_OFF_UNDEF)) {

    /* do a write directly to emulator memory: */
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;
    tme_memory_bus_write_buffer((tlb->tme_sparc_tlb_emulator_off_write + (tme_uint32_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address),
                                ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer,
                                ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size,
                                tlb->tme_sparc_tlb_bus_rwlock,
                                sizeof(tme_uint8_t),
                                sizeof(tme_uint32_t));
  }

  /* otherwise, this TLB entry does not allow fast writes: */
  else {

    /* finish the cycle size: also keep the cycle from crossing the
       next 32-bit boundary: */
    cycle_size = TME_MIN(cycle_size, 1 + ((~ (unsigned int) address) % sizeof(tme_uint32_t)));
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;

    /* form the physical address for the bus cycle handler, applying
       the TLB entry's offset and shift: */
    physical_address = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address;
    physical_address += tlb->tme_sparc_tlb_addr_offset;
    shift = tlb->tme_sparc_tlb_addr_shift;
    if (shift < 0) {
      physical_address <<= (0 - shift);
    }
    else if (shift > 0) {
      physical_address >>= shift;
    }
    ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = physical_address;

    /* finish the bus cycle structure: */
    (*ic->_tme_sparc_ls_bus_cycle)(ic, ls);
    tme_sparc_log(ic, 10000, TME_OK,
                  (TME_SPARC_LOG_HANDLE(ic),
                   _("cycle-store%u 0x%08" TME_PRIx32),
                   (unsigned int) (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size * 8),
                   (tme_bus_addr32_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address));

    /* callout the bus cycle (the TLB must be unbusied and the
       callout lock dropped around the callout, then reacquired): */
    tme_sparc_tlb_unbusy(tlb);
    tme_sparc_callout_unlock(ic);
    err = (*tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle)
      (tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle_private,
       &ls->tme_sparc_ls_bus_cycle);
    tme_sparc_callout_relock(ic);
    tme_sparc_tlb_busy(tlb);

    /* the TLB entry can't have been invalidated before the store: */
    assert (err != EBADF);

    /* if the bus cycle didn't complete normally: */
    if (err != TME_OK) {

      /* if a real bus fault may have happened, instead of
         some synchronous event: */
      if (err != TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {

        /* call the bus fault handlers: */
        err = tme_bus_tlb_fault(&tlb->tme_sparc_tlb_bus_tlb, &ls->tme_sparc_ls_bus_cycle, err);
      }

      /* if some synchronous event has happened: */
      if (err == TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {

        /* after the currently executing instruction finishes, check
           for external resets, halts, or interrupts: */
        ic->_tme_sparc_instruction_burst_remaining = 0;
        ic->_tme_sparc_instruction_burst_other = TRUE;
      }

      /* otherwise, if no real bus fault happened: */
      else if (err == TME_OK) {
        /* nothing to do */
      }

      /* otherwise, a real bus fault happened: */
      else {
        (*ic->_tme_sparc_ls_bus_fault)(ic, ls, err);
        return;
      }
    }
  }

  /* some data must have been transferred: */
  assert (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size > 0);

  /* if this was an atomic operation: */
  if (__tme_predict_false(ls->tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_ATOMIC)) {

    /* we do not support atomic operations in TLB entries that
       do not support both fast reads and fast writes.  assuming
       that all atomic operations are to regular memory, we
       should always get fast read and fast write TLBs.  when
       we do not, it should only be because the memory has been
       made read-only in the MMU.  the write above was supposed
       to cause a fault (with the instruction rerun later with
       a fast read and fast write TLB entry), but instead it
       succeeded and transferred some data.  we have modified
       memory and cannot recover: */
    abort();
  }

  /* update the address, buffer offset, and remaining size by the
     number of bytes actually transferred: */
  cycle_size = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size;
  ls->tme_sparc_ls_address32 += cycle_size;
  ls->tme_sparc_ls_buffer_offset += cycle_size;
  ls->tme_sparc_ls_size -= cycle_size;
}
/* this does a slow load or store: */
/* NB(doc): address_first is the first address of the transfer, _rd
   points to the (first) rd register, and lsinfo packs the transfer
   size, any explicit ASI, and the operation flags.  the return value
   lets the caller finish a fast transfer: a pointer into emulator
   memory for the TLB entry, or a pointer to the internal memory
   buffer biased by -address_first (for a slow load that hasn't
   updated rd yet), or TME_EMULATOR_OFF_UNDEF when no fast completion
   is needed (store, atomic, or already-completed load): */
tme_shared tme_uint8_t *
tme_sparc32_ls(struct tme_sparc *ic,
tme_uint32_t const address_first,
tme_uint32_t *_rd,
tme_uint32_t lsinfo)
{
struct tme_sparc_ls ls;
tme_uint32_t size;
tme_uint32_t asi;
tme_uint32_t asi_mask_flags;
tme_uint32_t asi_mask;
tme_bus_context_t context;
tme_uint32_t tlb_hash;
unsigned long tlb_i;
unsigned long handler_i;
struct tme_sparc_tlb *tlb;
unsigned int cycle_type;
tme_uint32_t address;
void (*address_map) _TME_P((struct tme_sparc *, struct tme_sparc_ls *));
tme_bus_addr_t address_bus;
int rc;
const tme_shared tme_uint8_t *emulator_off;
unsigned int buffer_offset;
tme_uint32_t value;
tme_uint32_t value32;
/* we must not be replaying instructions: */
assert (tme_sparc_recode_verify_replay_last_pc(ic) == 0);
/* initialize the pointer to the rd register: */
ls.tme_sparc_ls_rd32 = _rd;
#ifndef NDEBUG
/* initialize the cycle function: */
ls.tme_sparc_ls_cycle = NULL;
/* initialize the TLB entry pointer: */
ls.tme_sparc_ls_tlb = NULL;
#endif /* NDEBUG */
/* initialize the faults: */
ls.tme_sparc_ls_faults = TME_SPARC_LS_FAULT_NONE;
/* initialize the address: */
ls.tme_sparc_ls_address32 = address_first;
/* initialize the size: */
size = TME_SPARC_LSINFO_WHICH_SIZE(lsinfo);
ls.tme_sparc_ls_size = size;
/* initialize the info: */
ls.tme_sparc_ls_lsinfo = lsinfo;
/* if the address is not aligned: */
if (__tme_predict_false(((size - 1) & (tme_uint32_t) address_first) != 0)) {
ls.tme_sparc_ls_faults |= TME_SPARC_LS_FAULT_ADDRESS_NOT_ALIGNED;
}
/* otherwise, the address is aligned: */
else {
/* the transfer must not cross a 32-bit boundary: */
assert ((size - 1) <= (tme_uint32_t) ~address_first);
}
/* initialize the address map: */
ls.tme_sparc_ls_address_map = ic->_tme_sparc_ls_address_map;
/* if this is a ldd, ldda, std, or stda, or an instruction
that loads or stores in the same way: */
if (lsinfo & TME_SPARC_LSINFO_LDD_STD) {
/* if the rd register is odd: */
/* NB: we don't check the rd field in the instruction,
because the register number there might be encoded
in some way, or the architecture might ignore bit
zero in the rd field (for example, the sparc32 lddf).
instead, we test the rd register pointer: */
if (__tme_predict_false((ls.tme_sparc_ls_rd32
- ic->tme_sparc_ic.tme_ic_iregs.tme_ic_iregs_uint32s)
% 2)) {
ls.tme_sparc_ls_faults |= TME_SPARC_LS_FAULT_LDD_STD_RD_ODD;
}
}
/* if the ASI has been specified: */
if (lsinfo & TME_SPARC_LSINFO_A) {
/* get the ASI: */
asi = TME_SPARC_LSINFO_WHICH_ASI(lsinfo);
/* get the flags for this ASI: */
asi_mask_flags = ic->tme_sparc_asis[asi].tme_sparc_asi_mask_flags;
/* make the ASI mask: */
if (asi_mask_flags & TME_SPARC32_ASI_MASK_FLAG_SPECIAL) {
asi_mask
= TME_SPARC_ASI_MASK_SPECIAL(asi, TRUE);
}
else {
asi_mask = TME_SPARC32_ASI_MASK(asi, asi);
}
ls.tme_sparc_ls_asi_mask = asi_mask;
/* get the context for the alternate address space: */
context = ic->tme_sparc_memory_context_default;
ls.tme_sparc_ls_context = context;
/* get the default TLB entry index: */
tlb_hash = TME_SPARC_TLB_HASH(ic, context, address_first);
if (lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
tlb_i = TME_SPARC_ITLB_ENTRY(ic, tlb_hash);
}
else {
tlb_i = TME_SPARC_DTLB_ENTRY(ic, tlb_hash);
}
ls.tme_sparc_ls_tlb_i = tlb_i;
/* call any special handler for this ASI: */
handler_i = ic->tme_sparc_asis[TME_SPARC_ASI_MASK_WHICH(asi_mask)].tme_sparc_asi_handler;
if (__tme_predict_false(handler_i != 0)) {
(*ic->_tme_sparc_ls_asi_handlers[handler_i])(ic, &ls);
}
/* get the final TLB entry index: */
/* NB(doc): the ASI handler may have redirected the operation to a
different TLB entry, so re-read the index from ls: */
tlb_i = ls.tme_sparc_ls_tlb_i;
}
/* otherwise, the ASI has not been specified: */
else {
/* get the ASI mask: */
asi_mask = ic->tme_sparc_asi_mask_data;
/* add in any ASI mask flags from the instruction: */
assert (TME_SPARC_LSINFO_WHICH_ASI_FLAGS(lsinfo) == 0
);
asi_mask |= TME_SPARC_LSINFO_WHICH_ASI_FLAGS(lsinfo);
/* set the ASI mask: */
ls.tme_sparc_ls_asi_mask = asi_mask;
/* get the context: */
context = ic->tme_sparc_memory_context_default;
ls.tme_sparc_ls_context = context;
/* this must not be a fetch: */
assert ((lsinfo & TME_SPARC_LSINFO_OP_FETCH) == 0);
/* get the TLB entry index: */
tlb_hash = TME_SPARC_TLB_HASH(ic, context, address_first);
tlb_i = TME_SPARC_DTLB_ENTRY(ic, tlb_hash);
ls.tme_sparc_ls_tlb_i = tlb_i;
}
/* get the TLB entry pointer: */
tlb = &ic->tme_sparc_tlbs[tlb_i];
ls.tme_sparc_ls_tlb = tlb;
/* get the cycle type: */
/* NB: we deliberately set this once, now, since the lsinfo
may change once we start transferring: */
cycle_type
= ((lsinfo
& (TME_SPARC_LSINFO_OP_ST
| TME_SPARC_LSINFO_OP_ATOMIC))
? TME_BUS_CYCLE_WRITE
: TME_BUS_CYCLE_READ);
/* loop until the transfer is complete: */
for (;;) {
/* if we have faulted: */
if (__tme_predict_false(ls.tme_sparc_ls_faults != TME_SPARC_LS_FAULT_NONE)) {
/* unbusy this TLB, since the trap function may not return: */
tme_bus_tlb_unbusy(&tlb->tme_sparc_tlb_bus_tlb);
/* call the trap function, which will not return if it traps: */
(*ic->_tme_sparc_ls_trap)(ic, &ls);
/* rebusy this TLB: */
tme_bus_tlb_busy(&tlb->tme_sparc_tlb_bus_tlb);
/* since the trap function returned, it must have cleared the fault: */
assert (ls.tme_sparc_ls_faults == TME_SPARC_LS_FAULT_NONE);
}
/* if the transfer is complete, stop now: */
if (__tme_predict_false(ls.tme_sparc_ls_size == 0)) {
break;
}
/* get the current address: */
address = ls.tme_sparc_ls_address32;
/* if this TLB entry does not apply or is invalid: */
if ((tlb->tme_sparc_tlb_context != ls.tme_sparc_ls_context
&& tlb->tme_sparc_tlb_context <= ic->tme_sparc_memory_context_max)
|| address < (tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_first
|| address > (tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_last
|| !TME_SPARC_TLB_ASI_MASK_OK(tlb, ls.tme_sparc_ls_asi_mask)
|| ((tlb->tme_sparc_tlb_cycles_ok & cycle_type) == 0
&& (cycle_type == TME_BUS_CYCLE_READ
? tlb->tme_sparc_tlb_emulator_off_read
: tlb->tme_sparc_tlb_emulator_off_write) == TME_EMULATOR_OFF_UNDEF)
|| tme_bus_tlb_is_invalid(&tlb->tme_sparc_tlb_bus_tlb)) {
/* unbusy this TLB entry for filling: */
tme_bus_tlb_unbusy_fill(&tlb->tme_sparc_tlb_bus_tlb);
/* if we haven't mapped this address yet: */
address_map = ls.tme_sparc_ls_address_map;
if (address_map != NULL) {
ls.tme_sparc_ls_address_map = NULL;
/* count this mapping: */
if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
TME_SPARC_STAT(ic, tme_sparc_stats_itlb_map);
}
else {
TME_SPARC_STAT(ic, tme_sparc_stats_dtlb_map);
}
/* initialize the ASI mask and context on this TLB entry: */
/* NB that the ASI mask will likely be updated by either the
address mapping or the TLB fill: */
tlb->tme_sparc_tlb_asi_mask
= (ls.tme_sparc_ls_asi_mask
& ~TME_SPARC_ASI_MASK_FLAGS_AVAIL);
tlb->tme_sparc_tlb_context = ls.tme_sparc_ls_context;
/* NB: if the address mapping traps, we won't get a chance
to finish updating this TLB entry, which is currently in
an inconsistent state - but not necessarily an unusable
state. poison it to be unusable, including any recode
TLB entry: */
tlb->tme_sparc_tlb_addr_first = 1;
tlb->tme_sparc_tlb_addr_last = 0;
#if TME_SPARC_HAVE_RECODE(ic)
if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
tme_sparc32_recode_chain_tlb_update(ic, &ls);
}
else {
tme_sparc32_recode_ls_tlb_update(ic, &ls);
}
#endif /* TME_SPARC_HAVE_RECODE(ic) */
#ifndef NDEBUG
/* initialize the mapping TLB entry: */
ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_first = 0 - (tme_bus_addr_t) 1;
ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_last = 0 - (tme_bus_addr_t) 2;
ls.tme_sparc_ls_tlb_map.tme_bus_tlb_cycles_ok = 0;
ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset = 0 - (tme_bus_addr_t) 1;
#endif /* !NDEBUG */
/* map the address: */
(*address_map)(ic, &ls);
/* the address mapping must do any trapping itself: */
assert (ls.tme_sparc_ls_faults == TME_SPARC_LS_FAULT_NONE);
/* if the address mapping completed the transfer: */
if (__tme_predict_false(ls.tme_sparc_ls_size == 0)) {
/* rebusy the TLB entry: */
tme_sparc_tlb_busy(tlb);
/* stop now: */
break;
}
/* the mapping must have actually made a mapping: */
assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_first != 0 - (tme_bus_addr_t) 1);
assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_last != 0 - (tme_bus_addr_t) 2);
assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_cycles_ok != 0);
assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset != 0 - (tme_bus_addr_t) 1);
}
/* count this fill: */
if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
TME_SPARC_STAT(ic, tme_sparc_stats_itlb_fill);
}
else {
TME_SPARC_STAT(ic, tme_sparc_stats_dtlb_fill);
}
/* get the bus address: */
address_bus = ls.tme_sparc_ls_address32 + ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset;
/* fill the TLB entry: */
tme_sparc_callout_unlock(ic);
rc = (*ic->_tme_sparc_bus_connection->tme_sparc_bus_tlb_fill)
(ic->_tme_sparc_bus_connection,
tlb,
ls.tme_sparc_ls_asi_mask,
address_bus,
cycle_type);
assert (rc == TME_OK);
tme_sparc_callout_relock(ic);
/* map the TLB entry: */
tme_bus_tlb_map(&tlb->tme_sparc_tlb_bus_tlb, address_bus,
&ls.tme_sparc_ls_tlb_map, ls.tme_sparc_ls_address32);
/* update any recode TLB entry: */
#if TME_SPARC_HAVE_RECODE(ic)
if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
tme_sparc32_recode_chain_tlb_update(ic, &ls);
}
else {
tme_sparc32_recode_ls_tlb_update(ic, &ls);
}
#endif /* TME_SPARC_HAVE_RECODE(ic) */
/* rebusy the TLB entry: */
tme_sparc_tlb_busy(tlb);
/* if this TLB entry is already invalid: */
if (tme_bus_tlb_is_invalid(&tlb->tme_sparc_tlb_bus_tlb)) {
continue;
}
}
/* this TLB entry must apply: */
assert ((tlb->tme_sparc_tlb_context == ls.tme_sparc_ls_context
|| tlb->tme_sparc_tlb_context > ic->tme_sparc_memory_context_max)
&& ls.tme_sparc_ls_address32 >= (tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_first
&& ls.tme_sparc_ls_address32 <= (tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_last
&& ((tlb->tme_sparc_tlb_cycles_ok & cycle_type)
|| (cycle_type == TME_BUS_CYCLE_READ
? tlb->tme_sparc_tlb_emulator_off_read
: tlb->tme_sparc_tlb_emulator_off_write) != TME_EMULATOR_OFF_UNDEF)
&& TME_SPARC_TLB_ASI_MASK_OK(tlb, ls.tme_sparc_ls_asi_mask));
/* get the current lsinfo: */
lsinfo = ls.tme_sparc_ls_lsinfo;
/* if we have to check the TLB: */
if (__tme_predict_true((lsinfo & TME_SPARC_LSINFO_NO_CHECK_TLB) == 0)) {
/* get the ASI mask for this TLB entry: */
asi_mask = tlb->tme_sparc_tlb_asi_mask;
}
/* if we might not have to call a slow cycle function: */
if (__tme_predict_true((lsinfo & TME_SPARC_LSINFO_SLOW_CYCLES) == 0)) {
/* if this TLB entry allows fast transfer of all of the addresses: */
if (__tme_predict_true(((tme_bus_addr32_t) tlb->tme_sparc_tlb_addr_last) >= (address_first + (ls.tme_sparc_ls_size - 1)))) {
emulator_off = tlb->tme_sparc_tlb_emulator_off_read;
if (lsinfo & TME_SPARC_LSINFO_OP_ST) {
emulator_off = tlb->tme_sparc_tlb_emulator_off_write;
}
if (__tme_predict_true(emulator_off != TME_EMULATOR_OFF_UNDEF
&& (((lsinfo & TME_SPARC_LSINFO_OP_ATOMIC) == 0)
|| emulator_off == tlb->tme_sparc_tlb_emulator_off_write))) {
/* return and let our caller do the transfer: */
/* NB: we break const here: */
return ((tme_shared tme_uint8_t *) emulator_off);
}
}
/* we have to call a slow cycle function: */
lsinfo |= TME_SPARC_LSINFO_SLOW_CYCLES;
assert (ls.tme_sparc_ls_cycle == NULL);
/* assume that this operation will transfer the start of the buffer: */
buffer_offset = 0;
/* assume that this is a load or a fetch: */
ls.tme_sparc_ls_cycle = tme_sparc32_load;
/* if this is a store: */
if (lsinfo & TME_SPARC_LSINFO_OP_ST) {
/* put the (first) register to store in the memory buffer: */
value = TME_SPARC_FORMAT3_RD;
/* NB(doc): the FALSE conditionals below are constant-folded
generator output; only the big-endian arms are live here -
presumably because this sparc's memory image is big-endian.
confirm against sparc-insns-auto.sh: */
value = (FALSE ? tme_htole_u32(value) : tme_htobe_u32(value));
ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0] = value;
/* find the offset in the memory buffer corresponding to the
first address: */
buffer_offset = sizeof(tme_uint32_t) - ls.tme_sparc_ls_size;
if (FALSE) {
buffer_offset = 0;
}
/* if this is a std or stda: */
if (lsinfo & TME_SPARC_LSINFO_LDD_STD) {
/* put the odd 32-bit register to store in the memory buffer
after the even 32-bit register. exactly where this is depends
on the architecture and on the byte order of the store: */
value32 = TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint32);
if (FALSE) {
value32 = tme_htole_u32(value32);
ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[1] = value32;
buffer_offset = 0;
}
else {
value32 = tme_htobe_u32(value32);
ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[(32 / 32)] = value32;
buffer_offset = sizeof(tme_uint32_t) - sizeof(tme_uint32_t);
}
}
/* set the cycle function: */
ls.tme_sparc_ls_cycle = tme_sparc32_store;
}
/* otherwise, if this is an atomic: */
else if (lsinfo & TME_SPARC_LSINFO_OP_ATOMIC) {
/* set the cycle function: */
ls.tme_sparc_ls_cycle = tme_sparc32_atomic;
}
/* set the buffer offset for the (first) slow cycle: */
ls.tme_sparc_ls_buffer_offset = buffer_offset;
/* clear the state for this operation: */
ls.tme_sparc_ls_state = 0;
}
/* assume that we won't have to check the TLB again: */
ls.tme_sparc_ls_lsinfo = lsinfo | TME_SPARC_LSINFO_NO_CHECK_TLB;
/* call the slow cycle function: */
(*ls.tme_sparc_ls_cycle)(ic, &ls);
}
/* if this was a load that has already completed, a store,
or an atomic, make sure our caller doesn't try to complete
a fast transfer: */
if (ls.tme_sparc_ls_lsinfo
& (TME_SPARC_LSINFO_LD_COMPLETED
| TME_SPARC_LSINFO_OP_ST
| TME_SPARC_LSINFO_OP_ATOMIC)) {
return (TME_EMULATOR_OFF_UNDEF);
}
/* otherwise, this was a load that did slow cycles into the
memory buffer and hasn't updated rd yet. return a pointer
to the memory buffer so our caller can complete the load: */
return (ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s
- address_first);
}
#undef TME_SPARC_VERSION
#define TME_SPARC_VERSION(ic) _TME_SPARC_VERSION(ic)
#ifdef TME_HAVE_INT64_T
#undef TME_SPARC_VERSION
#define TME_SPARC_VERSION(ic) (9)
static tme_uint32_t
_tme_sparc64_alternate_asi_mask(struct tme_sparc *ic)
{
  unsigned int asi;
  unsigned int flags;
  tme_uint32_t mask;

  /* the ASI comes from the instruction's asi field, unless the i bit
     is set, in which case it comes from the ASI register: */
  asi
    = ((TME_SPARC_INSN & TME_BIT(13))
       ? (unsigned int) ic->tme_sparc64_ireg_asi
       : TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, (0xff << 5)));

  /* look up the mask flags for this ASI: */
  flags = ic->tme_sparc_asis[asi].tme_sparc_asi_mask_flags;

  /* a nonprivileged access is forced to behave as-if-user, and a
     nonprivileged access to a restricted ASI is forced onto the slow
     load/store path, which will generate the privileged_action
     trap: */
  if (!TME_SPARC_PRIV(ic)) {
    if (__tme_predict_false((asi & TME_SPARC64_ASI_FLAG_UNRESTRICTED) == 0)) {
      flags |= TME_SPARC_ASI_MASK_FLAG_UNDEF;
    }
    flags |= TME_SPARC64_ASI_MASK_FLAG_INSN_AS_IF_USER;
  }

  /* build the ASI mask, either the special or the normal way: */
  if (flags & TME_SPARC64_ASI_MASK_FLAG_SPECIAL) {
    mask
      = (flags
         + TME_SPARC_ASI_MASK_SPECIAL(asi,
                                      ((flags & TME_SPARC64_ASI_MASK_FLAG_INSN_AS_IF_USER) == 0)));
  }
  else {
    mask = TME_SPARC64_ASI_MASK(asi, flags);
  }

  /* an ASI with a special handler must also take the slow load/store
     path, which will call that handler: */
  if (__tme_predict_false(ic->tme_sparc_asis[TME_SPARC_ASI_MASK_WHICH(mask)].tme_sparc_asi_handler != 0)) {
    mask |= TME_SPARC_ASI_MASK_FLAG_UNDEF;
  }

  return (mask);
}
/* this is the common preamble for sparc64 floating-point loads and
   stores.  it checks for traps in priority order and returns the
   floating-point register to transfer, or float_buffer itself when
   no register should be touched (null float format, or a misaligned
   address that the eventual load or store will trap on).
   NB(doc): misaligned appears to carry the low alignment bits of the
   memory effective address - confirm at callers.  float_buffer's
   tme_float_format selects the register-file format: */
static struct tme_float *
_tme_sparc64_fpu_mem_fpreg(struct tme_sparc *ic,
tme_uint32_t misaligned,
struct tme_float *float_buffer)
{
unsigned int float_format;
unsigned int fpreg_format;
tme_uint32_t fp_store;
unsigned int fpu_mode;
unsigned int fpreg_number;
/* NB: this checks for various traps by their priority order: */
TME_SPARC_INSN_FPU_ENABLED;
/* get the floating-point format: */
float_format = float_buffer->tme_float_format;
/* convert the floating-point format into the ieee754
floating-point register file format: */
/* NB(doc): the #if blocks below are compile-time consistency checks
that the TME_FLOAT_FORMAT_ values are an exact multiple of the
TME_IEEE754_FPREG_FORMAT_ values, so the division below is a
valid conversion: */
#if (TME_FLOAT_FORMAT_NULL | TME_IEEE754_FPREG_FORMAT_NULL) != 0
#error "TME_FLOAT_FORMAT_ or TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
#if TME_FLOAT_FORMAT_IEEE754_SINGLE < TME_IEEE754_FPREG_FORMAT_SINGLE
#error "TME_FLOAT_FORMAT_ or TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
#if (TME_FLOAT_FORMAT_IEEE754_SINGLE / TME_IEEE754_FPREG_FORMAT_SINGLE) != (TME_FLOAT_FORMAT_IEEE754_DOUBLE / TME_IEEE754_FPREG_FORMAT_DOUBLE)
#error "TME_FLOAT_FORMAT_ or TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
assert (float_format == TME_FLOAT_FORMAT_NULL
|| float_format == TME_FLOAT_FORMAT_IEEE754_SINGLE
|| float_format == TME_FLOAT_FORMAT_IEEE754_DOUBLE);
fpreg_format = float_format / (TME_FLOAT_FORMAT_IEEE754_SINGLE / TME_IEEE754_FPREG_FORMAT_SINGLE);
/* if the memory address is misaligned, return the
float buffer now. the eventual load or store will
cause the mem_address_not_aligned trap: */
/* if the memory address is misaligned: */
#if TME_IEEE754_FPREG_FORMAT_NULL != 0 || TME_IEEE754_FPREG_FORMAT_SINGLE != 1 || TME_IEEE754_FPREG_FORMAT_DOUBLE != 2 || TME_IEEE754_FPREG_FORMAT_QUAD != 4
#error "TME_IEEE754_FPREG_FORMAT_ values changed"
#endif
assert (fpreg_format == TME_IEEE754_FPREG_FORMAT_NULL
|| fpreg_format == TME_IEEE754_FPREG_FORMAT_SINGLE
|| fpreg_format == TME_IEEE754_FPREG_FORMAT_DOUBLE
|| fpreg_format == TME_IEEE754_FPREG_FORMAT_QUAD);
/* NB(doc): fpreg_format doubles as the operand size in 32-bit
words, so this masks the address down to its alignment bits: */
misaligned &= ((sizeof(tme_uint32_t) * fpreg_format) - 1);
if (__tme_predict_false(misaligned)) {
/* if the memory address is not even 32-bit aligned, or
if this SPARC doesn't support loads and stores of this
size at 32-bit alignment: */
if (misaligned != sizeof(tme_uint32_t)
#if TME_IEEE754_FPREG_FORMAT_SINGLE != 1 || (TME_SPARC_MEMORY_FLAG_HAS_LDDF_STDF_32 * TME_IEEE754_FPREG_FORMAT_DOUBLE) != TME_SPARC_MEMORY_FLAG_HAS_LDQF_STQF_32
#error "TME_IEEE754_FPREG_FORMAT_ or TME_SPARC_MEMORY_FLAG_ values changed"
#endif
|| (TME_SPARC_MEMORY_FLAGS(ic)
& (TME_SPARC_MEMORY_FLAG_HAS_LDDF_STDF_32 * fpreg_format)) == 0) {
return (float_buffer);
}
}
/* see if this is a floating-point load or store: */
/* NB: all of the floating-point instructions that use
this preamble have bit two of op3 clear for a load,
and set for a store: */
fp_store = (TME_SPARC_INSN & TME_BIT(19 + 2));
/* if the FPU isn't in execute mode: */
fpu_mode = ic->tme_sparc_fpu_mode;
if (__tme_predict_false(fpu_mode != TME_SPARC_FPU_MODE_EXECUTE)) {
/* if this is a floating-point load, or if this is a
floating-point store and a floating-point exception
is pending: */
if (!fp_store
|| fpu_mode == TME_SPARC_FPU_MODE_EXCEPTION_PENDING) {
/* do an FPU exception check: */
tme_sparc_fpu_exception_check(ic);
}
}
/* if this is not a load or store of a floating-point register: */
if (fpreg_format == TME_IEEE754_FPREG_FORMAT_NULL) {
return (float_buffer);
}
/* decode rd: */
fpreg_number
= tme_sparc_fpu_fpreg_decode(ic,
TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN,
TME_SPARC_FORMAT3_MASK_RD),
fpreg_format);
/* make sure this floating-point register has the right precision: */
tme_sparc_fpu_fpreg_format(ic, fpreg_number, fpreg_format | TME_IEEE754_FPREG_FORMAT_BUILTIN);
/* if this is a floating-point load: */
if (!fp_store) {
/* mark rd as dirty: */
TME_SPARC_FPU_DIRTY(ic, fpreg_number);
}
/* return the floating-point register: */
return (&ic->tme_sparc_fpu_fpregs[fpreg_number]);
}
/* this runs the sparc64 floating-point memory trap checks with no
   associated floating-point register: the null float has format
   TME_FLOAT_FORMAT_NULL and the address is passed as aligned (0): */
#define _tme_sparc64_fpu_mem(ic) \
do { _tme_sparc64_fpu_mem_fpreg(ic, 0, &_tme_sparc_float_null); } while (/* CONSTCOND */ 0)
/* this does a sparc64 "add SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_add, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 + src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "addcc SRC1, SRC2, DST": */
/* NB(doc): the icc condition codes are computed from the low 32 bits
   of the operands and result, and the xcc condition codes from the
   full 64 bits.  each bit is computed branch-free as a boolean (0 or
   1) multiplied by that bit's mask, accumulated into cc: */
TME_SPARC_FORMAT3(tme_sparc64_addcc, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 + src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* if the operands are the same sign, and the destination has
a different sign, set V: */
cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_ICC_V);
/* if src1 and src2 both have the high bit set, or if dst does
not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_ICC_C;
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* if the operands are the same sign, and the destination has
a different sign, set V: */
cc += ((((tme_int64_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint64_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_XCC_V);
/* if src1 and src2 both have the high bit set, or if dst does
not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int64_t) (((tme_uint64_t) (src1 & src2)) | ((((tme_uint64_t) dst) ^ (((tme_uint64_t) 0) - 1)) & ((tme_uint64_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_XCC_C;
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "sub SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_sub, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 - src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "subcc SRC1, SRC2, DST": */
/* NB(doc): the icc condition codes are computed from the low 32 bits
   of the operands and result, and the xcc condition codes from the
   full 64 bits.  each bit is computed branch-free as a boolean (0 or
   1) multiplied by that bit's mask, accumulated into cc: */
TME_SPARC_FORMAT3(tme_sparc64_subcc, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 - src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* if the operands are different signs, and the destination has
a different sign from the first operand, set V: */
cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_ICC_V);
/* if src2 is greater than src1, set C: */
cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1))) * TME_SPARC64_CCR_ICC_C;
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* if the operands are different signs, and the destination has
a different sign from the first operand, set V: */
cc += ((((tme_int64_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_XCC_V);
/* if src2 is greater than src1, set C: */
cc += ((((tme_uint64_t) src2) > ((tme_uint64_t) src1))) * TME_SPARC64_CCR_XCC_C;
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "or SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_or, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 | src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "orcc SRC1, SRC2, DST": */
/* NB(doc): for a logical operation only the N and Z condition codes
   can be set; cc never accumulates V or C, so those bits of %ccr end
   up clear.  icc is computed from the low 32 bits of the result and
   xcc from the full 64 bits, branch-free: */
TME_SPARC_FORMAT3(tme_sparc64_orcc, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 | src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "orn SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_orn, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 | ~src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "orncc SRC1, SRC2, DST": */
/* NB(doc): for a logical operation only the N and Z condition codes
   can be set; cc never accumulates V or C, so those bits of %ccr end
   up clear.  icc is computed from the low 32 bits of the result and
   xcc from the full 64 bits, branch-free: */
TME_SPARC_FORMAT3(tme_sparc64_orncc, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 | ~src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "and SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_and, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 & src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "andcc SRC1, SRC2, DST": */
/* NB(doc): for a logical operation only the N and Z condition codes
   can be set; cc never accumulates V or C, so those bits of %ccr end
   up clear.  icc is computed from the low 32 bits of the result and
   xcc from the full 64 bits, branch-free: */
TME_SPARC_FORMAT3(tme_sparc64_andcc, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 & src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "andn SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_andn, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 & ~src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "andncc SRC1, SRC2, DST": */
/* NB(doc): for a logical operation only the N and Z condition codes
   can be set; cc never accumulates V or C, so those bits of %ccr end
   up clear.  icc is computed from the low 32 bits of the result and
   xcc from the full 64 bits, branch-free: */
TME_SPARC_FORMAT3(tme_sparc64_andncc, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 & ~src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "xor SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_xor, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 ^ src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "xorcc SRC1, SRC2, DST": */
/* NB(doc): for a logical operation only the N and Z condition codes
   can be set; cc never accumulates V or C, so those bits of %ccr end
   up clear.  icc is computed from the low 32 bits of the result and
   xcc from the full 64 bits, branch-free: */
TME_SPARC_FORMAT3(tme_sparc64_xorcc, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 ^ src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "xnor SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_xnor, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 ^ ~src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "xnorcc SRC1, SRC2, DST": */
/* NB(doc): for a logical operation only the N and Z condition codes
   can be set; cc never accumulates V or C, so those bits of %ccr end
   up clear.  icc is computed from the low 32 bits of the result and
   xcc from the full 64 bits, branch-free: */
TME_SPARC_FORMAT3(tme_sparc64_xnorcc, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 ^ ~src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "addx SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_addx, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 + src2;
dst += ((ic->tme_sparc64_ireg_ccr & TME_SPARC64_CCR_ICC_C) != 0);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "addxcc SRC1, SRC2, DST": */
/* NB(doc): like addcc, but the icc carry bit from %ccr is added into
   the sum first.  the icc condition codes are computed from the low
   32 bits of the operands and result, and the xcc condition codes
   from the full 64 bits, each bit branch-free as a boolean (0 or 1)
   multiplied by that bit's mask: */
TME_SPARC_FORMAT3(tme_sparc64_addxcc, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 + src2;
dst += ((ic->tme_sparc64_ireg_ccr & TME_SPARC64_CCR_ICC_C) != 0);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* if the operands are the same sign, and the destination has
a different sign, set V: */
cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_ICC_V);
/* if src1 and src2 both have the high bit set, or if dst does
not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_ICC_C;
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* if the operands are the same sign, and the destination has
a different sign, set V: */
cc += ((((tme_int64_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint64_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_XCC_V);
/* if src1 and src2 both have the high bit set, or if dst does
not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int64_t) (((tme_uint64_t) (src1 & src2)) | ((((tme_uint64_t) dst) ^ (((tme_uint64_t) 0) - 1)) & ((tme_uint64_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_XCC_C;
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "subx SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_subx, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 - src2;
dst -= ((ic->tme_sparc64_ireg_ccr & TME_SPARC64_CCR_ICC_C) != 0);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "subxcc SRC1, SRC2, DST": */
/* subtract-with-borrow: the borrow-in comes from the 32-bit icc C
   bit, and both the 32-bit (icc) and 64-bit (xcc) condition code
   fields of %ccr are recomputed.  each condition below is a 0-or-1
   value multiplied by a distinct mask bit, so the += chain simply
   assembles disjoint bits: */
TME_SPARC_FORMAT3(tme_sparc64_subxcc, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 - src2;
/* subtract the borrow-in taken from the icc C bit: */
dst -= ((ic->tme_sparc64_ireg_ccr & TME_SPARC64_CCR_ICC_C) != 0);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* if the operands are different signs, and the destination has
   a different sign from the first operand, set V: */
cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_ICC_V);
/* if src2 is greater than src1, set C: */
/* NB: a borrow-out also happens when src2 == src1 and there was a
   borrow-in, hence the equality tie-break on the old C bit: */
cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1)) || (((tme_uint32_t) src2) == ((tme_uint32_t) src1) && (ic->tme_sparc64_ireg_ccr & TME_SPARC64_CCR_ICC_C))) * TME_SPARC64_CCR_ICC_C;
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* if the operands are different signs, and the destination has
   a different sign from the first operand, set V: */
cc += ((((tme_int64_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_XCC_V);
/* if src2 is greater than src1, set C: */
cc += ((((tme_uint64_t) src2) > ((tme_uint64_t) src1)) || (((tme_uint64_t) src2) == ((tme_uint64_t) src1) && (ic->tme_sparc64_ireg_ccr & TME_SPARC64_CCR_ICC_C))) * TME_SPARC64_CCR_XCC_C;
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "taddcc SRC1, SRC2, DST": */
/* tagged add and set condition codes: in addition to the normal add
   condition codes, the icc V bit is also forced set whenever either
   operand has a nonzero two-bit tag (bits 0-1): */
TME_SPARC_FORMAT3(tme_sparc64_taddcc, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 + src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* if the operands are the same sign, and the destination has
   a different sign, set V: */
cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_ICC_V);
/* if src1 and src2 both have the high bit set, or if dst does
   not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_ICC_C;
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* if the operands are the same sign, and the destination has
   a different sign, set V: */
cc += ((((tme_int64_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint64_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_XCC_V);
/* if src1 and src2 both have the high bit set, or if dst does
   not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int64_t) (((tme_uint64_t) (src1 & src2)) | ((((tme_uint64_t) dst) ^ (((tme_uint64_t) 0) - 1)) & ((tme_uint64_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_XCC_C;
/* set V if bits zero or one of src1 or src2 are set: */
/* NB: this must be |= (not +=) since the arithmetic-overflow test
   above may already have set ICC_V: */
cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC64_CCR_ICC_V);
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "taddcctv SRC1, SRC2, DST": */
/* tagged add, set condition codes, and trap on overflow: like
   taddcc, but if the icc V bit would be set (arithmetic overflow or
   a nonzero two-bit tag in either operand) a tag_overflow trap is
   taken.  note the destination register and %ccr are only written
   after the trap check, so architectural state is preserved when
   the trap is taken (tme_sparc64_trap() presumably does not return
   here - confirm): */
TME_SPARC_FORMAT3(tme_sparc64_taddcctv, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 + src2;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* if the operands are the same sign, and the destination has
   a different sign, set V: */
cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_ICC_V);
/* if src1 and src2 both have the high bit set, or if dst does
   not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_ICC_C;
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* if the operands are the same sign, and the destination has
   a different sign, set V: */
cc += ((((tme_int64_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint64_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_XCC_V);
/* if src1 and src2 both have the high bit set, or if dst does
   not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int64_t) (((tme_uint64_t) (src1 & src2)) | ((((tme_uint64_t) dst) ^ (((tme_uint64_t) 0) - 1)) & ((tme_uint64_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_XCC_C;
/* set V if bits zero or one of src1 or src2 are set: */
cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC64_CCR_ICC_V);
/* trap on a tagged overflow: */
if (cc & TME_SPARC64_CCR_ICC_V) {
tme_sparc64_trap(ic, TME_SPARC64_TRAP_tag_overflow);
}
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "tsubcc SRC1, SRC2, DST": */
/* tagged subtract and set condition codes: in addition to the
   normal subtract condition codes, the icc V bit is also forced set
   whenever either operand has a nonzero two-bit tag (bits 0-1): */
TME_SPARC_FORMAT3(tme_sparc64_tsubcc, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 - src2;
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* if the operands are different signs, and the destination has
   a different sign from the first operand, set V: */
cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_ICC_V);
/* if src2 is greater than src1, set C: */
cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1))) * TME_SPARC64_CCR_ICC_C;
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* if the operands are different signs, and the destination has
   a different sign from the first operand, set V: */
cc += ((((tme_int64_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_XCC_V);
/* if src2 is greater than src1, set C: */
cc += ((((tme_uint64_t) src2) > ((tme_uint64_t) src1))) * TME_SPARC64_CCR_XCC_C;
/* set V if bits zero or one of src1 or src2 are set: */
/* NB: this must be |= (not +=) since the arithmetic-overflow test
   above may already have set ICC_V: */
cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC64_CCR_ICC_V);
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "tsubcctv SRC1, SRC2, DST": */
/* tagged subtract, set condition codes, and trap on overflow: like
   tsubcc, but if the icc V bit would be set (arithmetic overflow or
   a nonzero two-bit tag in either operand) a tag_overflow trap is
   taken.  note the destination register and %ccr are only written
   after the trap check, so architectural state is preserved when
   the trap is taken (tme_sparc64_trap() presumably does not return
   here - confirm): */
TME_SPARC_FORMAT3(tme_sparc64_tsubcctv, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
dst = src1 - src2;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* if the operands are different signs, and the destination has
   a different sign from the first operand, set V: */
cc += ((((tme_int32_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_ICC_V);
/* if src2 is greater than src1, set C: */
cc += ((((tme_uint32_t) src2) > ((tme_uint32_t) src1))) * TME_SPARC64_CCR_ICC_C;
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* if the operands are different signs, and the destination has
   a different sign from the first operand, set V: */
cc += ((((tme_int64_t) ((src1 ^ src2) & (src1 ^ dst))) < 0) * TME_SPARC64_CCR_XCC_V);
/* if src2 is greater than src1, set C: */
cc += ((((tme_uint64_t) src2) > ((tme_uint64_t) src1))) * TME_SPARC64_CCR_XCC_C;
/* set V if bits zero or one of src1 or src2 are set: */
cc |= ((((src1 | src2) & 3) != 0) * TME_SPARC64_CCR_ICC_V);
/* trap on a tagged overflow: */
if (cc & TME_SPARC64_CCR_ICC_V) {
tme_sparc64_trap(ic, TME_SPARC64_TRAP_tag_overflow);
}
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "umul SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_umul, tme_uint64_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint64_t dst;
tme_uint64_t val64;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
val64 = (((tme_uint64_t) src1) * src2);
ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1) = (((tme_uint64_t) val64) >> 32);
dst = ((tme_uint64_t) val64);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "umulcc SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_umulcc, tme_uint64_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint64_t dst;
tme_uint64_t val64;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
val64 = (((tme_uint64_t) src1) * src2);
ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1) = (((tme_uint64_t) val64) >> 32);
dst = ((tme_uint64_t) val64);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "smul SRC1, SRC2, DST": */
TME_SPARC_FORMAT3(tme_sparc64_smul, tme_uint64_t)
{
tme_int32_t src1;
tme_int32_t src2;
tme_int64_t dst;
tme_int64_t val64;
/* get the operands: */
src1 = (tme_int64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_int64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
val64 = (((tme_int64_t) src1) * src2);
ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1) = (((tme_uint64_t) val64) >> 32);
dst = ((tme_int64_t) val64);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_int64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "smulcc SRC1, SRC2, DST": */
/* signed multiply and set condition codes: the low 32 bits of each
   source are multiplied as signed values into a 64-bit product; %y
   gets the high 32 bits, the destination gets the whole product,
   and N and Z of both icc and xcc are set from it (V and C clear): */
TME_SPARC_FORMAT3(tme_sparc64_smulcc, tme_uint64_t)
{
tme_int32_t src1;
tme_int32_t src2;
tme_int64_t dst;
tme_int64_t val64;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_int64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_int64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
val64 = (((tme_int64_t) src1) * src2);
/* %y receives the high 32 bits of the product: */
ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1) = (((tme_uint64_t) val64) >> 32);
dst = ((tme_int64_t) val64);
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_int64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "udiv SRC1, SRC2, DST": */
/* unsigned divide: the 64-bit dividend is %y in the high half and
   the low 32 bits of rs1 in the low half; the divisor is the low 32
   bits of rs2.  division by zero traps, and a quotient that does
   not fit in 32 bits is clamped to 0xffffffff: */
TME_SPARC_FORMAT3(tme_sparc64_udiv, tme_uint64_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint64_t val64;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
/* build the 64-bit dividend from %y and the low half of src1: */
val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1);
val64 = (val64 << 32) + (tme_uint32_t) src1;
/* NOTE(review): tme_sparc64_trap() presumably does not return
   here, otherwise the division below would divide by zero -
   confirm: */
if (__tme_predict_false(src2 == 0)) {
tme_sparc64_trap(ic, TME_SPARC64_TRAP_division_by_zero);
}
val64 /= src2;
dst = (tme_uint32_t) val64;
/* if the division overflowed: */
if (dst != val64) {
/* return the largest appropriate value: */
dst = 0xffffffff;
}
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "udivcc SRC1, SRC2, DST": */
/* unsigned divide and set condition codes: like udiv, but the icc V
   bit records quotient overflow and the N and Z bits are set from
   the result.  NB: the xcc N bit is never set here because dst is a
   32-bit unsigned value zero-extended to 64 bits, so its 64-bit
   sign bit cannot be set: */
TME_SPARC_FORMAT3(tme_sparc64_udivcc, tme_uint64_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint64_t val64;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
/* build the 64-bit dividend from %y and the low half of src1: */
val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1);
val64 = (val64 << 32) + (tme_uint32_t) src1;
if (__tme_predict_false(src2 == 0)) {
tme_sparc64_trap(ic, TME_SPARC64_TRAP_division_by_zero);
}
val64 /= src2;
dst = (tme_uint32_t) val64;
/* if the division overflowed: */
if (dst != val64) {
/* return the largest appropriate value: */
dst = 0xffffffff;
/* set V: */
cc = TME_SPARC64_CCR_ICC_V;
}
/* otherwise, the division didn't overflow: */
else {
/* clear V: */
/* NB: logical not of the nonzero mask constant - this is just
   the constant 0, i.e. all condition code bits start clear: */
cc = !TME_SPARC64_CCR_ICC_V;
}
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc += ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "sdiv SRC1, SRC2, DST": */
/* signed divide: the 64-bit dividend is %y in the high half and the
   low 32 bits of rs1 in the low half; the divisor is the low 32
   bits of rs2, treated as signed.  division by zero traps, and an
   overflowing quotient is clamped to the 32-bit extreme of the
   appropriate sign: */
TME_SPARC_FORMAT3(tme_sparc64_sdiv, tme_uint64_t)
{
tme_int32_t src1;
tme_int32_t src2;
tme_int64_t dst;
tme_int64_t val64;
/* get the operands: */
src1 = (tme_int64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_int64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
/* build the 64-bit dividend from %y and the low half of src1: */
val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1);
val64 = (val64 << 32) + (tme_uint32_t) src1;
if (__tme_predict_false(src2 == 0)) {
tme_sparc64_trap(ic, TME_SPARC64_TRAP_division_by_zero);
}
val64 /= src2;
dst = (tme_int32_t) val64;
/* if the division overflowed: */
if (dst != val64) {
/* return the largest appropriate value: */
/* NB: 0x7fffffff + 1 as a tme_uint32_t is 0x80000000, so this
   yields INT32_MAX for a positive quotient and INT32_MIN for a
   negative one: */
dst = (tme_int32_t) ((val64 < 0) + (tme_uint32_t) 0x7fffffff);
}
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_int64_t) dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "sdivcc SRC1, SRC2, DST": */
/* signed divide and set condition codes: like sdiv, but the icc V
   bit records quotient overflow and the N and Z bits of both icc
   and xcc are set from the result: */
TME_SPARC_FORMAT3(tme_sparc64_sdivcc, tme_uint64_t)
{
tme_int32_t src1;
tme_int32_t src2;
tme_int64_t dst;
tme_int64_t val64;
tme_uint32_t cc;
/* get the operands: */
src1 = (tme_int64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_int64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
/* build the 64-bit dividend from %y and the low half of src1: */
val64 = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1);
val64 = (val64 << 32) + (tme_uint32_t) src1;
if (__tme_predict_false(src2 == 0)) {
tme_sparc64_trap(ic, TME_SPARC64_TRAP_division_by_zero);
}
val64 /= src2;
dst = (tme_int32_t) val64;
/* if the division overflowed: */
if (dst != val64) {
/* return the largest appropriate value: */
/* NB: yields INT32_MAX for a positive quotient and INT32_MIN for
   a negative one: */
dst = (tme_int32_t) ((val64 < 0) + (tme_uint32_t) 0x7fffffff);
/* set V: */
cc = TME_SPARC64_CCR_ICC_V;
}
/* otherwise, the division didn't overflow: */
else {
/* clear V: */
/* NB: logical not of the nonzero mask constant - this is just
   the constant 0, i.e. all condition code bits start clear: */
cc = !TME_SPARC64_CCR_ICC_V;
}
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_int64_t) dst;
/* set Z if the destination is zero: */
cc += ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int64_t) dst) < 0) * TME_SPARC64_CCR_XCC_N);
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* the sparc64 sll function: */
TME_SPARC_FORMAT3(tme_sparc64_sll, tme_uint64_t)
{
tme_uint64_t dst;
unsigned int count;
/* get the value and the shift count: */
dst = TME_SPARC_FORMAT3_RS1;
count = TME_SPARC_FORMAT3_RS2;
/* if the X bit is clear: */
if ((TME_SPARC_INSN & TME_BIT(12)) == 0) {
/* limit the count: */
count %= 32;
}
/* limit the count: */
count %= 64;
/* do the shift: */
#if defined(SHIFTMAX_INT64_T) && (SHIFTMAX_INT64_T < (64 - 1))
#error "cannot do full shifts of a tme_int64_t"
#endif /* (SHIFTMAX_INT64_T < (64 - 1)) */
dst <<= count;
/* store the destination: */
TME_SPARC_FORMAT3_RD = dst;
TME_SPARC_INSN_OK;
}
/* the sparc64 srl function: */
TME_SPARC_FORMAT3(tme_sparc64_srl, tme_uint64_t)
{
tme_uint64_t dst;
unsigned int count;
/* get the value and the shift count: */
dst = TME_SPARC_FORMAT3_RS1;
count = TME_SPARC_FORMAT3_RS2;
/* if the X bit is clear: */
if ((TME_SPARC_INSN & TME_BIT(12)) == 0) {
/* limit the count: */
count %= 32;
/* clip the value to 32 bits: */
dst = (tme_uint32_t) dst;
}
/* limit the count: */
count %= 64;
/* do the shift: */
#if defined(SHIFTMAX_INT64_T) && (SHIFTMAX_INT64_T < (64 - 1))
#error "cannot do full shifts of a tme_int64_t"
#endif /* (SHIFTMAX_INT64_T < (64 - 1)) */
dst >>= count;
/* store the destination: */
TME_SPARC_FORMAT3_RD = dst;
TME_SPARC_INSN_OK;
}
/* the sparc64 sra function: */
/* arithmetic shift right: with the X bit (bit 12) clear this is a
   32-bit shift (value clipped and sign-extended from bit 31, count
   taken mod 32); with it set, a 64-bit shift (count mod 64): */
TME_SPARC_FORMAT3(tme_sparc64_sra, tme_uint64_t)
{
tme_int64_t dst;
unsigned int count;
/* get the value and the shift count: */
dst = TME_SPARC_FORMAT3_RS1;
count = TME_SPARC_FORMAT3_RS2;
/* if the X bit is clear: */
if ((TME_SPARC_INSN & TME_BIT(12)) == 0) {
/* limit the count: */
count %= 32;
/* clip the value to 32 bits: */
dst = (tme_int32_t) dst;
}
/* limit the count: */
count %= 64;
/* do the shift: */
#ifdef SHIFTSIGNED_INT64_T
#if defined(SHIFTMAX_INT64_T) && (SHIFTMAX_INT64_T < (64 - 1))
#error "cannot do full shifts of a tme_int64_t"
#endif /* (SHIFTMAX_INT64_T < (64 - 1)) */
dst >>= count;
#else /* !SHIFTSIGNED_INT64_T */
/* portable fallback for platforms where right-shifting a signed
   value is not arithmetic: clearing bit zero makes the value even,
   so dividing by two is exact and equals a one-bit arithmetic
   shift; repeat once per count: */
for (; count-- > 0; ) {
dst = (dst & ~((tme_int64_t) 1)) / 2;
}
#endif /* !SHIFTSIGNED_INT64_T */
/* store the destination: */
TME_SPARC_FORMAT3_RD = dst;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldb: */
/* byte load: tries the fast path through a busy DTLB entry that
   covers the address, context and ASI; otherwise falls back to the
   slow load function.  bit 22 of the instruction selects
   sign-extension of the loaded byte (presumably distinguishing the
   signed from the unsigned load form - confirm against the
   decoder): */
TME_SPARC_FORMAT3(tme_sparc64_ldb, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint8_t value8;
tme_uint32_t value32;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_8
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* a ldb without a no-fault ASI traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((8 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast ldb: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
)) {
/* call the slow load function: */
/* NOTE(review): unlike the store path, there is no
   TME_EMULATOR_OFF_UNDEF check on the returned pointer here -
   presumably tme_sparc64_ls() always returns a pointer through
   which the fast-transfer code below can complete a load, or
   does not return at all on a fault; confirm: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| (8 / 8)));
}
/* get the byte order of this transfer: */
/* NB: endian_little is not consulted below, since a single byte
   needs no byte-swapping; the computation is kept for symmetry
   with the wider transfers (and for the invert-endian assert): */
endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: */
memory += address;
value8 = tme_memory_bus_read8((const tme_shared tme_uint8_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint64_t));
/* possibly sign-extend the loaded value: */
value32 = value8;
if (TME_SPARC_INSN & TME_BIT(22)) {
value32 = (tme_uint32_t) (tme_int32_t) (tme_int8_t) value32;
}
TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int32_t) value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldb 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 stb: */
/* byte store: tries the fast path through a busy DTLB entry that
   covers the address, context and ASI; otherwise falls back to the
   slow store function, which may complete the transfer itself: */
TME_SPARC_FORMAT3(tme_sparc64_stb, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint8_t value8;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_8
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
/* NB: unlike the load side, the store is logged before the
   transfer is attempted: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("stb 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx8),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint8_t) TME_SPARC_FORMAT3_RD));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a stb traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((8 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast stb: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
)) {
/* call the slow store function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| (8 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry; */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer: */
/* NB: endian_little is not consulted below, since a single byte
   needs no byte-swapping; the computation is kept for symmetry
   with the wider transfers (and for the invert-endian assert): */
endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: */
memory += address;
value8 = TME_SPARC_FORMAT3_RD;
tme_memory_bus_write8((tme_shared tme_uint8_t *) memory, value8, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint64_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldh: */
/* halfword load: like ldb, but two bytes wide, so a misaligned
   address also forces the slow path, and the loaded halfword is
   byte-swapped according to the effective endianness of the
   transfer.  bit 22 of the instruction selects sign-extension
   (presumably distinguishing the signed from the unsigned load
   form - confirm against the decoder): */
TME_SPARC_FORMAT3(tme_sparc64_ldh, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint16_t value16;
tme_uint32_t value32;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_16
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* a ldh without a no-fault ASI traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((16 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast ldh: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (16 / 8)) != 0)
)) {
/* call the slow load function: */
/* NOTE(review): as in ldb, there is no TME_EMULATOR_OFF_UNDEF
   check on the returned pointer - presumably tme_sparc64_ls()
   always returns a pointer through which the fast-transfer code
   below can complete a load, or does not return on a fault;
   confirm: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| (16 / 8)));
}
/* get the byte order of this transfer: */
endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: */
memory += address;
value16 = tme_memory_bus_read16((const tme_shared tme_uint16_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint64_t));
/* byte-swap the halfword from the transfer's byte order to host order: */
value16 = (endian_little ? tme_letoh_u16(value16) : tme_betoh_u16(value16));
/* possibly sign-extend the loaded value: */
value32 = value16;
if (TME_SPARC_INSN & TME_BIT(22)) {
value32 = (tme_uint32_t) (tme_int32_t) (tme_int16_t) value32;
}
TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int32_t) value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldh 0x%02x:0x%016" TME_PRIx64 ": 0x%04" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 sth: store the low 16 bits of rd to [rs1 + rs2],
   using the fast DTLB path when the TLB entry permits it, otherwise
   falling back to the slow store function: */
TME_SPARC_FORMAT3(tme_sparc64_sth, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint16_t value16;
/* get the address (rs1 + rs2, masked to the implemented address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_16
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("sth 0x%02x:0x%016" TME_PRIx64 ": 0x%04" TME_PRIx16),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint16_t) TME_SPARC_FORMAT3_RD));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a sth traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((16 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast sth: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned (halfwords must be 2-byte aligned): */
|| ((address % (16 / 8)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| (16 / 8)));
/* if the slow store function did the transfer itself, there is
   nothing left to do here: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer; if the TLB entry's ASI mask
   requests the opposite byte order from the current ASI, invert it
   (only legal when the memory implementation supports inversion): */
endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: truncate rd to 16 bits, byte-swap to the
   bus order, and write through the emulator offset: */
memory += address;
value16 = TME_SPARC_FORMAT3_RD;
value16 = (endian_little ? tme_htole_u16(value16) : tme_htobe_u16(value16));
tme_memory_bus_write16((tme_shared tme_uint16_t *) memory, value16, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint64_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ld: load a 32-bit word from [rs1 + rs2] into rd,
   zero-extended (lduw) or sign-extended (ldsw, selected by insn bit 22),
   using the fast DTLB path when possible: */
TME_SPARC_FORMAT3(tme_sparc64_ld, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
tme_uint64_t value64;
/* get the address (rs1 + rs2, masked to the implemented address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* a ld without a no-fault ASI traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast ld: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned (words must be 4-byte aligned): */
|| ((address % (32 / 8)) != 0)
)) {
/* call the slow load function.  NOTE(review): unlike the store
   insns there is no TME_EMULATOR_OFF_UNDEF check afterwards, so
   the slow load path presumably always returns a pointer that
   makes the fast transfer below read the fetched value --
   confirm against tme_sparc64_ls: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| (32 / 8)));
}
/* get the byte order of this transfer; if the TLB entry's ASI mask
   requests the opposite byte order from the current ASI, invert it
   (only legal when the memory implementation supports inversion): */
endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: */
memory += address;
value32 = tme_memory_bus_read32((const tme_shared tme_uint32_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint64_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
/* possibly sign-extend the loaded value (insn bit 22 set means the
   signed variant, ldsw; otherwise value64 keeps the zero extension): */
value64 = value32;
if (TME_SPARC_INSN & TME_BIT(22)) {
value64 = (tme_uint64_t) (tme_int64_t) (tme_int32_t) value64;
}
/* the repeated (tme_int64_t) cast below is a no-op artifact of the
   instruction template; value64 already has the correct extension: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int64_t) value64;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ld 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 st: store the low 32 bits of rd to [rs1 + rs2],
   using the fast DTLB path when the TLB entry permits it, otherwise
   falling back to the slow store function: */
TME_SPARC_FORMAT3(tme_sparc64_st, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the address (rs1 + rs2, masked to the implemented address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("st 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint32_t) TME_SPARC_FORMAT3_RD));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a st traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast st: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned (words must be 4-byte aligned): */
|| ((address % (32 / 8)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| (32 / 8)));
/* if the slow store function did the transfer itself, there is
   nothing left to do here: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer; if the TLB entry's ASI mask
   requests the opposite byte order from the current ASI, invert it
   (only legal when the memory implementation supports inversion): */
endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: truncate rd to 32 bits, byte-swap to the
   bus order, and write through the emulator offset: */
memory += address;
value32 = TME_SPARC_FORMAT3_RD;
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
tme_memory_bus_write32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint64_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldd: load two consecutive 32-bit words from
   [rs1 + rs2] into the even/odd register pair rd, rd+1.  the fast
   path requires 8-byte alignment and an even rd (insn bit 25 is the
   low bit of the rd field); otherwise the slow function handles it: */
TME_SPARC_FORMAT3(tme_sparc64_ldd, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the address (rs1 + rs2, masked to the implemented address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer (one verify per word): */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64),
ic->tme_sparc_asi_mask_data, address + sizeof(tme_uint32_t),
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* a ldd without a no-fault ASI traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast ldd: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned (doublewords must be 8-byte aligned): */
|| ((address % (64 / 8)) != 0)
/* the destination register number is odd: */
|| ((TME_SPARC_INSN & TME_BIT(25)) != 0)
)) {
/* call the slow load function.  NOTE(review): unlike the store
   insns there is no TME_EMULATOR_OFF_UNDEF check afterwards, so
   the slow load path presumably always returns a pointer usable
   by the fast transfer below -- confirm against tme_sparc64_ls: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| TME_SPARC_LSINFO_LDD_STD
| (64 / 8)));
}
/* get the byte order of this transfer; if the TLB entry's ASI mask
   requests the opposite byte order from the current ASI, invert it
   (only legal when the memory implementation supports inversion): */
endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: word 0 goes into the even register rd,
   word 1 into the odd register rd+1: */
memory += address;
value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 0, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint64_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
TME_SPARC_FORMAT3_RD = value32;
value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 1, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint64_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64) = value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the values loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64));
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldd 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64 " 0x%016" TME_PRIx64 ""),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD,
TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64)));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 std: store the low 32 bits of the even/odd
   register pair rd, rd+1 to two consecutive words at [rs1 + rs2].
   the fast path requires 8-byte alignment and an even rd (insn bit 25
   is the low bit of the rd field); otherwise the slow function runs: */
TME_SPARC_FORMAT3(tme_sparc64_std, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the address (rs1 + rs2, masked to the implemented address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer (one verify per word): */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64),
ic->tme_sparc_asi_mask_data, address + sizeof(tme_uint32_t),
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the values stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("std 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint32_t) TME_SPARC_FORMAT3_RD,
(tme_uint32_t) TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64)));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a std traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast std: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned (doublewords must be 8-byte aligned): */
|| ((address % (64 / 8)) != 0)
/* the destination register number is odd: */
|| ((TME_SPARC_INSN & TME_BIT(25)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| TME_SPARC_LSINFO_LDD_STD
| (64 / 8)));
/* if the slow store function did the transfer itself, there is
   nothing left to do here: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer; if the TLB entry's ASI mask
   requests the opposite byte order from the current ASI, invert it
   (only legal when the memory implementation supports inversion): */
endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: the even register rd becomes word 0, the
   odd register rd+1 becomes word 1: */
memory += address;
value32 = TME_SPARC_FORMAT3_RD;
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 0, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint64_t));
value32 = TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64);
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 1, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint64_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldstub: atomically load the byte at [rs1 + rs2]
   into rd and store 0xff in its place.  the fast path additionally
   requires the TLB entry to map reads and writes to the same emulator
   memory, so the exchange can be done atomically in place: */
TME_SPARC_FORMAT3(tme_sparc64_ldstub, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
/* get the address (rs1 + rs2, masked to the implemented address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_8
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a ldstub traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* a ldstub traps on uncacheable addresses with side-effects: */
asi_mask_flags_slow |= TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((8 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast ldstub: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers (for an atomic,
   reads and writes must map to the same emulator memory): */
|| (memory != dtlb->tme_sparc_tlb_emulator_off_read)
|| (memory == TME_EMULATOR_OFF_UNDEF)
)) {
/* call the slow store function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ATOMIC
| (8 / 8)));
/* if the slow store function did the transfer itself, just log
   the loaded value and finish: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldstub 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer; a single byte needs no
   swapping, but an unsupported inversion request still asserts: */
endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: atomically exchange the byte with 0xff: */
memory += address;
TME_SPARC_FORMAT3_RD = tme_memory_atomic_xchg8(memory, 0xff, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldstub 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldstuba: ldstub with an explicit alternate ASI
   from the instruction.  the ASI selects the memory context (primary,
   secondary, or nucleus), and a no-fault ASI always forces the slow
   (trapping) path: */
TME_SPARC_FORMAT3(tme_sparc64_ldstuba, tme_uint64_t)
{
tme_uint32_t asi_mask_data;
tme_uint64_t address;
tme_bus_context_t context;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
/* get the alternate ASI mask (also checks privilege/legality): */
asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);
/* get the address (rs1 + rs2, masked to the implemented address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_8
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* get the context selected by the ASI: primary by default,
   secondary or nucleus (context 0) when the ASI asks for them: */
context = ic->tme_sparc_memory_context_primary;
if (__tme_predict_false(asi_mask_data
& (TME_SPARC64_ASI_FLAG_SECONDARY
+ TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
context = ic->tme_sparc_memory_context_secondary;
}
else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
context = 0;
}
}
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a ldstuba traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* a ldstuba traps on uncacheable addresses with side-effects: */
asi_mask_flags_slow |= TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE;
/* if this ldstuba is using a no-fault ASI: */
if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
/* a ldstuba with a no-fault ASI traps; an all-ones mask forces
   the slow path for any TLB entry: */
asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
}
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = context;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != context
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((8 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast ldstuba: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers (for an atomic,
   reads and writes must map to the same emulator memory): */
|| (memory != dtlb->tme_sparc_tlb_emulator_off_read)
|| (memory == TME_EMULATOR_OFF_UNDEF)
)) {
/* call the slow store function, passing the explicit ASI: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ATOMIC
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (8 / 8)));
/* if the slow store function did the transfer itself, just log
   the loaded value and finish: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldstuba 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer; a single byte needs no
   swapping, but an unsupported inversion request still asserts: */
endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: atomically exchange the byte with 0xff: */
memory += address;
TME_SPARC_FORMAT3_RD = tme_memory_atomic_xchg8(memory, 0xff, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldstuba 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 swap: atomically exchange the low 32 bits of rd
   with the word at [rs1 + rs2].  the fast path additionally requires
   the TLB entry to map reads and writes to the same emulator memory: */
TME_SPARC_FORMAT3(tme_sparc64_swap, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the address (rs1 + rs2, masked to the implemented address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer (swap is both a load and a store): */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD | TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("swap 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint32_t) TME_SPARC_FORMAT3_RD));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a swap traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* a swap traps on uncacheable addresses with side-effects: */
asi_mask_flags_slow |= TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast swap: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers (for an atomic,
   reads and writes must map to the same emulator memory): */
|| (memory != dtlb->tme_sparc_tlb_emulator_off_read)
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned (words must be 4-byte aligned): */
|| ((address % (32 / 8)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ATOMIC
| (32 / 8)));
/* if the slow store function did the transfer itself, just log
   the loaded value and finish: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("swap 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer; if the TLB entry's ASI mask
   requests the opposite byte order from the current ASI, invert it
   (only legal when the memory implementation supports inversion): */
endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: swap rd into bus order, atomically exchange
   it with memory, and swap the old memory value back into rd.
   NOTE(review): the final argument to tme_memory_atomic_xchg32 is
   sizeof(tme_uint8_t) even though this is a 32-bit exchange; this
   matches the generator's output, but confirm the alignment/lock
   granularity parameter is intended: */
memory += address;
value32 = TME_SPARC_FORMAT3_RD;
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
value32 = tme_memory_atomic_xchg32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
TME_SPARC_FORMAT3_RD = value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("swap 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 swapa (alternate-space swap): atomically exchanges
the low 32 bits of rd with the 32-bit word at [rs1 + rs2] in the address
space selected by the instruction's ASI field.  rd receives the old
memory word, zero-extended to 64 bits.  the fast path requires a valid
DTLB entry for the right context and ASI whose read and write mappings
are the same memory and whose address is 32-bit aligned; otherwise the
slow function tme_sparc64_ls() performs the transfer: */
TME_SPARC_FORMAT3(tme_sparc64_swapa, tme_uint64_t)
{
tme_uint32_t asi_mask_data;
tme_uint64_t address;
tme_bus_context_t context;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the alternate ASI mask: */
asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);
/* get the address (masked to the implemented virtual address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD | TME_SPARC_RECODE_VERIFY_MEM_STORE));
/* if recode verification replayed this transfer, we are done: */
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("swapa 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint32_t) TME_SPARC_FORMAT3_RD));
/* get the context: primary by default, secondary or nucleus (context
zero) when the ASI selects them: */
context = ic->tme_sparc_memory_context_primary;
if (__tme_predict_false(asi_mask_data
& (TME_SPARC64_ASI_FLAG_SECONDARY
+ TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
context = ic->tme_sparc_memory_context_secondary;
}
else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
context = 0;
}
}
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a swapa traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* a swapa traps on uncacheable addresses with side-effects: */
asi_mask_flags_slow |= TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE;
/* if this swapa is using a no-fault ASI: */
if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
/* a swapa with a no-fault ASI traps: all-ones mask forces the slow
path for every DTLB entry: */
asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
}
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = context;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != context
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast swapa: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers (the atomic fast path
also needs the read and write mappings to be the same memory): */
|| (memory != dtlb->tme_sparc_tlb_emulator_off_read)
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (32 / 8)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ATOMIC
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (32 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("swapa 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer: */
endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: convert the store value to bus byte order,
exchange it atomically, and convert the old memory word back: */
memory += address;
value32 = TME_SPARC_FORMAT3_RD;
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
/* NOTE(review): the alignment argument here is sizeof(tme_uint8_t) even
though this is a 32-bit exchange; presumably intentional in the
generator (the other 32-bit paths pass sizeof(tme_uint32_t)) — confirm: */
value32 = tme_memory_atomic_xchg32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
TME_SPARC_FORMAT3_RD = value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("swapa 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldba (alternate-space byte load): loads one byte
from [rs1 + rs2] in the address space selected by the instruction's ASI
field into rd.  when insn bit 22 is set the byte is sign-extended to 64
bits (the signed load variant), otherwise it is zero-extended.  the fast
path requires a valid, context- and ASI-matching DTLB entry with a
direct read mapping; otherwise tme_sparc64_ls() does the transfer: */
TME_SPARC_FORMAT3(tme_sparc64_ldba, tme_uint64_t)
{
tme_uint32_t asi_mask_data;
tme_uint64_t address;
tme_bus_context_t context;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint8_t value8;
tme_uint32_t value32;
/* get the alternate ASI mask: */
asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);
/* get the address (masked to the implemented virtual address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_8
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
/* if recode verification replayed this transfer, we are done: */
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* get the context: primary by default, secondary or nucleus (context
zero) when the ASI selects them: */
context = ic->tme_sparc_memory_context_primary;
if (__tme_predict_false(asi_mask_data
& (TME_SPARC64_ASI_FLAG_SECONDARY
+ TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
context = ic->tme_sparc_memory_context_secondary;
}
else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
context = 0;
}
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* a ldba without a no-fault ASI traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* if this ldba is using a no-fault ASI: */
if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
/* a ldba with a no-fault ASI traps on addresses with side-effects: */
asi_mask_flags_slow = TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS;
}
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = context;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != context
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((8 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast ldba: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
)) {
/* call the slow load function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (8 / 8)));
/* if the slow load function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldba 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer (a single byte needs no swap,
but endian_little is still computed by the generator's template): */
endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: */
memory += address;
value8 = tme_memory_bus_read8((const tme_shared tme_uint8_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint64_t));
/* possibly sign-extend the loaded value (insn bit 22 selects the
signed load variant): */
value32 = value8;
if (TME_SPARC_INSN & TME_BIT(22)) {
value32 = (tme_uint32_t) (tme_int32_t) (tme_int8_t) value32;
}
TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int32_t) value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldba 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 stba (alternate-space byte store): stores the low
byte of rd to [rs1 + rs2] in the address space selected by the
instruction's ASI field.  the fast path requires a valid, context- and
ASI-matching DTLB entry with a direct write mapping; otherwise
tme_sparc64_ls() does the transfer: */
TME_SPARC_FORMAT3(tme_sparc64_stba, tme_uint64_t)
{
tme_uint32_t asi_mask_data;
tme_uint64_t address;
tme_bus_context_t context;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint8_t value8;
/* get the alternate ASI mask: */
asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);
/* get the address (masked to the implemented virtual address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_8
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
/* if recode verification replayed this transfer, we are done: */
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("stba 0x%02x:0x%016" TME_PRIx64 ": 0x%02" TME_PRIx8),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint8_t) TME_SPARC_FORMAT3_RD));
/* get the context: primary by default, secondary or nucleus (context
zero) when the ASI selects them: */
context = ic->tme_sparc_memory_context_primary;
if (__tme_predict_false(asi_mask_data
& (TME_SPARC64_ASI_FLAG_SECONDARY
+ TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
context = ic->tme_sparc_memory_context_secondary;
}
else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
context = 0;
}
}
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a stba traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* if this stba is using a no-fault ASI: */
if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
/* a stba with a no-fault ASI traps: all-ones mask forces the slow
path for every DTLB entry: */
asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
}
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = context;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != context
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((8 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast stba: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
)) {
/* call the slow store function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (8 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer (a single byte needs no swap,
but endian_little is still computed by the generator's template): */
endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: */
memory += address;
value8 = TME_SPARC_FORMAT3_RD;
tme_memory_bus_write8((tme_shared tme_uint8_t *) memory, value8, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint8_t), sizeof(tme_uint64_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldha (alternate-space halfword load): loads a
16-bit halfword from [rs1 + rs2] in the address space selected by the
instruction's ASI field into rd.  when insn bit 22 is set the halfword
is sign-extended to 64 bits (the signed load variant), otherwise it is
zero-extended.  the address must be 2-byte aligned for the fast path;
otherwise (or on any other fast-path miss) tme_sparc64_ls() does the
transfer: */
TME_SPARC_FORMAT3(tme_sparc64_ldha, tme_uint64_t)
{
tme_uint32_t asi_mask_data;
tme_uint64_t address;
tme_bus_context_t context;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint16_t value16;
tme_uint32_t value32;
/* get the alternate ASI mask: */
asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);
/* get the address (masked to the implemented virtual address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_16
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
/* if recode verification replayed this transfer, we are done: */
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* get the context: primary by default, secondary or nucleus (context
zero) when the ASI selects them: */
context = ic->tme_sparc_memory_context_primary;
if (__tme_predict_false(asi_mask_data
& (TME_SPARC64_ASI_FLAG_SECONDARY
+ TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
context = ic->tme_sparc_memory_context_secondary;
}
else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
context = 0;
}
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* a ldha without a no-fault ASI traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* if this ldha is using a no-fault ASI: */
if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
/* a ldha with a no-fault ASI traps on addresses with side-effects: */
asi_mask_flags_slow = TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS;
}
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = context;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != context
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((16 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast ldha: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (16 / 8)) != 0)
)) {
/* call the slow load function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (16 / 8)));
/* if the slow load function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldha 0x%02x:0x%016" TME_PRIx64 ": 0x%04" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer: */
endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer and convert from bus byte order: */
memory += address;
value16 = tme_memory_bus_read16((const tme_shared tme_uint16_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint64_t));
value16 = (endian_little ? tme_letoh_u16(value16) : tme_betoh_u16(value16));
/* possibly sign-extend the loaded value (insn bit 22 selects the
signed load variant): */
value32 = value16;
if (TME_SPARC_INSN & TME_BIT(22)) {
value32 = (tme_uint32_t) (tme_int32_t) (tme_int16_t) value32;
}
TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int32_t) value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldha 0x%02x:0x%016" TME_PRIx64 ": 0x%04" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 stha (alternate-space halfword store): stores the
low 16 bits of rd to [rs1 + rs2] in the address space selected by the
instruction's ASI field.  the address must be 2-byte aligned for the
fast path; otherwise (or on any other fast-path miss) tme_sparc64_ls()
does the transfer: */
TME_SPARC_FORMAT3(tme_sparc64_stha, tme_uint64_t)
{
tme_uint32_t asi_mask_data;
tme_uint64_t address;
tme_bus_context_t context;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint16_t value16;
/* get the alternate ASI mask: */
asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);
/* get the address (masked to the implemented virtual address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_16
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
/* if recode verification replayed this transfer, we are done: */
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("stha 0x%02x:0x%016" TME_PRIx64 ": 0x%04" TME_PRIx16),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint16_t) TME_SPARC_FORMAT3_RD));
/* get the context: primary by default, secondary or nucleus (context
zero) when the ASI selects them: */
context = ic->tme_sparc_memory_context_primary;
if (__tme_predict_false(asi_mask_data
& (TME_SPARC64_ASI_FLAG_SECONDARY
+ TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
context = ic->tme_sparc_memory_context_secondary;
}
else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
context = 0;
}
}
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a stha traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* if this stha is using a no-fault ASI: */
if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
/* a stha with a no-fault ASI traps: all-ones mask forces the slow
path for every DTLB entry: */
asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
}
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = context;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != context
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((16 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast stha: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (16 / 8)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (16 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer: */
endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer, converting the value to bus byte order first: */
memory += address;
value16 = TME_SPARC_FORMAT3_RD;
value16 = (endian_little ? tme_htole_u16(value16) : tme_htobe_u16(value16));
tme_memory_bus_write16((tme_shared tme_uint16_t *) memory, value16, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint16_t), sizeof(tme_uint64_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 lda (alternate-space word load): loads a 32-bit
word from [rs1 + rs2] in the address space selected by the
instruction's ASI field into rd.  when insn bit 22 is set the word is
sign-extended to 64 bits (the signed load variant), otherwise it is
zero-extended.  the address must be 4-byte aligned for the fast path;
otherwise (or on any other fast-path miss) tme_sparc64_ls() does the
transfer: */
TME_SPARC_FORMAT3(tme_sparc64_lda, tme_uint64_t)
{
tme_uint32_t asi_mask_data;
tme_uint64_t address;
tme_bus_context_t context;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
tme_uint64_t value64;
/* get the alternate ASI mask: */
asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);
/* get the address (masked to the implemented virtual address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
/* if recode verification replayed this transfer, we are done: */
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* get the context: primary by default, secondary or nucleus (context
zero) when the ASI selects them: */
context = ic->tme_sparc_memory_context_primary;
if (__tme_predict_false(asi_mask_data
& (TME_SPARC64_ASI_FLAG_SECONDARY
+ TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
context = ic->tme_sparc_memory_context_secondary;
}
else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
context = 0;
}
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* a lda without a no-fault ASI traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* if this lda is using a no-fault ASI: */
if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
/* a lda with a no-fault ASI traps on addresses with side-effects: */
asi_mask_flags_slow = TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS;
}
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = context;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != context
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast lda: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (32 / 8)) != 0)
)) {
/* call the slow load function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (32 / 8)));
/* if the slow load function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("lda 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer: */
endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer and convert from bus byte order: */
memory += address;
value32 = tme_memory_bus_read32((const tme_shared tme_uint32_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint64_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
/* possibly sign-extend the loaded value (insn bit 22 selects the
signed load variant): */
value64 = value32;
if (TME_SPARC_INSN & TME_BIT(22)) {
value64 = (tme_uint64_t) (tme_int64_t) (tme_int32_t) value64;
}
/* NB: the repeated (tme_int64_t) cast below is a no-op quirk of the
generator; value64 already holds the final (possibly sign-extended)
result: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int64_t) value64;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("lda 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 sta (alternate-space word store): stores the low
32 bits of rd to [rs1 + rs2] in the address space selected by the
instruction's ASI field.  the address must be 4-byte aligned for the
fast path; otherwise (or on any other fast-path miss) tme_sparc64_ls()
does the transfer: */
TME_SPARC_FORMAT3(tme_sparc64_sta, tme_uint64_t)
{
tme_uint32_t asi_mask_data;
tme_uint64_t address;
tme_bus_context_t context;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the alternate ASI mask: */
asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);
/* get the address (masked to the implemented virtual address width): */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
/* if recode verification replayed this transfer, we are done: */
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("sta 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint32_t) TME_SPARC_FORMAT3_RD));
/* get the context: primary by default, secondary or nucleus (context
zero) when the ASI selects them: */
context = ic->tme_sparc_memory_context_primary;
if (__tme_predict_false(asi_mask_data
& (TME_SPARC64_ASI_FLAG_SECONDARY
+ TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
context = ic->tme_sparc_memory_context_secondary;
}
else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
context = 0;
}
}
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a sta traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* if this sta is using a no-fault ASI: */
if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
/* a sta with a no-fault ASI traps: all-ones mask forces the slow
path for every DTLB entry: */
asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
}
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = context;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != context
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast sta: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (32 / 8)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (32 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer: */
endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer, converting the value to bus byte order first: */
memory += address;
value32 = TME_SPARC_FORMAT3_RD;
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
tme_memory_bus_write32((tme_shared tme_uint32_t *) memory, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t), sizeof(tme_uint64_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldda: */
/* NB: ldda (load doubleword from alternate space) loads two 32-bit
words from an alternate-ASI address into the even/odd destination
register pair.  the fast path reads host memory directly through a
busied DTLB entry; any condition the fast path cannot handle
(invalid TLB, context mismatch, address or ASI coverage, special
ASI flags, misalignment, odd rd) falls back to tme_sparc64_ls(). */
TME_SPARC_FORMAT3(tme_sparc64_ldda, tme_uint64_t)
{
tme_uint32_t asi_mask_data;
tme_uint64_t address;
tme_bus_context_t context;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the alternate ASI mask: */
/* NB: this also does the privilege/illegal-instruction checks for
the alternate-space form — presumably; see _tme_sparc64_alternate_asi_mask: */
asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
/* one 32-bit verify per destination register half: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64),
asi_mask_data, address + sizeof(tme_uint32_t),
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* get the context: */
/* the ASI selects the primary, secondary, or (when supported)
nucleus memory context: */
context = ic->tme_sparc_memory_context_primary;
if (__tme_predict_false(asi_mask_data
& (TME_SPARC64_ASI_FLAG_SECONDARY
+ TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
context = ic->tme_sparc_memory_context_secondary;
}
else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
context = 0;
}
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* a ldda without a no-fault ASI traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* if this ldda is using a no-fault ASI: */
if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
/* a ldda with a no-fault ASI traps on addresses with side-effects: */
asi_mask_flags_slow = TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS;
}
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = context;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != context
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast ldda: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (64 / 8)) != 0)
/* the destination register number is odd: */
|| ((TME_SPARC_INSN & TME_BIT(25)) != 0)
)) {
/* call the slow load function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| TME_SPARC_LSINFO_LDD_STD
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (64 / 8)));
/* if the slow load function did the transfer: */
/* NB: otherwise the returned pointer feeds the fast transfer below —
presumably adjusted so that memory + address is the data; TODO confirm
against tme_sparc64_ls(): */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry; */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64));
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldda 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64 " 0x%016" TME_PRIx64 ""),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD,
TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64)));
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer: */
/* a TLB whose ASI mask carries the little-endian flag inverts the
ASI-selected byte order, when the memory supports inversion: */
endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: */
/* two 32-bit bus reads fill the even and odd destination halves: */
memory += address;
value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 0, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint64_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
TME_SPARC_FORMAT3_RD = value32;
value32 = tme_memory_bus_read32(((const tme_shared tme_uint32_t *) memory) + 1, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint64_t));
value32 = (endian_little ? tme_letoh_u32(value32) : tme_betoh_u32(value32));
TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64) = value32;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64));
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldda 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64 " 0x%016" TME_PRIx64 ""),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD,
TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64)));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 stda: */
/* NB: stda (store doubleword into alternate space) writes the even/odd
source register pair as two 32-bit words to an alternate-ASI address.
the fast path writes host memory directly through a busied DTLB entry;
any condition the fast path cannot handle falls back to
tme_sparc64_ls(). */
TME_SPARC_FORMAT3(tme_sparc64_stda, tme_uint64_t)
{
tme_uint32_t asi_mask_data;
tme_uint64_t address;
tme_bus_context_t context;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint32_t value32;
/* get the alternate ASI mask: */
asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
/* one 32-bit verify per source register half: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
asi_mask_data, address,
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64),
asi_mask_data, address + sizeof(tme_uint32_t),
(TME_RECODE_SIZE_32
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the values stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("stda 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx32 " 0x%08" TME_PRIx32),
TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint32_t) TME_SPARC_FORMAT3_RD,
(tme_uint32_t) TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64)));
/* get the context: */
/* the ASI selects the primary, secondary, or (when supported)
nucleus memory context: */
context = ic->tme_sparc_memory_context_primary;
if (__tme_predict_false(asi_mask_data
& (TME_SPARC64_ASI_FLAG_SECONDARY
+ TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
context = ic->tme_sparc_memory_context_secondary;
}
else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
context = 0;
}
}
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a stda traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* if this stda is using a no-fault ASI: */
if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
/* a stda with a no-fault ASI traps: */
/* NB: all-bits-one forces every DTLB entry to take the slow path: */
asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
}
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = context;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != context
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))
/* the DTLB entry can't be used for a fast stda: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (64 / 8)) != 0)
/* the destination register number is odd: */
|| ((TME_SPARC_INSN & TME_BIT(25)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| TME_SPARC_LSINFO_LDD_STD
| TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
| TME_SPARC_LSINFO_A
| (64 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry; */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer: */
/* a TLB whose ASI mask carries the little-endian flag inverts the
ASI-selected byte order, when the memory supports inversion: */
endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: */
/* two 32-bit bus writes store the even and odd source halves: */
memory += address;
value32 = TME_SPARC_FORMAT3_RD;
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 0, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 2, sizeof(tme_uint64_t));
value32 = TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64);
value32 = (endian_little ? tme_htole_u32(value32) : tme_htobe_u32(value32));
tme_memory_bus_write32(((tme_shared tme_uint32_t *) memory) + 1, value32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t) * 1, sizeof(tme_uint64_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 jmpl: */
TME_SPARC_FORMAT3(tme_sparc64_jmpl, tme_uint64_t)
{
tme_uint64_t pc_next_next;
tme_uint32_t ls_faults;
/* "The JMPL instruction causes a register-indirect delayed control
transfer to the address given by r[rs1] + r[rs2] if the i field is
zero, or r[rs1] + sign_ext(simm13) if the i field is one. The JMPL
instruction copies the PC, which contains the address of the JMPL
instruction, into register r[rd]. If either of the low-order two
bits of the jump address is nonzero, a mem_address_not_aligned
trap occurs." */
/* get the target address: */
pc_next_next = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
pc_next_next &= ic->tme_sparc_address_mask;
/* set the delayed control transfer: */
ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC_NEXT_NEXT) = pc_next_next;
/* check the target address: */
ls_faults = TME_SPARC_LS_FAULT_NONE;
if (__tme_predict_false((pc_next_next
+ ic->tme_sparc64_ireg_va_hole_start)
> ((ic->tme_sparc64_ireg_va_hole_start * 2) - 1))) {
ls_faults += TME_SPARC64_LS_FAULT_VA_RANGE_NNPC;
}
if (__tme_predict_false((pc_next_next % sizeof(tme_uint32_t)) != 0)) {
ls_faults += TME_SPARC_LS_FAULT_ADDRESS_NOT_ALIGNED;
}
if (__tme_predict_false(ls_faults != TME_SPARC_LS_FAULT_NONE)) {
tme_sparc_nnpc_trap(ic, ls_faults);
}
/* write the PC of the jmpl into r[rd]: */
TME_SPARC_FORMAT3_RD = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_PC);
/* log an indirect call instruction, which has 15 (%o7) for rd: */
if (TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, TME_SPARC_FORMAT3_MASK_RD) == 15) {
tme_sparc_log(ic, 250, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("call 0x%016" TME_PRIx64),
pc_next_next));
}
/* log a ret or retl instruction, which has 0 (%g0) for rd,
either 31 (%i7) or 15 (%o7) for rs1, and 8 for simm13: */
else if ((TME_SPARC_INSN | (16 << 14))
== ((tme_uint32_t) (0x2 << 30) | (0 << 25) | (0x38 << 19) | (31 << 14) | (0x1 << 13) | 8)) {
tme_sparc_log(ic, 250, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("retl 0x%016" TME_PRIx64),
pc_next_next));
}
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldf: */
TME_SPARC_FORMAT3(tme_sparc64_ldf, tme_uint64_t)
{
tme_uint32_t misaligned;
struct tme_float float_buffer;
struct tme_float *fpreg;
/* get the least significant 32 bits of the address: */
misaligned = TME_SPARC_FORMAT3_RS1;
misaligned += (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* decode rd: */
float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
fpreg
= _tme_sparc64_fpu_mem_fpreg(ic,
misaligned,
&float_buffer);
/* do the load: */
tme_sparc64_ld(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
/* set the floating-point register value: */
assert (fpreg != &float_buffer);
fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
fpreg->tme_float_value_ieee754_single
= ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX << 1);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 lddf: */
/* NB: lddf loads a 64-bit doubleword into a double-precision
floating-point register.  when the address is only 32-bit aligned and
the implementation supports that (signalled by
_tme_sparc64_fpu_mem_fpreg() returning a real register, not the
caller's buffer), the load is split into two 32-bit loads; otherwise a
single 64-bit (ldx-style) load is used. */
TME_SPARC_FORMAT3(tme_sparc64_lddf, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t misaligned;
struct tme_float float_buffer;
struct tme_float *fpreg;
tme_uint64_t offset;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
/* get the least significant 32 bits of the address: */
misaligned = address;
/* decode rd: */
float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
fpreg
= _tme_sparc64_fpu_mem_fpreg(ic,
misaligned,
&float_buffer);
/* if bit two of the address is set, and this SPARC supports
32-bit-aligned lddf instructions: */
if ((misaligned & sizeof(tme_uint32_t))
&& fpreg != &float_buffer) {
/* do two 32-bit loads: */
/* the first word is the most significant half, per big-endian order: */
offset = sizeof(tme_uint32_t) * 0;
tme_sparc64_ld(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0));
offset = sizeof(tme_uint32_t) * 1;
tme_sparc64_ld(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1));
/* set the double floating-point register value: */
fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
fpreg->tme_float_value_ieee754_double.tme_value64_uint32_hi
= ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0);
fpreg->tme_float_value_ieee754_double.tme_value64_uint32_lo
= ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1);
}
/* otherwise, bit two of the address is not set, or this SPARC
doesn't support 32-bit-aligned lddf instructions: */
else {
/* do an ldx-style load: */
tme_sparc64_ldx(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
/* set the double floating-point register value: */
assert (fpreg != &float_buffer);
fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
fpreg->tme_float_value_ieee754_double.tme_value64_uint
= ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX);
}
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldfsr: */
/* NB: this handler implements both ldfsr (rd == 0, a 32-bit load of
the FSR) and ldxfsr (rd == 1, a 64-bit load of the extended FSR plus
the FSR); any other rd is an illegal instruction. */
TME_SPARC_FORMAT3(tme_sparc64_ldfsr, tme_uint64_t)
{
tme_uint32_t fsr;
tme_uint32_t reg_rd;
/* see if this is an ldfsr or an ldxfsr: */
reg_rd = TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, TME_SPARC_FORMAT3_MASK_RD);
if (__tme_predict_false(reg_rd > 1)) {
TME_SPARC_INSN_ILL(ic);
}
_tme_sparc64_fpu_mem(ic);
/* if this is an ldxfsr: */
if (reg_rd == 1) {
/* do the load: */
tme_sparc64_ldx(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
/* update the extended FSR: */
/* only the fcc3..fcc1 fields live in the extended FSR: */
ic->tme_sparc_fpu_xfsr
= (ic->tme_sparc_ireg_uint32((TME_SPARC_IREG_FPX << 1) + 1)
& 0x3f /* fcc3 .. fcc1 */);
}
/* otherwise, this is an ldfsr. do the load: */
else
tme_sparc64_ld(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
/* update the FSR: */
fsr = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX << 1);
/* "An LDFSR instruction does not affect ftt." */
/* "The LDFSR instruction does not affect qne." */
/* ver is a read-only field, so it is preserved as well: */
fsr &= ~(TME_SPARC_FSR_VER | TME_SPARC_FSR_FTT | TME_SPARC_FSR_QNE);
ic->tme_sparc_fpu_fsr = (ic->tme_sparc_fpu_fsr & (TME_SPARC_FSR_VER | TME_SPARC_FSR_FTT | TME_SPARC_FSR_QNE)) | fsr;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 stf: */
TME_SPARC_FORMAT3(tme_sparc64_stf, tme_uint64_t)
{
tme_uint32_t misaligned;
struct tme_float float_buffer;
const struct tme_float *fpreg;
const tme_uint32_t *value_single;
/* get the least significant 32 bits of the address: */
misaligned = TME_SPARC_FORMAT3_RS1;
misaligned += (tme_uint32_t) TME_SPARC_FORMAT3_RS2;
/* decode rd: */
float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
fpreg
= _tme_sparc64_fpu_mem_fpreg(ic,
misaligned,
&float_buffer);
/* get this single floating-point register in IEEE754 single-precision format: */
value_single = tme_ieee754_single_value_get(fpreg, &float_buffer.tme_float_value_ieee754_single);
/* set the floating-point register value: */
ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX << 1) = *value_single;
/* do the store: */
tme_sparc64_st(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
assert (fpreg != &float_buffer);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 stdf: */
/* NB: stdf stores a double-precision floating-point register as a
   64-bit doubleword.  when the address is only 32-bit aligned and the
   implementation supports that (signalled by
   _tme_sparc64_fpu_mem_fpreg() returning a real register, not the
   caller's buffer), the store is split into two 32-bit stores;
   otherwise a single 64-bit (stx-style) store is used. */
TME_SPARC_FORMAT3(tme_sparc64_stdf, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t misaligned;
  struct tme_float float_buffer;
  struct tme_float *fpreg;
  const union tme_value64 *value_double;
  tme_uint64_t offset;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

  /* get the least significant 32 bits of the address: */
  misaligned = address;

  /* decode rd: */
  float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
  fpreg
    = _tme_sparc64_fpu_mem_fpreg(ic,
                                 misaligned,
                                 &float_buffer);

  /* get this double floating-point register in IEEE754 double-precision format: */
  value_double = tme_ieee754_double_value_get(fpreg, &float_buffer.tme_float_value_ieee754_double);

  /* if bit two of the address is set, and this SPARC supports
     32-bit-aligned stdf instructions: */
  if ((misaligned & sizeof(tme_uint32_t))
      && fpreg != &float_buffer) {

    /* stage the two 32-bit halves in the staging registers.  NB: both
       halves must be staged through the 64-bit register view
       (tme_sparc_ireg_uint64), because tme_sparc64_st() below reads
       its value through the 64-bit view; the 32-bit view is indexed
       at double granularity (cf. the FPX << 1 indexing used by the
       stf/ldfsr handlers), so staging the low half through
       tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX + 1) - as this code
       previously did - writes a different register slot and the
       second store would emit stale data: */
    ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0)
      = value_double->tme_value64_uint32_hi;
    ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1)
      = value_double->tme_value64_uint32_lo;

    /* do two 32-bit stores: */
    offset = sizeof(tme_uint32_t) * 0;
    tme_sparc64_st(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0));
    offset = sizeof(tme_uint32_t) * 1;
    tme_sparc64_st(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1));
  }

  /* otherwise, bit two of the address is not set, or this SPARC
     doesn't support 32-bit-aligned stdf instructions: */
  else {

    /* set the floating-point register value: */
    ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX)
      = value_double->tme_value64_uint;

    /* do an stx-style store: */
    tme_sparc64_stx(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
  }
  assert (fpreg != &float_buffer);
  TME_SPARC_INSN_OK;
}
/* this does a sparc64 stfsr: */
TME_SPARC_FORMAT3(tme_sparc64_stfsr, tme_uint64_t)
{
  tme_uint32_t reg_rd;

  /* rd selects the variant: 0 is stfsr, 1 is stxfsr, anything else
     is an illegal instruction: */
  reg_rd = TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, TME_SPARC_FORMAT3_MASK_RD);
  if (__tme_predict_false(reg_rd > 1)) {
    TME_SPARC_INSN_ILL(ic);
  }
  _tme_sparc64_fpu_mem(ic);

  /* stage the FSR value to store: */
  ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX << 1) = ic->tme_sparc_fpu_fsr;

  /* an stfsr does a 32-bit store of the FSR alone: */
  if (reg_rd == 0) {
    tme_sparc64_st(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
  }

  /* an stxfsr does a 64-bit store of the extended FSR and the FSR: */
  else {
    ic->tme_sparc_ireg_uint32((TME_SPARC_IREG_FPX << 1) + 1) = ic->tme_sparc_fpu_xfsr;
    tme_sparc64_stx(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
  }
  TME_SPARC_INSN_OK;
}
/* this does a sparc64 fpop1: */
/* NB: decode and execution of the FPop1 group is delegated entirely
to the FPU support code; TME_SPARC_INSN_FPU presumably traps first if
the FPU is unavailable - TODO confirm against the FPU support code. */
TME_SPARC_FORMAT3(tme_sparc64_fpop1, tme_uint64_t)
{
TME_SPARC_INSN_FPU;
tme_sparc_fpu_fpop1(ic);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 fpop2: */
/* NB: decode and execution of the FPop2 group is delegated entirely
to the FPU support code; TME_SPARC_INSN_FPU presumably traps first if
the FPU is unavailable - TODO confirm against the FPU support code. */
TME_SPARC_FORMAT3(tme_sparc64_fpop2, tme_uint64_t)
{
TME_SPARC_INSN_FPU;
tme_sparc_fpu_fpop2(ic);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 "mulscc SRC1, SRC2, DST": */
/* NB: mulscc performs one step of a 32-bit shift-and-add multiply,
using the Y register as the multiplier and the integer condition
codes as the sign of the running partial product.  the numbered
quotes below follow the architecture manual's step ordering. */
TME_SPARC_FORMAT3(tme_sparc64_mulscc, tme_uint64_t)
{
tme_uint32_t src1;
tme_uint32_t src2;
tme_uint32_t dst;
tme_uint32_t y;
tme_uint32_t cc;
/* get the operands: */
/* NB: the operands are truncated to 32 bits; mulscc is a 32-bit
operation even on sparc64: */
src1 = (tme_uint64_t) TME_SPARC_FORMAT3_RS1;
src2 = (tme_uint64_t) TME_SPARC_FORMAT3_RS2;
/* perform the operation: */
/* "(1) The multiplier is established as r[rs2] if the i field is zero, or
sign_ext(simm13) if the i field is one."
"(3) If the least significant bit of the Y register = 1, the shifted
value from step (2) is added to the multiplier. If the LSB of the
Y register = 0, then 0 is added to the shifted value from step (2)." */
y = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1);
if ((y & 1) == 0) {
src2 = 0;
}
/* "(6) The Y register is shifted right by one bit, with the LSB of the
unshifted r[rs1] replacing the MSB of Y." */
y >>= 1;
if (src1 & 1) {
y += 0x80000000;
}
ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_Y << 1) = y;
/* "(2) A 32-bit value is computed by shifting r[rs1] right by one
bit with (N xor V) from the PSR replacing the high-order bit.
(This is the proper sign for the previous partial product.)" */
/* the multiply by (ICC_N / ICC_V) aligns the V bit with the N bit so
the xor can be taken in the N position: */
src1 >>= 1;
if (((ic->tme_sparc64_ireg_ccr ^ (ic->tme_sparc64_ireg_ccr * (TME_SPARC64_CCR_ICC_N / TME_SPARC64_CCR_ICC_V))) & TME_SPARC64_CCR_ICC_N) != 0) {
src1 += 0x80000000;
}
/* "(4) The sum from step (3) is written into r[rd]." */
dst = src1 + src2;
/* "(5) The integer condition codes, icc, are updated according to the
addition performed in step (3)." */
/* store the destination: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) dst;
/* set Z if the destination is zero: */
cc = ((((tme_int32_t) dst) == 0) * TME_SPARC64_CCR_ICC_Z);
/* set N if the destination is negative: */
cc += ((((tme_int32_t) dst) < 0) * TME_SPARC64_CCR_ICC_N);
/* if the operands are the same sign, and the destination has
a different sign, set V: */
cc += ((((tme_int32_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint32_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_ICC_V);
/* if src1 and src2 both have the high bit set, or if dst does
not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int32_t) (((tme_uint32_t) (src1 & src2)) | ((((tme_uint32_t) dst) ^ (((tme_uint32_t) 0) - 1)) & ((tme_uint32_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_ICC_C;
/* the xcc fields are computed from the same 32-bit values widened
to 64 bits: */
/* NOTE(review): since dst/src1/src2 are 32-bit here, the widened
values are zero-extended, so XCC_N is never set and XCC_Z mirrors
ICC_Z - this looks intentional for mulscc's 32-bit result, but
confirm against the instruction-set generator. */
/* set Z if the destination is zero: */
cc += ((((tme_int64_t) dst) == 0) * TME_SPARC64_CCR_XCC_Z);
/* if the operands are the same sign, and the destination has
a different sign, set V: */
cc += ((((tme_int64_t) ((src2 ^ dst) & (src1 ^ (src2 ^ (((tme_uint64_t) 0) - 1))))) < 0) * TME_SPARC64_CCR_XCC_V);
/* if src1 and src2 both have the high bit set, or if dst does
not have the high bit set and either src1 or src2 does, set C: */
cc += (((tme_int64_t) (((tme_uint64_t) (src1 & src2)) | ((((tme_uint64_t) dst) ^ (((tme_uint64_t) 0) - 1)) & ((tme_uint64_t) (src1 | src2))))) < 0) * TME_SPARC64_CCR_XCC_C;
/* set the condition codes: */
ic->tme_sparc64_ireg_ccr = cc;
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldx: */
/* NB: ldx loads a 64-bit doubleword using the current (non-alternate)
ASI.  the fast path reads host memory directly through a busied DTLB
entry; any condition the fast path cannot handle falls back to
tme_sparc64_ls(). */
TME_SPARC_FORMAT3(tme_sparc64_ldx, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
const tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint64_t value64;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_64
| TME_SPARC_RECODE_VERIFY_MEM_LOAD));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* assume that no DTLB ASI mask flags will require a slow load: */
asi_mask_flags_slow = 0;
/* a ldx without a no-fault ASI traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_read;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow load function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast ldx: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (64 / 8)) != 0)
)) {
/* call the slow load function: */
/* NB: unlike the store handlers, there is no TME_EMULATOR_OFF_UNDEF
check here - for plain loads the slow function presumably returns a
pointer such that memory + address below yields the loaded data, so
the fast-transfer code always runs; TODO confirm against
tme_sparc64_ls(): */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_LD
| (64 / 8)));
}
/* get the byte order of this transfer: */
/* a TLB whose ASI mask carries the little-endian flag inverts the
ASI-selected byte order, when the memory supports inversion: */
endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: */
memory += address;
value64 = tme_memory_bus_read64((const tme_shared tme_uint64_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint64_t), sizeof(tme_uint64_t));
value64 = (endian_little ? tme_letoh_u64(value64) : tme_betoh_u64(value64));
/* NB: the doubled cast is a generator artifact; for the full-width
ldx it is an identity conversion: */
TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int64_t) value64;
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
/* log the value loaded: */
tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("ldx 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
TME_SPARC_FORMAT3_RD));
TME_SPARC_INSN_OK;
}
/* this does a sparc64 stx: */
/* NB: stx stores a 64-bit doubleword using the current (non-alternate)
ASI.  the fast path writes host memory directly through a busied DTLB
entry; any condition the fast path cannot handle falls back to
tme_sparc64_ls(). */
TME_SPARC_FORMAT3(tme_sparc64_stx, tme_uint64_t)
{
tme_uint64_t address;
tme_uint32_t asi_mask_flags_slow;
struct tme_sparc_tlb *dtlb;
tme_shared tme_uint8_t *memory;
tme_bus_context_t dtlb_context;
tme_uint32_t endian_little;
tme_uint64_t value64;
/* get the address: */
address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
address &= ic->tme_sparc_address_mask;
#ifdef _TME_SPARC_STATS
/* track statistics: */
ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */
/* verify and maybe replay this transfer: */
tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
ic->tme_sparc_asi_mask_data, address,
(TME_RECODE_SIZE_64
| TME_SPARC_RECODE_VERIFY_MEM_STORE));
if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
TME_SPARC_INSN_OK;
}
/* log the value stored: */
tme_sparc_log(ic, 1000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("stx 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
TME_SPARC_ASI_MASK_WHICH(ic->tme_sparc_asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
address,
(tme_uint64_t) TME_SPARC_FORMAT3_RD));
/* assume that no DTLB ASI mask flags will require a slow store: */
asi_mask_flags_slow = 0;
/* a stx traps on no-fault addresses: */
asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;
/* get and busy the DTLB entry: */
dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, ic->tme_sparc_memory_context_default, address))];
tme_sparc_tlb_busy(dtlb);
/* assume that this DTLB applies and allows fast transfers: */
memory = dtlb->tme_sparc_tlb_emulator_off_write;
/* if this DTLB matches any context, it matches this context: */
dtlb_context = dtlb->tme_sparc_tlb_context;
if (dtlb_context > ic->tme_sparc_memory_context_max) {
dtlb_context = ic->tme_sparc_memory_context_default;
}
/* we must call the slow store function if: */
if (__tme_predict_false(
/* the DTLB entry is invalid: */
tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)
/* the DTLB entry does not match the context: */
|| dtlb_context != ic->tme_sparc_memory_context_default
/* the DTLB entry does not cover the needed addresses: */
|| (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
|| ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)
/* the DTLB entry does not cover the needed address space: */
|| (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, ic->tme_sparc_asi_mask_data))
/* the DTLB entry can't be used for a fast stx: */
|| (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0
/* the DTLB entry does not allow fast transfers: */
|| (memory == TME_EMULATOR_OFF_UNDEF)
/* the address is misaligned: */
|| ((address % (64 / 8)) != 0)
)) {
/* call the slow store function: */
memory = tme_sparc64_ls(ic,
address,
&TME_SPARC_FORMAT3_RD,
(TME_SPARC_LSINFO_OP_ST
| (64 / 8)));
/* if the slow store function did the transfer: */
if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {
/* unbusy the TLB entry; */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
}
/* get the byte order of this transfer: */
/* a TLB whose ASI mask carries the little-endian flag inverts the
ASI-selected byte order, when the memory supports inversion: */
endian_little = ic->tme_sparc_asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
}
else {
assert (FALSE);
}
}
/* do the fast transfer: */
memory += address;
value64 = TME_SPARC_FORMAT3_RD;
value64 = (endian_little ? tme_htole_u64(value64) : tme_htobe_u64(value64));
tme_memory_bus_write64((tme_shared tme_uint64_t *) memory, value64, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint64_t), sizeof(tme_uint64_t));
/* unbusy the DTLB entry: */
tme_sparc_tlb_unbusy(dtlb);
TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldxa (load extended word from alternate space):
   loads a 64-bit value from the address space selected by the
   instruction's ASI into rd.  a fast path reads host memory directly
   through a busied DTLB entry; any mismatch (validity, context, address
   range, ASI, slow-path flags, alignment) falls back to the slow
   tme_sparc64_ls() path: */
TME_SPARC_FORMAT3(tme_sparc64_ldxa, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint64_t address;
  tme_bus_context_t context;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  const tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint64_t value64;

  /* get the alternate ASI mask (this also checks privilege and the
     immediate bit of the instruction): */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              asi_mask_data, address,
                              (TME_RECODE_SIZE_64
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* get the context: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      /* nucleus-context ASIs use context zero: */
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow load: */
  asi_mask_flags_slow = 0;

  /* a ldxa without a no-fault ASI traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* if this ldxa is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a ldxa with a no-fault ASI traps on addresses with side-effects: */
    asi_mask_flags_slow = TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_read;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow load function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast ldxa: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: */
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned: */
                          || ((address % (64 / 8)) != 0)

                          )) {

    /* call the slow load function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_LD
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (64 / 8)));

    /* if the slow load function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the value loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_log(ic, 1000, TME_OK,
                    (TME_SPARC_LOG_HANDLE(ic),
                     _("ldxa 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
                     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                     address,
                     TME_SPARC_FORMAT3_RD));
      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer: */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      /* this TLB entry inverts the apparent byte order of the transfer: */
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: */
  memory += address;
  value64 = tme_memory_bus_read64((const tme_shared tme_uint64_t *) memory, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint64_t), sizeof(tme_uint64_t));
  value64 = (endian_little ? tme_letoh_u64(value64) : tme_betoh_u64(value64));
  /* NB: the repeated (tme_int64_t) cast is redundant generator output; a
     64-bit value is stored into the 64-bit rd unchanged: */
  TME_SPARC_FORMAT3_RD = (tme_uint64_t) (tme_int64_t) (tme_int64_t) value64;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("ldxa 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD));
  TME_SPARC_INSN_OK;
}
/* this does a sparc64 stxa (store extended word into alternate space):
   stores rd as a 64-bit value into the address space selected by the
   instruction's ASI.  a fast path writes host memory directly through a
   busied DTLB entry; any mismatch falls back to the slow
   tme_sparc64_ls() path: */
TME_SPARC_FORMAT3(tme_sparc64_stxa, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint64_t address;
  tme_bus_context_t context;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  tme_uint64_t value64;

  /* get the alternate ASI mask (this also checks privilege and the
     immediate bit of the instruction): */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              asi_mask_data, address,
                              (TME_RECODE_SIZE_64
                               | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("stxa 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint64_t) TME_SPARC_FORMAT3_RD));

  /* get the context: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      /* nucleus-context ASIs use context zero: */
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a stxa traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* if this stxa is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a stxa with a no-fault ASI traps, so force the slow path by
       matching every possible DTLB ASI mask flag: */
    asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast stxa: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers: */
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned: */
                          || ((address % (64 / 8)) != 0)

                          )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ST
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (64 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);
      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer: */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      /* this TLB entry inverts the apparent byte order of the transfer: */
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: */
  memory += address;
  value64 = TME_SPARC_FORMAT3_RD;
  value64 = (endian_little ? tme_htole_u64(value64) : tme_htobe_u64(value64));
  tme_memory_bus_write64((tme_shared tme_uint64_t *) memory, value64, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint64_t), sizeof(tme_uint64_t));

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);
  TME_SPARC_INSN_OK;
}
/* this does a sparc64 ldfa (load single-precision floating-point from
   alternate space): decodes rd as a single fp register (checking FPU
   traps and alignment first), does the 32-bit alternate-space load via
   tme_sparc64_lda() into the FPX staging integer register, then moves
   the loaded word into the fp register: */
TME_SPARC_FORMAT3(tme_sparc64_ldfa, tme_uint64_t)
{
  tme_uint32_t misaligned;
  struct tme_float float_buffer;
  struct tme_float *fpreg;

  /* get the least significant 32 bits of the address: */
  misaligned = TME_SPARC_FORMAT3_RS1;
  misaligned += (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* see if the address is misaligned for the ASI: */
  misaligned = (*ic->_tme_sparc_ls_asi_misaligned)(ic, misaligned);

  /* decode rd: */
  float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
  fpreg
    = _tme_sparc64_fpu_mem_fpreg(ic,
                                 misaligned,
                                 &float_buffer);

  /* do the load (this may fault and not return here): */
  tme_sparc64_lda(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));

  /* set the floating-point register value: */
  assert (fpreg != &float_buffer);
  fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
  /* NB: the << 1 presumably selects the 32-bit half of the 64-bit FPX
     staging register holding the loaded word — confirm against the
     tme_sparc_ireg_uint32/uint64 overlay layout: */
  fpreg->tme_float_value_ieee754_single
    = ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX << 1);
  TME_SPARC_INSN_OK;
}
/* this does a sparc64 lddfa (load double-precision floating-point from
   alternate space): decodes rd as a double fp register, then either does
   two 32-bit loads (when the address is only 32-bit aligned and this
   SPARC supports that) or a single 64-bit ldxa-style load, and assembles
   the result into the fp register: */
TME_SPARC_FORMAT3(tme_sparc64_lddfa, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t misaligned;
  struct tme_float float_buffer;
  struct tme_float *fpreg;
  tme_uint64_t offset;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

  /* get the least significant 32 bits of the address: */
  misaligned = address;

  /* see if the address is misaligned for the ASI: */
  misaligned = (*ic->_tme_sparc_ls_asi_misaligned)(ic, misaligned);

  /* decode rd: */
  float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
  fpreg
    = _tme_sparc64_fpu_mem_fpreg(ic,
                                 misaligned,
                                 &float_buffer);

  /* if bit two of the address is set, and this SPARC supports
     32-bit-aligned lddfa instructions: */
  if ((misaligned & sizeof(tme_uint32_t))
      && fpreg != &float_buffer) {

    /* do two 32-bit loads (high word first, at the lower address): */
    offset = sizeof(tme_uint32_t) * 0;
    tme_sparc64_lda(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0));
    offset = sizeof(tme_uint32_t) * 1;
    tme_sparc64_lda(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1));

    /* set the double floating-point register value: */
    fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
    fpreg->tme_float_value_ieee754_double.tme_value64_uint32_hi
      = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0);
    fpreg->tme_float_value_ieee754_double.tme_value64_uint32_lo
      = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1);
  }

  /* otherwise, bit two of the address is not set, or this SPARC
     doesn't support 32-bit-aligned lddfa instructions: */
  else {

    /* do an ldxa-style load: */
    tme_sparc64_ldxa(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));

    /* set the double floating-point register value: */
    assert (fpreg != &float_buffer);
    fpreg->tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
    fpreg->tme_float_value_ieee754_double.tme_value64_uint
      = ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX);
  }
  TME_SPARC_INSN_OK;
}
/* this does a sparc64 stfa (store single-precision floating-point into
   alternate space): decodes rd as a single fp register, copies its raw
   IEEE754 bits into the FPX staging integer register, then does the
   32-bit alternate-space store via tme_sparc64_sta(): */
TME_SPARC_FORMAT3(tme_sparc64_stfa, tme_uint64_t)
{
  tme_uint32_t misaligned;
  struct tme_float float_buffer;
  const struct tme_float *fpreg;
  const tme_uint32_t *value_single;

  /* get the least significant 32 bits of the address: */
  misaligned = TME_SPARC_FORMAT3_RS1;
  misaligned += (tme_uint32_t) TME_SPARC_FORMAT3_RS2;

  /* see if the address is misaligned for the ASI: */
  misaligned = (*ic->_tme_sparc_ls_asi_misaligned)(ic, misaligned);

  /* decode rd: */
  float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_SINGLE;
  fpreg
    = _tme_sparc64_fpu_mem_fpreg(ic,
                                 misaligned,
                                 &float_buffer);

  /* get this single floating-point register in IEEE754 single-precision format: */
  value_single = tme_ieee754_single_value_get(fpreg, &float_buffer.tme_float_value_ieee754_single);

  /* stage the raw fp bits in the FPX integer register for the store: */
  ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX << 1) = *value_single;

  /* do the store: */
  tme_sparc64_sta(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
  assert (fpreg != &float_buffer);
  TME_SPARC_INSN_OK;
}
/* this does a sparc64 stdfa (store double-precision floating-point into
   alternate space): decodes rd as a double fp register, then either does
   two 32-bit stores (when the address is only 32-bit aligned and this
   SPARC supports that) or a single 64-bit stxa-style store: */
TME_SPARC_FORMAT3(tme_sparc64_stdfa, tme_uint64_t)
{
  tme_uint64_t address;
  tme_uint32_t misaligned;
  struct tme_float float_buffer;
  struct tme_float *fpreg;
  const union tme_value64 *value_double;
  tme_uint64_t offset;

  /* get the address: */
  address = TME_SPARC_FORMAT3_RS1 + TME_SPARC_FORMAT3_RS2;

  /* get the least significant 32 bits of the address: */
  misaligned = address;

  /* see if the address is misaligned for the ASI: */
  misaligned = (*ic->_tme_sparc_ls_asi_misaligned)(ic, misaligned);

  /* decode rd: */
  float_buffer.tme_float_format = TME_FLOAT_FORMAT_IEEE754_DOUBLE;
  fpreg
    = _tme_sparc64_fpu_mem_fpreg(ic,
                                 misaligned,
                                 &float_buffer);

  /* get this double floating-point register in IEEE754 double-precision format: */
  value_double = tme_ieee754_double_value_get(fpreg, &float_buffer.tme_float_value_ieee754_double);

  /* if bit two of the address is set, and this SPARC supports
     32-bit-aligned stdfa instructions: */
  if ((misaligned & sizeof(tme_uint32_t))
      && fpreg != &float_buffer) {

    /* stage the two 32-bit halves in integer registers:
       NOTE(review): the hi half is staged with ireg_uint64 but the lo
       half with ireg_uint32 — asymmetric with the lddfa path, though a
       32-bit sta only uses the low 32 bits; confirm against the
       generator's intent: */
    ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0)
      = value_double->tme_value64_uint32_hi;
    ic->tme_sparc_ireg_uint32(TME_SPARC_IREG_FPX + 1)
      = value_double->tme_value64_uint32_lo;

    /* do two 32-bit stores (high word first, at the lower address): */
    offset = sizeof(tme_uint32_t) * 0;
    tme_sparc64_sta(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 0));
    offset = sizeof(tme_uint32_t) * 1;
    tme_sparc64_sta(ic, &address, &offset, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX + 1));
  }

  /* otherwise, bit two of the address is not set, or this SPARC
     doesn't support 32-bit-aligned stdfa instructions: */
  else {

    /* stage the 64-bit value in the FPX integer register: */
    ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX)
      = value_double->tme_value64_uint;

    /* do an stxa-style store: */
    tme_sparc64_stxa(ic, _rs1, _rs2, &ic->tme_sparc_ireg_uint64(TME_SPARC_IREG_FPX));
  }
  assert (fpreg != &float_buffer);
  TME_SPARC_INSN_OK;
}
/* this does a sparc64 casa (32-bit compare and swap in alternate space):
   atomically compares the 32-bit word at [rs1] with rs2 and, if equal,
   swaps in rd; rd always receives the previous memory value.  a fast
   path uses a host atomic compare-and-exchange through a busied DTLB
   entry that maps the same memory for both read and write; otherwise the
   slow tme_sparc64_ls() atomic path is used: */
TME_SPARC_FORMAT3(tme_sparc64_casa, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint64_t address;
  tme_bus_context_t context;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  unsigned int reg_rs2;
  tme_uint32_t value_compare32;
  tme_uint32_t value_swap32;
  tme_uint32_t value_read32;

  /* get the alternate ASI mask (this also checks privilege and the
     immediate bit of the instruction): */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the address (rs1 only; rs2 is the comparison value): */
  address = TME_SPARC_FORMAT3_RS1;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              asi_mask_data, address,
                              (TME_RECODE_SIZE_32
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("casa 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx32),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint32_t) TME_SPARC_FORMAT3_RD));

  /* get the context: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      /* nucleus-context ASIs use context zero: */
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a casa traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* a casa traps on uncacheable addresses with side-effects: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE;

  /* if this casa is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a casa with a no-fault ASI traps, so force the slow path by
       matching every possible DTLB ASI mask flag: */
    asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((32 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast casa: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers (an
                             atomic needs identical read and write mappings): */
                          || (memory != dtlb->tme_sparc_tlb_emulator_off_read)
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned: */
                          || ((address % (32 / 8)) != 0)

                          )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ATOMIC
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (32 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the value loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_log(ic, 1000, TME_OK,
                    (TME_SPARC_LOG_HANDLE(ic),
                     _("casa 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
                     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                     address,
                     TME_SPARC_FORMAT3_RD));
      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer: */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      /* this TLB entry inverts the apparent byte order of the transfer: */
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: compare with rs2, swap in rd, read back the
     previous memory value into rd, all in bus byte order: */
  memory += address;
  reg_rs2 = TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, TME_SPARC_FORMAT3_MASK_RS2);
  TME_SPARC_REG_INDEX(ic, reg_rs2);
  value_compare32 = ic->tme_sparc_ireg_uint64(reg_rs2);
  value_compare32 = (endian_little ? tme_htole_u32(value_compare32) : tme_htobe_u32(value_compare32));
  value_swap32 = TME_SPARC_FORMAT3_RD;
  value_swap32 = (endian_little ? tme_htole_u32(value_swap32) : tme_htobe_u32(value_swap32));
  value_read32 = tme_memory_atomic_cx32((tme_shared tme_uint32_t *) memory, value_compare32, value_swap32, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint32_t));
  value_read32 = (endian_little ? tme_letoh_u32(value_read32) : tme_betoh_u32(value_read32));
  TME_SPARC_FORMAT3_RD = value_read32;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("casa 0x%02x:0x%016" TME_PRIx64 ": 0x%08" TME_PRIx64),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD));
  TME_SPARC_INSN_OK;
}
/* this does a sparc64 casxa (64-bit compare and swap in alternate
   space): atomically compares the 64-bit word at [rs1] with rs2 and, if
   equal, swaps in rd; rd always receives the previous memory value.  a
   fast path uses a host atomic compare-and-exchange through a busied
   DTLB entry that maps the same memory for both read and write;
   otherwise the slow tme_sparc64_ls() atomic path is used: */
TME_SPARC_FORMAT3(tme_sparc64_casxa, tme_uint64_t)
{
  tme_uint32_t asi_mask_data;
  tme_uint64_t address;
  tme_bus_context_t context;
  tme_uint32_t asi_mask_flags_slow;
  struct tme_sparc_tlb *dtlb;
  tme_shared tme_uint8_t *memory;
  tme_bus_context_t dtlb_context;
  tme_uint32_t endian_little;
  unsigned int reg_rs2;
  tme_uint64_t value_compare64;
  tme_uint64_t value_swap64;
  tme_uint64_t value_read64;

  /* get the alternate ASI mask (this also checks privilege and the
     immediate bit of the instruction): */
  asi_mask_data = _tme_sparc64_alternate_asi_mask(ic);

  /* get the address (rs1 only; rs2 is the comparison value): */
  address = TME_SPARC_FORMAT3_RS1;
  address &= ic->tme_sparc_address_mask;

#ifdef _TME_SPARC_STATS
  /* track statistics: */
  ic->tme_sparc_stats.tme_sparc_stats_memory_total++;
#endif /* _TME_SPARC_STATS */

  /* verify and maybe replay this transfer: */
  tme_sparc_recode_verify_mem(ic, &TME_SPARC_FORMAT3_RD,
                              asi_mask_data, address,
                              (TME_RECODE_SIZE_64
                               | TME_SPARC_RECODE_VERIFY_MEM_LOAD | TME_SPARC_RECODE_VERIFY_MEM_STORE));
  if (tme_sparc_recode_verify_replay_last_pc(ic) != 0) {
    TME_SPARC_INSN_OK;
  }

  /* log the value stored: */
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("casxa 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 (tme_uint64_t) TME_SPARC_FORMAT3_RD));

  /* get the context: */
  context = ic->tme_sparc_memory_context_primary;
  if (__tme_predict_false(asi_mask_data
                          & (TME_SPARC64_ASI_FLAG_SECONDARY
                             + TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS))) {
    if (asi_mask_data & TME_SPARC64_ASI_FLAG_SECONDARY) {
      context = ic->tme_sparc_memory_context_secondary;
    }
    else if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
      /* nucleus-context ASIs use context zero: */
      context = 0;
    }
  }

  /* assume that no DTLB ASI mask flags will require a slow store: */
  asi_mask_flags_slow = 0;

  /* a casxa traps on no-fault addresses: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_FLAG_NO_FAULT;

  /* a casxa traps on uncacheable addresses with side-effects: */
  asi_mask_flags_slow |= TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE;

  /* if this casxa is using a no-fault ASI: */
  if (__tme_predict_false(asi_mask_data & TME_SPARC64_ASI_FLAG_NO_FAULT)) {

    /* a casxa with a no-fault ASI traps, so force the slow path by
       matching every possible DTLB ASI mask flag: */
    asi_mask_flags_slow = 0 - (tme_uint32_t) 1;
  }

  /* get and busy the DTLB entry: */
  dtlb = &ic->tme_sparc_tlbs[TME_SPARC_DTLB_ENTRY(ic, TME_SPARC_TLB_HASH(ic, context, address))];
  tme_sparc_tlb_busy(dtlb);

  /* assume that this DTLB applies and allows fast transfers: */
  memory = dtlb->tme_sparc_tlb_emulator_off_write;

  /* if this DTLB matches any context, it matches this context: */
  dtlb_context = dtlb->tme_sparc_tlb_context;
  if (dtlb_context > ic->tme_sparc_memory_context_max) {
    dtlb_context = context;
  }

  /* we must call the slow store function if: */
  if (__tme_predict_false(

                          /* the DTLB entry is invalid: */
                          tme_bus_tlb_is_invalid(&dtlb->tme_sparc_tlb_bus_tlb)

                          /* the DTLB entry does not match the context: */
                          || dtlb_context != context

                          /* the DTLB entry does not cover the needed addresses: */
                          || (address < (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_first)
                          || ((address + ((64 / 8) - 1)) > (tme_bus_addr64_t) dtlb->tme_sparc_tlb_addr_last)

                          /* the DTLB entry does not cover the needed address space: */
                          || (!TME_SPARC_TLB_ASI_MASK_OK(dtlb, asi_mask_data))

                          /* the DTLB entry can't be used for a fast casxa: */
                          || (dtlb->tme_sparc_tlb_asi_mask & asi_mask_flags_slow) != 0

                          /* the DTLB entry does not allow fast transfers (an
                             atomic needs identical read and write mappings): */
                          || (memory != dtlb->tme_sparc_tlb_emulator_off_read)
                          || (memory == TME_EMULATOR_OFF_UNDEF)

                          /* the address is misaligned: */
                          || ((address % (64 / 8)) != 0)

                          )) {

    /* call the slow store function: */
    memory = tme_sparc64_ls(ic,
                            address,
                            &TME_SPARC_FORMAT3_RD,
                            (TME_SPARC_LSINFO_OP_ATOMIC
                             | TME_SPARC_LSINFO_ASI(TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF))
                             | TME_SPARC_LSINFO_A
                             | (64 / 8)));

    /* if the slow store function did the transfer: */
    if (__tme_predict_false(memory == TME_EMULATOR_OFF_UNDEF)) {

      /* unbusy the TLB entry; */
      tme_sparc_tlb_unbusy(dtlb);

      /* log the value loaded: */
      tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
      tme_sparc_log(ic, 1000, TME_OK,
                    (TME_SPARC_LOG_HANDLE(ic),
                     _("casxa 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
                     TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                     address,
                     TME_SPARC_FORMAT3_RD));
      TME_SPARC_INSN_OK;
    }
  }

  /* get the byte order of this transfer: */
  endian_little = asi_mask_data & TME_SPARC64_ASI_FLAG_LITTLE;
  if (__tme_predict_false(dtlb->tme_sparc_tlb_asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
    if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN) {
      /* this TLB entry inverts the apparent byte order of the transfer: */
      endian_little ^= TME_SPARC64_ASI_FLAG_LITTLE;
    }
    else {
      assert (FALSE);
    }
  }

  /* do the fast transfer: compare with rs2, swap in rd, read back the
     previous memory value into rd, all in bus byte order: */
  memory += address;
  reg_rs2 = TME_FIELD_MASK_EXTRACTU(TME_SPARC_INSN, TME_SPARC_FORMAT3_MASK_RS2);
  TME_SPARC_REG_INDEX(ic, reg_rs2);
  value_compare64 = ic->tme_sparc_ireg_uint64(reg_rs2);
  value_compare64 = (endian_little ? tme_htole_u64(value_compare64) : tme_htobe_u64(value_compare64));
  value_swap64 = TME_SPARC_FORMAT3_RD;
  value_swap64 = (endian_little ? tme_htole_u64(value_swap64) : tme_htobe_u64(value_swap64));
  value_read64 = tme_memory_atomic_cx64((tme_shared tme_uint64_t *) memory, value_compare64, value_swap64, dtlb->tme_sparc_tlb_bus_rwlock, sizeof(tme_uint64_t));
  value_read64 = (endian_little ? tme_letoh_u64(value_read64) : tme_betoh_u64(value_read64));
  TME_SPARC_FORMAT3_RD = value_read64;

  /* unbusy the DTLB entry: */
  tme_sparc_tlb_unbusy(dtlb);

  /* log the value loaded: */
  tme_sparc_recode_verify_mem_load(ic, &TME_SPARC_FORMAT3_RD);
  tme_sparc_log(ic, 1000, TME_OK,
                (TME_SPARC_LOG_HANDLE(ic),
                 _("casxa 0x%02x:0x%016" TME_PRIx64 ": 0x%016" TME_PRIx64),
                 TME_SPARC_ASI_MASK_WHICH(asi_mask_data & ~TME_SPARC_ASI_MASK_FLAG_UNDEF),
                 address,
                 TME_SPARC_FORMAT3_RD));
  TME_SPARC_INSN_OK;
}
/* the sparc64 mulx function: */
TME_SPARC_FORMAT3(tme_sparc64_mulx, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
/* get the operands: */
src1 = TME_SPARC_FORMAT3_RS1;
src2 = TME_SPARC_FORMAT3_RS2;
/* do the mulx: */
dst = src1 * src2;
TME_SPARC_FORMAT3_RD = dst;
TME_SPARC_INSN_OK;
}
/* the sparc64 sdivx function: 64-bit signed divide of rs1 by rs2 into
   rd.  division by zero raises the division_by_zero trap; the one host
   overflow case (most-negative value divided by -1) is handled
   explicitly and yields the dividend, avoiding undefined behavior: */
TME_SPARC_FORMAT3(tme_sparc64_sdivx, tme_uint64_t)
{
  tme_int64_t dividend;
  tme_int64_t divisor;
  tme_int64_t quotient;

  /* get the operands: */
  dividend = TME_SPARC_FORMAT3_RS1;
  divisor = TME_SPARC_FORMAT3_RS2;

  /* a zero divisor traps: */
  if (__tme_predict_false(divisor == 0)) {
    tme_sparc64_trap(ic, TME_SPARC64_TRAP_division_by_zero);
  }

  /* do the sdivx, special-casing INT64_MIN / -1: */
  if (divisor == -1 && dividend == (((tme_int64_t) 1) << 63)) {
    quotient = dividend;
  }
  else {
    quotient = dividend / divisor;
  }
  TME_SPARC_FORMAT3_RD = quotient;
  TME_SPARC_INSN_OK;
}
/* the sparc64 udivx function: */
TME_SPARC_FORMAT3(tme_sparc64_udivx, tme_uint64_t)
{
tme_uint64_t src1;
tme_uint64_t src2;
tme_uint64_t dst;
/* get the operands: */
src1 = TME_SPARC_FORMAT3_RS1;
src2 = TME_SPARC_FORMAT3_RS2;
/* do the udivx: */
if (__tme_predict_false(src2 == 0)) {
tme_sparc64_trap(ic, TME_SPARC64_TRAP_division_by_zero);
}
dst = src1 / src2;
TME_SPARC_FORMAT3_RD = dst;
TME_SPARC_INSN_OK;
}
/* this does a slow atomic operation (casa, casxa, ldstub/ldstuba,
   swap/swapa).  the operation runs as a load phase followed by a store
   phase, both through the slow memory-buffer cycle functions;
   ls->tme_sparc_ls_state tracks progress (the transfer size during the
   load phase, the size with TME_BIT(7) set during the store phase), so
   this function may be reentered until both phases have moved all of
   their data.

   fix over the generated original: in the swap/swapa case, the value
   converted for the store was the value just LOADED (value32) instead
   of the previous rd value (value_swap32) — leaving the rd value dead
   and storing the loaded value back unchanged, i.e. turning swap into
   a plain load.  the casa and casxa arms of the same switch convert
   value_swap32/value_swap64, which is the intended pattern: */
void
tme_sparc64_atomic(struct tme_sparc *ic, struct tme_sparc_ls *ls)
{
  tme_uint32_t endian_little;
  tme_uint32_t insn;
  tme_uint64_t value64;
  tme_uint64_t value_swap64;
  unsigned int reg_rs2;
  tme_uint32_t value32;
  tme_uint32_t value_swap32;
  tme_uint32_t size;

  /* if this is the beginning of the operation: */
  if (ls->tme_sparc_ls_state == 0) {

    /* start the load part of the operation: */
    ls->tme_sparc_ls_state = ls->tme_sparc_ls_size;
    assert (ls->tme_sparc_ls_state != 0
            && (ls->tme_sparc_ls_state & TME_BIT(7)) == 0);

    /* the load must start at the beginning of the buffer: */
    assert (ls->tme_sparc_ls_buffer_offset == 0);
  }

  /* if this is the load part of the operation: */
  if ((ls->tme_sparc_ls_state & TME_BIT(7)) == 0) {

    /* do one slow load cycle: */
    tme_sparc64_load(ic, ls);

    /* if the slow load cycle did not load all of the data: */
    if (__tme_predict_false(ls->tme_sparc_ls_size != 0)) {
      return;
    }

    /* get the byte order of this transfer: */
    endian_little = ls->tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_ENDIAN_LITTLE;

    /* dispatch on the op3 of the instruction: */
    insn = TME_SPARC_INSN;
    switch ((insn >> 19) & 0x3f) {
    case 0x3c: /* casa */

      /* finish the load part of the compare and swap: rd receives the
         loaded value; the old rd value is converted to bus order in
         case the store happens: */
      assert (ls->tme_sparc_ls_state == sizeof(tme_uint32_t));
      value32 = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0];
      value_swap32 = *ls->tme_sparc_ls_rd64;
      if (endian_little) {
        value32 = tme_letoh_u32(value32);
        value_swap32 = tme_htole_u32(value_swap32);
      }
      else {
        value32 = tme_betoh_u32(value32);
        value_swap32 = tme_htobe_u32(value_swap32);
      }
      *ls->tme_sparc_ls_rd64 = value32;

      /* if the comparison with rs2 fails, the operation ends here
         (no store part): */
      reg_rs2 = TME_FIELD_MASK_EXTRACTU(insn, TME_SPARC_FORMAT3_MASK_RS2);
      TME_SPARC_REG_INDEX(ic, reg_rs2);
      if (value32 != (tme_uint32_t) ic->tme_sparc_ireg_uint64(reg_rs2)) {
        return;
      }

      /* start the store part of the compare and swap: */
      ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0] = value_swap32;
      break;

    case 0x3e: /* casxa */

      /* finish the load part of the compare and swap: */
      assert (ls->tme_sparc_ls_state == sizeof(tme_uint64_t));
      value64 = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer64s[0];
      value_swap64 = *ls->tme_sparc_ls_rd64;
      if (endian_little) {
        value64 = tme_letoh_u64(value64);
        value_swap64 = tme_htole_u64(value_swap64);
      }
      else {
        value64 = tme_betoh_u64(value64);
        value_swap64 = tme_htobe_u64(value_swap64);
      }
      *ls->tme_sparc_ls_rd64 = value64;

      /* if the comparison with rs2 fails, the operation ends here
         (no store part): */
      reg_rs2 = TME_FIELD_MASK_EXTRACTU(insn, TME_SPARC_FORMAT3_MASK_RS2);
      TME_SPARC_REG_INDEX(ic, reg_rs2);
      if (value64 != (tme_uint64_t) ic->tme_sparc_ireg_uint64(reg_rs2)) {
        return;
      }

      /* start the store part of the compare and swap: */
      ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer64s[0] = value_swap64;
      break;

    case 0x0d: /* ldstub */
    case 0x1d: /* ldstuba */

      /* finish the load part of the ldstub: */
      assert (ls->tme_sparc_ls_state == sizeof(tme_uint8_t));
      *ls->tme_sparc_ls_rd64 = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[0];

      /* start the store part of the ldstub (store all-ones): */
      ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[0] = 0xff;
      break;

    /* otherwise, this must be swap: */
    default:
      assert (((insn >> 19) & 0x2f) == 0x0f /* swap, swapa */);

      /* finish the load part of the swap: rd receives the loaded value
         and the old rd value is converted to bus order for the store.
         NB: the conversion must take value_swap32 (the old rd value),
         not value32; converting value32 here would store the value
         just loaded back into memory, making the swap a plain load: */
      assert (ls->tme_sparc_ls_state == sizeof(tme_uint32_t));
      value32 = ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0];
      value_swap32 = *ls->tme_sparc_ls_rd64;
      if (endian_little) {
        value32 = tme_letoh_u32(value32);
        value_swap32 = tme_htole_u32(value_swap32);
      }
      else {
        value32 = tme_betoh_u32(value32);
        value_swap32 = tme_htobe_u32(value_swap32);
      }
      *ls->tme_sparc_ls_rd64 = value32;

      /* start the store part of the swap: */
      ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[0] = value_swap32;
      break;
    }

    /* start the store part of the operation: rewind the address to the
       beginning of the transfer and flip the state to the store phase: */
    size = ls->tme_sparc_ls_state;
    ls->tme_sparc_ls_address64 -= size;
    ls->tme_sparc_ls_size = size;
    ls->tme_sparc_ls_buffer_offset = 0;
    ls->tme_sparc_ls_state = size | TME_BIT(7);
  }

  /* this is the store part of the operation: */

  /* do one slow store cycle: */
  tme_sparc64_store(ic, ls);

  /* if the slow store cycle did not store all of the data: */
  if (__tme_predict_false(ls->tme_sparc_ls_size != 0)) {
    return;
  }
}
/* this does one slow load cycle: */
/* tme_sparc64_load: runs a single slow load bus cycle for the
   load/store operation described by ls.  the TLB entry attached to
   ls must already be busy and valid.  at most ls->tme_sparc_ls_size
   bytes are transferred, clipped to the end of the TLB entry's
   address range (and, for TLB entries without fast reads, to a
   64-bit alignment boundary); on a normal return the address,
   buffer offset, and remaining size in ls are advanced by the bytes
   actually transferred.  on a real bus fault the architecture bus
   fault handler is called and ls is not advanced: */
void
tme_sparc64_load(struct tme_sparc *ic,
struct tme_sparc_ls *ls)
{
struct tme_sparc_tlb *tlb;
tme_uint64_t address;
unsigned int cycle_size;
tme_bus_addr_t physical_address;
int shift;
int err;
/* get the TLB entry: */
tlb = ls->tme_sparc_ls_tlb;
/* the TLB entry must be busy and valid: */
assert (tme_bus_tlb_is_valid(&tlb->tme_sparc_tlb_bus_tlb));
/* start the bus cycle structure: */
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_type = TME_BUS_CYCLE_READ;
/* get the buffer: the cycle reads into the per-CPU memory buffer at
   the current buffer offset: */
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer = &ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[ls->tme_sparc_ls_buffer_offset];
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer_increment = 1;
/* get the current address: */
address = ls->tme_sparc_ls_address64;
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = address;
/* start the cycle size: clip the remaining transfer size to the last
   address covered by this TLB entry.  NB: the clip is done on
   (size - 1), with one added back afterwards, which avoids overflow
   when the TLB entry covers up to the last address in the address
   space: */
cycle_size = ls->tme_sparc_ls_size;
assert (cycle_size > 0);
cycle_size--;
cycle_size = TME_MIN(cycle_size, (((tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_last) - address)) + 1;
/* if this TLB entry allows fast reads: */
if (__tme_predict_true(tlb->tme_sparc_tlb_emulator_off_read != TME_EMULATOR_OFF_UNDEF)) {
/* do a read directly from emulator memory into the memory buffer: */
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;
tme_memory_bus_read_buffer((tlb->tme_sparc_tlb_emulator_off_read + (tme_uint64_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address),
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer,
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size,
tlb->tme_sparc_tlb_bus_rwlock,
sizeof(tme_uint8_t),
sizeof(tme_uint64_t));
}
/* otherwise, this TLB entry does not allow fast reads: */
else {
/* finish the cycle size: additionally clip it so that a single bus
   cycle never crosses a 64-bit alignment boundary: */
cycle_size = TME_MIN(cycle_size, 1 + ((~ (unsigned int) address) % sizeof(tme_uint64_t)));
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;
/* form the physical address for the bus cycle handler, by applying
   the TLB entry's offset and shift (a negative shift value means a
   left shift): */
physical_address = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address;
physical_address += tlb->tme_sparc_tlb_addr_offset;
shift = tlb->tme_sparc_tlb_addr_shift;
if (shift < 0) {
physical_address <<= (0 - shift);
}
else if (shift > 0) {
physical_address >>= shift;
}
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = physical_address;
/* finish the bus cycle structure: */
(*ic->_tme_sparc_ls_bus_cycle)(ic, ls);
tme_sparc_log(ic, 10000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("cycle-load%u 0x%016" TME_PRIx64),
(unsigned int) (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size * 8),
(tme_bus_addr64_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address));
/* callout the bus cycle.  the TLB must be unbusied and the callout
   lock dropped around the callout: */
tme_sparc_tlb_unbusy(tlb);
tme_sparc_callout_unlock(ic);
err = (*tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle)
(tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle_private,
&ls->tme_sparc_ls_bus_cycle);
tme_sparc_callout_relock(ic);
tme_sparc_tlb_busy(tlb);
/* the TLB entry can't have been invalidated before the load: */
assert (err != EBADF);
/* if the bus cycle didn't complete normally: */
if (err != TME_OK) {
/* if a real bus fault may have happened, instead of
some synchronous event: */
if (err != TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {
/* call the bus fault handlers: */
err = tme_bus_tlb_fault(&tlb->tme_sparc_tlb_bus_tlb, &ls->tme_sparc_ls_bus_cycle, err);
}
/* if some synchronous event has happened: */
if (err == TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {
/* after the currently executing instruction finishes, check
for external resets, halts, or interrupts: */
ic->_tme_sparc_instruction_burst_remaining = 0;
ic->_tme_sparc_instruction_burst_other = TRUE;
}
/* otherwise, if no real bus fault happened: */
else if (err == TME_OK) {
/* nothing to do */
}
/* otherwise, a real bus fault happened; dispatch it and return
   without advancing the transfer state: */
else {
(*ic->_tme_sparc_ls_bus_fault)(ic, ls, err);
return;
}
}
}
/* some data must have been transferred: */
assert (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size > 0);
/* update the transfer state by the bytes actually transferred: */
cycle_size = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size;
ls->tme_sparc_ls_address64 += cycle_size;
ls->tme_sparc_ls_buffer_offset += cycle_size;
ls->tme_sparc_ls_size -= cycle_size;
}
/* this does one slow store cycle: */
/* tme_sparc64_store: runs a single slow store bus cycle for the
   load/store operation described by ls, writing from the per-CPU
   memory buffer.  the TLB entry attached to ls must already be busy
   and valid.  at most ls->tme_sparc_ls_size bytes are transferred,
   clipped to the end of the TLB entry's address range (and, for TLB
   entries without fast writes, to a 64-bit alignment boundary); on a
   normal return the address, buffer offset, and remaining size in ls
   are advanced by the bytes actually transferred.  on a real bus
   fault the architecture bus fault handler is called and ls is not
   advanced.  the store half of an atomic operation must never reach
   a slow bus cycle that succeeds (see the abort below): */
void
tme_sparc64_store(struct tme_sparc *ic,
struct tme_sparc_ls *ls)
{
struct tme_sparc_tlb *tlb;
tme_uint64_t address;
unsigned int cycle_size;
tme_bus_addr_t physical_address;
int shift;
int err;
/* get the TLB entry: */
tlb = ls->tme_sparc_ls_tlb;
/* the TLB entry must be busy and valid: */
assert (tme_bus_tlb_is_valid(&tlb->tme_sparc_tlb_bus_tlb));
/* start the bus cycle structure: */
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_type = TME_BUS_CYCLE_WRITE;
/* get the buffer: the cycle writes from the per-CPU memory buffer at
   the current buffer offset: */
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer = &ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s[ls->tme_sparc_ls_buffer_offset];
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer_increment = 1;
/* get the current address: */
address = ls->tme_sparc_ls_address64;
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = address;
/* start the cycle size: clip the remaining transfer size to the last
   address covered by this TLB entry.  NB: the clip is done on
   (size - 1), with one added back afterwards, which avoids overflow
   when the TLB entry covers up to the last address in the address
   space: */
cycle_size = ls->tme_sparc_ls_size;
assert (cycle_size > 0);
cycle_size--;
cycle_size = TME_MIN(cycle_size, (((tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_last) - address)) + 1;
/* if this TLB entry allows fast writes: */
if (__tme_predict_true(tlb->tme_sparc_tlb_emulator_off_write != TME_EMULATOR_OFF_UNDEF)) {
/* do a write directly from the memory buffer into emulator memory: */
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;
tme_memory_bus_write_buffer((tlb->tme_sparc_tlb_emulator_off_write + (tme_uint64_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address),
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_buffer,
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size,
tlb->tme_sparc_tlb_bus_rwlock,
sizeof(tme_uint8_t),
sizeof(tme_uint64_t));
}
/* otherwise, this TLB entry does not allow fast writes: */
else {
/* finish the cycle size: additionally clip it so that a single bus
   cycle never crosses a 64-bit alignment boundary: */
cycle_size = TME_MIN(cycle_size, 1 + ((~ (unsigned int) address) % sizeof(tme_uint64_t)));
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size = cycle_size;
/* form the physical address for the bus cycle handler, by applying
   the TLB entry's offset and shift (a negative shift value means a
   left shift): */
physical_address = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address;
physical_address += tlb->tme_sparc_tlb_addr_offset;
shift = tlb->tme_sparc_tlb_addr_shift;
if (shift < 0) {
physical_address <<= (0 - shift);
}
else if (shift > 0) {
physical_address >>= shift;
}
ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address = physical_address;
/* finish the bus cycle structure: */
(*ic->_tme_sparc_ls_bus_cycle)(ic, ls);
tme_sparc_log(ic, 10000, TME_OK,
(TME_SPARC_LOG_HANDLE(ic),
_("cycle-store%u 0x%016" TME_PRIx64),
(unsigned int) (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size * 8),
(tme_bus_addr64_t) ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_address));
/* callout the bus cycle.  the TLB must be unbusied and the callout
   lock dropped around the callout: */
tme_sparc_tlb_unbusy(tlb);
tme_sparc_callout_unlock(ic);
err = (*tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle)
(tlb->tme_sparc_tlb_bus_tlb.tme_bus_tlb_cycle_private,
&ls->tme_sparc_ls_bus_cycle);
tme_sparc_callout_relock(ic);
tme_sparc_tlb_busy(tlb);
/* the TLB entry can't have been invalidated before the store: */
assert (err != EBADF);
/* if the bus cycle didn't complete normally: */
if (err != TME_OK) {
/* if a real bus fault may have happened, instead of
some synchronous event: */
if (err != TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {
/* call the bus fault handlers: */
err = tme_bus_tlb_fault(&tlb->tme_sparc_tlb_bus_tlb, &ls->tme_sparc_ls_bus_cycle, err);
}
/* if some synchronous event has happened: */
if (err == TME_BUS_CYCLE_SYNCHRONOUS_EVENT) {
/* after the currently executing instruction finishes, check
for external resets, halts, or interrupts: */
ic->_tme_sparc_instruction_burst_remaining = 0;
ic->_tme_sparc_instruction_burst_other = TRUE;
}
/* otherwise, if no real bus fault happened: */
else if (err == TME_OK) {
/* nothing to do */
}
/* otherwise, a real bus fault happened; dispatch it and return
   without advancing the transfer state: */
else {
(*ic->_tme_sparc_ls_bus_fault)(ic, ls, err);
return;
}
}
}
/* some data must have been transferred: */
assert (ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size > 0);
/* if this was an atomic operation: */
if (__tme_predict_false(ls->tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_ATOMIC)) {
/* we do not support atomic operations in TLB entries that
do not support both fast reads and fast writes. assuming
that all atomic operations are to regular memory, we
should always get fast read and fast write TLBs. when
we do not, it should only be because the memory has been
made read-only in the MMU. the write above was supposed
to cause a fault (with the instruction rerun later with
a fast read and fast write TLB entry), but instead it
succeeded and transferred some data. we have modified
memory and cannot recover: */
abort();
}
/* update the transfer state by the bytes actually transferred: */
cycle_size = ls->tme_sparc_ls_bus_cycle.tme_bus_cycle_size;
ls->tme_sparc_ls_address64 += cycle_size;
ls->tme_sparc_ls_buffer_offset += cycle_size;
ls->tme_sparc_ls_size -= cycle_size;
}
/* this does a slow load or store: */
/* tme_sparc64_ls: the slow path for 64-bit loads, stores, atomics,
   and fetches.  address_first is the first address of the transfer,
   _rd points to the 64-bit rd register (destination for loads,
   source for stores), and lsinfo packs the transfer size, any
   explicit ASI, and the TME_SPARC_LSINFO_ operation flags.  this
   detects faults (trapping through ic->_tme_sparc_ls_trap), maps
   the address and fills/maps a TLB entry as needed, and then either:
   - returns a fast-transfer emulator memory offset, and the caller
     completes the transfer itself;
   - completes the transfer with slow bus cycles and returns
     TME_EMULATOR_OFF_UNDEF (stores, atomics, and loads already
     completed by an ASI handler); or
   - does slow load cycles into the per-CPU memory buffer and
     returns a pointer to that buffer biased by -address_first, from
     which the caller completes the load: */
tme_shared tme_uint8_t *
tme_sparc64_ls(struct tme_sparc *ic,
tme_uint64_t const address_first,
tme_uint64_t *_rd,
tme_uint32_t lsinfo)
{
struct tme_sparc_ls ls;
tme_uint32_t size;
tme_uint32_t asi;
tme_uint32_t asi_mask_flags;
tme_uint32_t asi_mask;
tme_bus_context_t context;
tme_uint32_t tlb_hash;
unsigned long tlb_i;
unsigned long handler_i;
struct tme_sparc_tlb *tlb;
unsigned int cycle_type;
tme_uint64_t address;
void (*address_map) _TME_P((struct tme_sparc *, struct tme_sparc_ls *));
tme_bus_addr_t address_bus;
int rc;
const tme_shared tme_uint8_t *emulator_off;
unsigned int buffer_offset;
tme_uint64_t value;
tme_uint32_t value32;
/* we must not be replaying instructions: */
assert (tme_sparc_recode_verify_replay_last_pc(ic) == 0);
/* initialize the pointer to the rd register: */
ls.tme_sparc_ls_rd64 = _rd;
#ifndef NDEBUG
/* initialize the cycle function: */
ls.tme_sparc_ls_cycle = NULL;
/* initialize the TLB entry pointer: */
ls.tme_sparc_ls_tlb = NULL;
#endif /* NDEBUG */
/* initialize the faults: */
ls.tme_sparc_ls_faults = TME_SPARC_LS_FAULT_NONE;
/* initialize the address: */
ls.tme_sparc_ls_address64 = address_first;
/* initialize the size: */
size = TME_SPARC_LSINFO_WHICH_SIZE(lsinfo);
ls.tme_sparc_ls_size = size;
/* initialize the info: */
ls.tme_sparc_ls_lsinfo = lsinfo;
/* if the address is not aligned to the (power-of-two) transfer size: */
if (__tme_predict_false(((size - 1) & (tme_uint32_t) address_first) != 0)) {
ls.tme_sparc_ls_faults |= TME_SPARC_LS_FAULT_ADDRESS_NOT_ALIGNED;
}
/* otherwise, the address is aligned: */
else {
/* the transfer must not cross a 32-bit boundary: */
assert ((size - 1) <= (tme_uint32_t) ~address_first);
}
/* initialize the address map: */
ls.tme_sparc_ls_address_map = ic->_tme_sparc_ls_address_map;
/* if this is a ldd, ldda, std, or stda, or an instruction
that loads or stores in the same way: */
if (lsinfo & TME_SPARC_LSINFO_LDD_STD) {
/* if the rd register is odd: */
/* NB: we don't check the rd field in the instruction,
because the register number there might be encoded
in some way, or the architecture might ignore bit
zero in the rd field (for example, the sparc32 lddf).
instead, we test the rd register pointer: */
if (__tme_predict_false((ls.tme_sparc_ls_rd64
- ic->tme_sparc_ic.tme_ic_iregs.tme_ic_iregs_uint64s)
% 2)) {
ls.tme_sparc_ls_faults |= TME_SPARC_LS_FAULT_LDD_STD_RD_ODD;
}
}
/* if the ASI has been specified: */
if (lsinfo & TME_SPARC_LSINFO_A) {
/* get the ASI: */
asi = TME_SPARC_LSINFO_WHICH_ASI(lsinfo);
/* get the flags for this ASI: */
asi_mask_flags = ic->tme_sparc_asis[asi].tme_sparc_asi_mask_flags;
/* if this is a nonprivileged access: */
if (!TME_SPARC_PRIV(ic)) {
/* if this is a restricted ASI: */
if (__tme_predict_false((asi & TME_SPARC64_ASI_FLAG_UNRESTRICTED) == 0)) {
ls.tme_sparc_ls_faults |= TME_SPARC64_LS_FAULT_PRIVILEGED_ASI;
}
/* force a nonprivileged access with the ASI: */
asi_mask_flags |= TME_SPARC64_ASI_MASK_FLAG_INSN_AS_IF_USER;
}
/* make the ASI mask: */
if (asi_mask_flags & TME_SPARC64_ASI_MASK_FLAG_SPECIAL) {
asi_mask
= (asi_mask_flags
+ TME_SPARC_ASI_MASK_SPECIAL(asi,
(asi_mask_flags & TME_SPARC64_ASI_MASK_FLAG_INSN_AS_IF_USER) == 0));
}
else {
asi_mask = TME_SPARC64_ASI_MASK(asi, asi_mask_flags);
}
ls.tme_sparc_ls_asi_mask = asi_mask;
/* if this is a no-fault ASI with a non-load instruction: */
if (asi_mask & TME_SPARC64_ASI_FLAG_NO_FAULT) {
if (__tme_predict_false(lsinfo & (TME_SPARC_LSINFO_OP_ST | TME_SPARC_LSINFO_OP_ATOMIC))) {
ls.tme_sparc_ls_faults |= TME_SPARC64_LS_FAULT_NO_FAULT_NON_LOAD;
}
}
/* get the context for the alternate address space: primary by
   default, secondary or nucleus (context zero) as flagged: */
context = ic->tme_sparc_memory_context_primary;
if (asi_mask & TME_SPARC64_ASI_FLAG_SECONDARY) {
context = ic->tme_sparc_memory_context_secondary;
}
if (__tme_predict_false(asi_mask & TME_SPARC64_ASI_MASK_FLAG_INSN_NUCLEUS)) {
if (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_NUCLEUS) {
context = 0;
}
}
ls.tme_sparc_ls_context = context;
/* get the default TLB entry index (instruction TLB for fetches,
   data TLB otherwise): */
tlb_hash = TME_SPARC_TLB_HASH(ic, context, address_first);
if (lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
tlb_i = TME_SPARC_ITLB_ENTRY(ic, tlb_hash);
}
else {
tlb_i = TME_SPARC_DTLB_ENTRY(ic, tlb_hash);
}
ls.tme_sparc_ls_tlb_i = tlb_i;
/* call any special handler for this ASI.  the handler may change
   the TLB entry index, complete the transfer, or add faults: */
handler_i = ic->tme_sparc_asis[TME_SPARC_ASI_MASK_WHICH(asi_mask)].tme_sparc_asi_handler;
if (__tme_predict_false(handler_i != 0)) {
(*ic->_tme_sparc_ls_asi_handlers[handler_i])(ic, &ls);
}
/* get the final TLB entry index: */
tlb_i = ls.tme_sparc_ls_tlb_i;
}
/* otherwise, the ASI has not been specified: */
else {
/* get the default ASI mask: */
asi_mask = ic->tme_sparc_asi_mask_data;
/* add in any ASI mask flags from the instruction: */
/* NB: initially, TME_SPARC64_ASI_FLAG_NO_FAULT is the
only flag allowed, and only the flush instruction
can use it: */
assert (TME_SPARC_LSINFO_WHICH_ASI_FLAGS(lsinfo) == 0
|| (TME_SPARC_LSINFO_WHICH_ASI_FLAGS(lsinfo) == TME_SPARC64_ASI_FLAG_NO_FAULT
&& ((ic->_tme_sparc_insn >> 19) & 0x3f) == 0x3b)
);
asi_mask |= TME_SPARC_LSINFO_WHICH_ASI_FLAGS(lsinfo);
/* set the ASI mask: */
ls.tme_sparc_ls_asi_mask = asi_mask;
/* get the context: */
context = ic->tme_sparc_memory_context_default;
ls.tme_sparc_ls_context = context;
/* this must not be a fetch: */
assert ((lsinfo & TME_SPARC_LSINFO_OP_FETCH) == 0);
/* get the TLB entry index: */
tlb_hash = TME_SPARC_TLB_HASH(ic, context, address_first);
tlb_i = TME_SPARC_DTLB_ENTRY(ic, tlb_hash);
ls.tme_sparc_ls_tlb_i = tlb_i;
}
/* get the TLB entry pointer: */
tlb = &ic->tme_sparc_tlbs[tlb_i];
ls.tme_sparc_ls_tlb = tlb;
/* get the cycle type (stores and atomics need write access): */
/* NB: we deliberately set this once, now, since the lsinfo
may change once we start transferring: */
cycle_type
= ((lsinfo
& (TME_SPARC_LSINFO_OP_ST
| TME_SPARC_LSINFO_OP_ATOMIC))
? TME_BUS_CYCLE_WRITE
: TME_BUS_CYCLE_READ);
/* loop until the transfer is complete: */
for (;;) {
/* if we have faulted: */
if (__tme_predict_false(ls.tme_sparc_ls_faults != TME_SPARC_LS_FAULT_NONE)) {
/* unbusy this TLB, since the trap function may not return: */
tme_bus_tlb_unbusy(&tlb->tme_sparc_tlb_bus_tlb);
/* call the trap function, which will not return if it traps: */
(*ic->_tme_sparc_ls_trap)(ic, &ls);
/* rebusy this TLB: */
tme_bus_tlb_busy(&tlb->tme_sparc_tlb_bus_tlb);
/* since the trap function returned, it must have cleared the fault: */
assert (ls.tme_sparc_ls_faults == TME_SPARC_LS_FAULT_NONE);
}
/* if the transfer is complete, stop now: */
if (__tme_predict_false(ls.tme_sparc_ls_size == 0)) {
break;
}
/* get the current address: */
address = ls.tme_sparc_ls_address64;
/* if this TLB entry does not apply (wrong context, address out of
   range, ASI mask mismatch, or no usable access path for the cycle
   type) or is invalid: */
if ((tlb->tme_sparc_tlb_context != ls.tme_sparc_ls_context
&& tlb->tme_sparc_tlb_context <= ic->tme_sparc_memory_context_max)
|| address < (tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_first
|| address > (tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_last
|| !TME_SPARC_TLB_ASI_MASK_OK(tlb, ls.tme_sparc_ls_asi_mask)
|| ((tlb->tme_sparc_tlb_cycles_ok & cycle_type) == 0
&& (cycle_type == TME_BUS_CYCLE_READ
? tlb->tme_sparc_tlb_emulator_off_read
: tlb->tme_sparc_tlb_emulator_off_write) == TME_EMULATOR_OFF_UNDEF)
|| tme_bus_tlb_is_invalid(&tlb->tme_sparc_tlb_bus_tlb)) {
/* unbusy this TLB entry for filling: */
tme_bus_tlb_unbusy_fill(&tlb->tme_sparc_tlb_bus_tlb);
/* if we haven't mapped this address yet (the map is run at most
   once per call; ls.tme_sparc_ls_address_map is cleared after): */
address_map = ls.tme_sparc_ls_address_map;
if (address_map != NULL) {
ls.tme_sparc_ls_address_map = NULL;
/* count this mapping: */
if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
TME_SPARC_STAT(ic, tme_sparc_stats_itlb_map);
}
else {
TME_SPARC_STAT(ic, tme_sparc_stats_dtlb_map);
}
/* initialize the ASI mask and context on this TLB entry: */
/* NB that the ASI mask will likely be updated by either the
address mapping or the TLB fill: */
tlb->tme_sparc_tlb_asi_mask
= (ls.tme_sparc_ls_asi_mask
& ~TME_SPARC_ASI_MASK_FLAGS_AVAIL);
tlb->tme_sparc_tlb_context = ls.tme_sparc_ls_context;
/* NB: if the address mapping traps, we won't get a chance
to finish updating this TLB entry, which is currently in
an inconsistent state - but not necessarily an unusable
state. poison it to be unusable, including any recode
TLB entry: */
tlb->tme_sparc_tlb_addr_first = 1;
tlb->tme_sparc_tlb_addr_last = 0;
/* NOTE(review): TME_SPARC_HAVE_RECODE(ic) is used in #if, so it
   is presumably a compile-time constant macro -- verify in
   sparc-impl.h: */
#if TME_SPARC_HAVE_RECODE(ic)
if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
tme_sparc64_recode_chain_tlb_update(ic, &ls);
}
else {
tme_sparc64_recode_ls_tlb_update(ic, &ls);
}
#endif /* TME_SPARC_HAVE_RECODE(ic) */
#ifndef NDEBUG
/* initialize the mapping TLB entry with impossible sentinel
   values, so the asserts below can check that the address
   mapping really filled it in: */
ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_first = 0 - (tme_bus_addr_t) 1;
ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_last = 0 - (tme_bus_addr_t) 2;
ls.tme_sparc_ls_tlb_map.tme_bus_tlb_cycles_ok = 0;
ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset = 0 - (tme_bus_addr_t) 1;
#endif /* !NDEBUG */
/* map the address: */
(*address_map)(ic, &ls);
/* the address mapping must do any trapping itself: */
assert (ls.tme_sparc_ls_faults == TME_SPARC_LS_FAULT_NONE);
/* if the address mapping completed the transfer: */
if (__tme_predict_false(ls.tme_sparc_ls_size == 0)) {
/* rebusy the TLB entry: */
tme_sparc_tlb_busy(tlb);
/* stop now: */
break;
}
/* the mapping must have actually made a mapping: */
assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_first != 0 - (tme_bus_addr_t) 1);
assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_last != 0 - (tme_bus_addr_t) 2);
assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_cycles_ok != 0);
assert (ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset != 0 - (tme_bus_addr_t) 1);
}
/* count this fill: */
if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
TME_SPARC_STAT(ic, tme_sparc_stats_itlb_fill);
}
else {
TME_SPARC_STAT(ic, tme_sparc_stats_dtlb_fill);
}
/* get the bus address: */
address_bus = ls.tme_sparc_ls_address64 + ls.tme_sparc_ls_tlb_map.tme_bus_tlb_addr_offset;
/* fill the TLB entry (the callout lock must be dropped around the
   fill): */
tme_sparc_callout_unlock(ic);
rc = (*ic->_tme_sparc_bus_connection->tme_sparc_bus_tlb_fill)
(ic->_tme_sparc_bus_connection,
tlb,
ls.tme_sparc_ls_asi_mask,
address_bus,
cycle_type);
assert (rc == TME_OK);
tme_sparc_callout_relock(ic);
/* map the TLB entry: */
tme_bus_tlb_map(&tlb->tme_sparc_tlb_bus_tlb, address_bus,
&ls.tme_sparc_ls_tlb_map, ls.tme_sparc_ls_address64);
/* update any recode TLB entry: */
#if TME_SPARC_HAVE_RECODE(ic)
if (ls.tme_sparc_ls_lsinfo & TME_SPARC_LSINFO_OP_FETCH) {
tme_sparc64_recode_chain_tlb_update(ic, &ls);
}
else {
tme_sparc64_recode_ls_tlb_update(ic, &ls);
}
#endif /* TME_SPARC_HAVE_RECODE(ic) */
/* rebusy the TLB entry: */
tme_sparc_tlb_busy(tlb);
/* if this TLB entry is already invalid, retry from the top: */
if (tme_bus_tlb_is_invalid(&tlb->tme_sparc_tlb_bus_tlb)) {
continue;
}
}
/* this TLB entry must apply: */
assert ((tlb->tme_sparc_tlb_context == ls.tme_sparc_ls_context
|| tlb->tme_sparc_tlb_context > ic->tme_sparc_memory_context_max)
&& ls.tme_sparc_ls_address64 >= (tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_first
&& ls.tme_sparc_ls_address64 <= (tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_last
&& ((tlb->tme_sparc_tlb_cycles_ok & cycle_type)
|| (cycle_type == TME_BUS_CYCLE_READ
? tlb->tme_sparc_tlb_emulator_off_read
: tlb->tme_sparc_tlb_emulator_off_write) != TME_EMULATOR_OFF_UNDEF)
&& TME_SPARC_TLB_ASI_MASK_OK(tlb, ls.tme_sparc_ls_asi_mask));
/* get the current lsinfo: */
lsinfo = ls.tme_sparc_ls_lsinfo;
/* if we have to check the TLB: */
if (__tme_predict_true((lsinfo & TME_SPARC_LSINFO_NO_CHECK_TLB) == 0)) {
/* get the ASI mask for this TLB entry: */
asi_mask = tlb->tme_sparc_tlb_asi_mask;
/* if this TLB entry is for no-fault accesses only: */
if (__tme_predict_false(asi_mask & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
/* if this access is not using a no-fault ASI: */
if (__tme_predict_false((ls.tme_sparc_ls_asi_mask & TME_SPARC64_ASI_FLAG_NO_FAULT) == 0)) {
ls.tme_sparc_ls_faults |= TME_SPARC64_LS_FAULT_NO_FAULT_FAULT;
continue;
}
}
/* if this TLB entry is for addresses with side effects: */
if (asi_mask & TME_SPARC64_ASI_MASK_FLAG_TLB_SIDE_EFFECTS) {
/* if this access is using a no-fault ASI: */
/* NB: a flush may be implemented as a load with a no-fault ASI: */
if (__tme_predict_false(ls.tme_sparc_ls_asi_mask & TME_SPARC64_ASI_FLAG_NO_FAULT)) {
ls.tme_sparc_ls_faults |= TME_SPARC64_LS_FAULT_SIDE_EFFECTS;
continue;
}
}
/* if this TLB entry is for uncacheable addresses: */
if (asi_mask & TME_SPARC64_ASI_MASK_FLAG_TLB_UNCACHEABLE) {
/* if this is an atomic access: */
if (__tme_predict_false(lsinfo & TME_SPARC_LSINFO_OP_ATOMIC)) {
ls.tme_sparc_ls_faults |= TME_SPARC64_LS_FAULT_UNCACHEABLE;
continue;
}
}
/* see if this is a little-endian instruction.  the multiply
   shifts the ASI little-endian flag up into the lsinfo
   little-endian bit position: */
lsinfo
= ((lsinfo
& ~TME_SPARC_LSINFO_ENDIAN_LITTLE)
+ ((ls.tme_sparc_ls_asi_mask
& TME_SPARC64_ASI_FLAG_LITTLE)
#if TME_SPARC_LSINFO_ENDIAN_LITTLE < TME_SPARC64_ASI_FLAG_LITTLE
#error "TME_SPARC_LSINFO_ENDIAN_ values changed"
#endif
* (TME_SPARC_LSINFO_ENDIAN_LITTLE
/ TME_SPARC64_ASI_FLAG_LITTLE)));
/* if this TLB entry has its little-endian bit set, invert the
   transfer's endianness: */
if (__tme_predict_false(asi_mask & TME_SPARC64_ASI_FLAG_LITTLE)) {
assert (TME_SPARC_MEMORY_FLAGS(ic) & TME_SPARC_MEMORY_FLAG_HAS_INVERT_ENDIAN);
if (TRUE) {
lsinfo ^= TME_SPARC_LSINFO_ENDIAN_LITTLE;
}
}
}
/* if we might not have to call a slow cycle function: */
if (__tme_predict_true((lsinfo & TME_SPARC_LSINFO_SLOW_CYCLES) == 0)) {
/* if this TLB entry allows fast transfer of all of the addresses: */
if (__tme_predict_true(((tme_bus_addr64_t) tlb->tme_sparc_tlb_addr_last) >= (address_first + (ls.tme_sparc_ls_size - 1)))) {
emulator_off = tlb->tme_sparc_tlb_emulator_off_read;
if (lsinfo & TME_SPARC_LSINFO_OP_ST) {
emulator_off = tlb->tme_sparc_tlb_emulator_off_write;
}
/* atomics additionally require the read and write mappings to
   be the same memory: */
if (__tme_predict_true(emulator_off != TME_EMULATOR_OFF_UNDEF
&& (((lsinfo & TME_SPARC_LSINFO_OP_ATOMIC) == 0)
|| emulator_off == tlb->tme_sparc_tlb_emulator_off_write))) {
/* return and let our caller do the transfer: */
/* NB: we break const here: */
return ((tme_shared tme_uint8_t *) emulator_off);
}
}
/* we have to call a slow cycle function: */
lsinfo |= TME_SPARC_LSINFO_SLOW_CYCLES;
assert (ls.tme_sparc_ls_cycle == NULL);
/* assume that this operation will transfer the start of the buffer: */
buffer_offset = 0;
/* assume that this is a load or a fetch: */
ls.tme_sparc_ls_cycle = tme_sparc64_load;
/* if this is a store: */
if (lsinfo & TME_SPARC_LSINFO_OP_ST) {
/* put the (first) register to store in the memory buffer, in
   memory byte order: */
value = TME_SPARC_FORMAT3_RD;
value = ((lsinfo & TME_SPARC_LSINFO_ENDIAN_LITTLE) ? tme_htole_u64(value) : tme_htobe_u64(value));
ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer64s[0] = value;
/* find the offset in the memory buffer corresponding to the
first address: */
buffer_offset = sizeof(tme_uint64_t) - ls.tme_sparc_ls_size;
if ((lsinfo & TME_SPARC_LSINFO_ENDIAN_LITTLE)) {
buffer_offset = 0;
}
/* if this is a std or stda: */
if (lsinfo & TME_SPARC_LSINFO_LDD_STD) {
/* put the odd 32-bit register to store in the memory buffer
after the even 32-bit register. exactly where this is depends
on the architecture and on the byte order of the store: */
value32 = TME_SPARC_FORMAT3_RD_ODD(tme_ic_ireg_uint64);
if ((lsinfo & TME_SPARC_LSINFO_ENDIAN_LITTLE)) {
value32 = tme_htole_u32(value32);
ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[1] = value32;
buffer_offset = 0;
}
else {
value32 = tme_htobe_u32(value32);
ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer32s[(64 / 32)] = value32;
buffer_offset = sizeof(tme_uint64_t) - sizeof(tme_uint32_t);
}
}
/* set the cycle function: */
ls.tme_sparc_ls_cycle = tme_sparc64_store;
}
/* otherwise, if this is an atomic: */
else if (lsinfo & TME_SPARC_LSINFO_OP_ATOMIC) {
/* set the cycle function: */
ls.tme_sparc_ls_cycle = tme_sparc64_atomic;
}
/* set the buffer offset for the (first) slow cycle: */
ls.tme_sparc_ls_buffer_offset = buffer_offset;
/* clear the state for this operation: */
ls.tme_sparc_ls_state = 0;
}
/* assume that we won't have to check the TLB again: */
ls.tme_sparc_ls_lsinfo = lsinfo | TME_SPARC_LSINFO_NO_CHECK_TLB;
/* call the slow cycle function: */
(*ls.tme_sparc_ls_cycle)(ic, &ls);
}
/* if this was a load that has already completed, a store,
or an atomic, make sure our caller doesn't try to complete
a fast transfer: */
if (ls.tme_sparc_ls_lsinfo
& (TME_SPARC_LSINFO_LD_COMPLETED
| TME_SPARC_LSINFO_OP_ST
| TME_SPARC_LSINFO_OP_ATOMIC)) {
return (TME_EMULATOR_OFF_UNDEF);
}
/* otherwise, this was a load that did slow cycles into the
memory buffer and hasn't updated rd yet. return a pointer
to the memory buffer so our caller can complete the load: */
return (ic->tme_sparc_memory_buffer.tme_sparc_memory_buffer8s
- address_first);
}
#undef TME_SPARC_VERSION
#define TME_SPARC_VERSION(ic) _TME_SPARC_VERSION(ic)
#endif /* TME_HAVE_INT64_T */
/* automatically generated by sparc-misc-auto.sh, do not edit! */
/* the icc->conditions mapping: */
/* the icc->conditions mapping: */
/* _tme_sparc_conds_icc[icc] is a bitmask of the branch conditions
   satisfied by a given icc value.  the index is the four-bit icc
   (C in bit 0, V in bit 1, Z in bit 2, N in bit 3), and bit c of an
   entry (1 <= c <= 7) is set iff branch condition c is true:
   1='e' (Z), 2='le' (Z | (N ^ V)), 3='l' (N ^ V), 4='leu' (C | Z),
   5='cs' (C), 6='neg' (N), 7='vs' (V).  bit 0 (condition 'n',
   never) is never set, and conditions 8-15 are the complements of
   0-7.  NOTE(review): this decoding is inferred from the table
   values and the SPARC v8 Bicc condition definitions -- confirm
   against the users of this table: */
const tme_uint8_t _tme_sparc_conds_icc[16] = {
/* icc = ----: (none) */
0,
/* icc = ---C: leu, cs */
0 | TME_BIT(4) | TME_BIT(5),
/* icc = --V-: le, l, vs */
0 | TME_BIT(2) | TME_BIT(3) | TME_BIT(7),
/* icc = --VC: le, l, leu, cs, vs */
0 | TME_BIT(2) | TME_BIT(3) | TME_BIT(4) | TME_BIT(5) | TME_BIT(7),
/* icc = -Z--: e, le, leu */
0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(4),
/* icc = -Z-C: e, le, leu, cs */
0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(4) | TME_BIT(5),
/* icc = -ZV-: e, le, l, leu, vs */
0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(3) | TME_BIT(4) | TME_BIT(7),
/* icc = -ZVC: e, le, l, leu, cs, vs */
0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(3) | TME_BIT(4) | TME_BIT(5) | TME_BIT(7),
/* icc = N---: le, l, neg */
0 | TME_BIT(2) | TME_BIT(3) | TME_BIT(6),
/* icc = N--C: le, l, leu, cs, neg */
0 | TME_BIT(2) | TME_BIT(3) | TME_BIT(4) | TME_BIT(5) | TME_BIT(6),
/* icc = N-V-: neg, vs (N ^ V cancels) */
0 | TME_BIT(6) | TME_BIT(7),
/* icc = N-VC: leu, cs, neg, vs */
0 | TME_BIT(4) | TME_BIT(5) | TME_BIT(6) | TME_BIT(7),
/* icc = NZ--: e, le, l, leu, neg */
0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(3) | TME_BIT(4) | TME_BIT(6),
/* icc = NZ-C: e, le, l, leu, cs, neg */
0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(3) | TME_BIT(4) | TME_BIT(5) | TME_BIT(6),
/* icc = NZV-: e, le, leu, neg, vs */
0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(4) | TME_BIT(6) | TME_BIT(7),
/* icc = NZVC: e, le, leu, cs, neg, vs */
0 | TME_BIT(1) | TME_BIT(2) | TME_BIT(4) | TME_BIT(5) | TME_BIT(6) | TME_BIT(7),
};