Merge remote-tracking branch 'origin/GT-3525_SmallLocalsAnalysis'

This commit is contained in:
ghidorahrex 2020-03-02 11:20:43 -05:00
commit 5277e7acf7
29 changed files with 1664 additions and 728 deletions

View File

@ -179,6 +179,14 @@ bool Address::isContiguous(int4 sz,const Address &loaddr,int4 losz) const
return false;
}
/// If \b this is (originally) a \e join address, reevaluate it in terms of its new
/// \e offset and \e size, changing the space and offset if necessary.
/// \param size is the new size in bytes of the underlying object
void Address::renormalize(int4 size) {
if (base->getType() == IPTR_JOIN)
base->getManager()->renormalizeJoinAddress(*this,size);
}
/// This is usually used to build an address from an \b \<addr\>
/// tag, but it can be used to create an address from any tag
/// with the appropriate attributes

View File

@ -80,6 +80,7 @@ public:
int4 overlap(int4 skip,const Address &op,int4 size) const; ///< Determine how two address ranges overlap
bool isContiguous(int4 sz,const Address &loaddr,int4 losz) const; ///< Does \e this form a contiguous range with \e loaddr
bool isConstant(void) const; ///< Is this a \e constant \e value
void renormalize(int4 size); ///< Make sure there is a backing JoinRecord if \b this is in the \e join space
bool isJoin(void) const; ///< Is this a \e join \e value
void saveXml(ostream &s) const; ///< Save this to a stream as an XML tag
void saveXml(ostream &s,int4 size) const; ///< Save this and a size to a stream as an XML tag
@ -483,6 +484,24 @@ inline uintb pcode_left(uintb val,int4 sa) {
return val << sa;
}
/// \brief Calculate smallest mask that covers the given value
///
/// Calculate a mask that covers either the least significant byte, uint2, uint4, or uint8,
/// whatever is smallest.
/// \param val is the given value
/// \return the minimal mask
inline uintb minimalmask(uintb val)
{
if (val > 0xffffffff)
return ~((uintb)0);
if (val > 0xffff)
return 0xffffffff;
if (val > 0xff)
return 0xffff;
return 0xff;
}
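A minimal sketch (not part of the patch) checking the mask selection; it assumes the minimalmask() definition above and a 64-bit uintb as in the decompiler's types.h:
#include <cassert>
int main(void) {
  assert(minimalmask(0x7f) == 0xff);                  // fits in one byte
  assert(minimalmask(0x1234) == 0xffff);              // needs two bytes
  assert(minimalmask(0x12345678) == 0xffffffff);      // needs four bytes
  assert(minimalmask(((uintb)1)<<40) == ~((uintb)0)); // anything wider gets the full mask
  return 0;
}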
extern bool signbit_negative(uintb val,int4 size); ///< Return true if the sign-bit is set
extern uintb calc_mask(int4 size); ///< Calculate a mask for a given byte size
extern uintb uintb_negate(uintb in,int4 size); ///< Negate the \e sized value

View File

@ -25,8 +25,8 @@
vector<ArchitectureCapability *> ArchitectureCapability::thelist;
const uint4 ArchitectureCapability::majorversion = 3;
const uint4 ArchitectureCapability::minorversion = 5;
const uint4 ArchitectureCapability::majorversion = 4;
const uint4 ArchitectureCapability::minorversion = 0;
/// This builds a list of just the ArchitectureCapability extensions
void ArchitectureCapability::initialize(void)

View File

@ -166,6 +166,57 @@ int4 CastStrategyC::intPromotionType(const Varnode *vn) const
return UNKNOWN_PROMOTION;
}
bool CastStrategyC::isExtensionCastImplied(const PcodeOp *op,const PcodeOp *readOp) const
{
const Varnode *outVn = op->getOut();
if (outVn->isExplicit()) {
}
else {
if (readOp == (PcodeOp *) 0)
return false;
type_metatype metatype = outVn->getHigh()->getType()->getMetatype();
const Varnode *otherVn;
int4 slot;
switch (readOp->code()) {
case CPUI_PTRADD:
break;
case CPUI_INT_ADD:
case CPUI_INT_SUB:
case CPUI_INT_MULT:
case CPUI_INT_DIV:
case CPUI_INT_AND:
case CPUI_INT_OR:
case CPUI_INT_XOR:
case CPUI_INT_EQUAL:
case CPUI_INT_NOTEQUAL:
case CPUI_INT_LESS:
case CPUI_INT_LESSEQUAL:
case CPUI_INT_SLESS:
case CPUI_INT_SLESSEQUAL:
slot = readOp->getSlot(outVn);
otherVn = readOp->getIn(1 - slot);
// Check if the expression involves an explicit variable of the right integer type
if (otherVn->isConstant()) {
// Integer tokens do not naturally indicate their size, and
// integers that are bigger than the promotion size are NOT naturally extended.
if (otherVn->getSize() > promoteSize) // So if the integer is bigger than the promotion size
return false; // The extension cast on the other side must be explicit
}
else if (!otherVn->isExplicit())
return false;
if (otherVn->getHigh()->getType()->getMetatype() != metatype)
return false;
break;
default:
return false;
}
return true; // Everything is integer promotion
}
return false;
}
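For intuition only, a small sketch of the C integer promotion this heuristic leans on; it is generic C/C++ and not taken from the patch:
#include <cstdio>
int main(void) {
  unsigned char b = 200;
  // 'b' is promoted to int before the arithmetic and the comparison, so the
  // source reads naturally without an explicit cast; the heuristic above lets
  // the decompiler omit the corresponding extension cast in its output.
  if (b + 100 > 255)
    std::printf("promoted\n");
  return 0;
}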
Datatype *CastStrategyC::castStandard(Datatype *reqtype,Datatype *curtype,
bool care_uint_int,bool care_ptr_uint) const

View File

@ -88,6 +88,16 @@ public:
/// \return \b true if a cast is required before extending
virtual bool checkIntPromotionForExtension(const PcodeOp *op) const=0;
/// \brief Is the given ZEXT/SEXT cast implied by the expression it's in?
///
/// We've already determined that the given ZEXT or SEXT op can be viewed as a natural \e cast operation.
/// Determine if the cast is implied by the expression it's in and doesn't need to be printed.
/// \param op is the given ZEXT or SEXT PcodeOp
/// \param readOp is the PcodeOp consuming the output of the extension (or null)
/// \return \b true if the op as a cast does not need to be printed
virtual bool isExtensionCastImplied(const PcodeOp *op,const PcodeOp *readOp) const=0;
/// \brief Does there need to be a visible cast between the given data-types
///
/// The cast is from a \e current data-type to an \e expected data-type. NULL is returned
@ -151,6 +161,7 @@ public:
virtual int4 intPromotionType(const Varnode *vn) const;
virtual bool checkIntPromotionForCompare(const PcodeOp *op,int4 slot) const;
virtual bool checkIntPromotionForExtension(const PcodeOp *op) const;
virtual bool isExtensionCastImplied(const PcodeOp *op,const PcodeOp *readOp) const;
virtual Datatype *castStandard(Datatype *reqtype,Datatype *curtype,bool care_uint_int,bool care_ptr_uint) const;
virtual Datatype *arithmeticOutputStandard(const PcodeOp *op);
virtual bool isSubpieceCast(Datatype *outtype,Datatype *intype,uint4 offset) const;

View File

@ -3307,6 +3307,9 @@ void ActionDeadCode::propagateConsumed(vector<Varnode *> &worklist)
b = (a == 0) ? 0 : ~((uintb)0); // if any consumed, treat all input bits as consumed
pushConsumed(b,op->getIn(0), worklist);
break;
case CPUI_CALL:
case CPUI_CALLIND:
break; // Call output doesn't indicate consumption of inputs
default:
a = (outc==0) ? 0 : ~((uintb)0); // all or nothing
for(int4 i=0;i<op->numInput();++i)
@ -3348,6 +3351,59 @@ bool ActionDeadCode::neverConsumed(Varnode *vn,Funcdata &data)
return true;
}
/// \brief Determine how the given sub-function parameters are consumed
///
/// Set the consume property for each input Varnode of a CPUI_CALL or CPUI_CALLIND.
/// If the prototype is locked, assume parameters are entirely consumed.
/// \param fc is the call specification for the given sub-function
/// \param worklist will hold input Varnodes that can propagate their consume property
void ActionDeadCode::markConsumedParameters(FuncCallSpecs *fc,vector<Varnode *> &worklist)
{
PcodeOp *callOp = fc->getOp();
pushConsumed(~((uintb)0),callOp->getIn(0),worklist); // In all cases the first operand is fully consumed
if (fc->isInputLocked() || fc->isInputActive()) { // If the prototype is locked in, or in active recovery
for(int4 i=1;i<callOp->numInput();++i)
pushConsumed(~((uintb)0),callOp->getIn(i),worklist); // Treat all parameters as fully consumed
return;
}
for(int4 i=1;i<callOp->numInput();++i) {
Varnode *vn = callOp->getIn(i);
uintb consumeVal;
if (vn->isAutoLive())
consumeVal = ~((uintb)0);
else
consumeVal = minimalmask(vn->getNZMask());
pushConsumed(consumeVal,vn,worklist);
}
}
/// \brief Determine how the \e return \e values for the given function are consumed
///
/// Examine each CPUI_RETURN to see how the Varnode input is consumed.
/// If the function's prototype is locked, assume the Varnode is entirely consumed.
/// If there are no CPUI_RETURN ops, return 0
/// \param data is the given function
/// \return the bit mask of what is consumed
uintb ActionDeadCode::gatherConsumedReturn(Funcdata &data)
{
if (data.getFuncProto().isOutputLocked() || data.getActiveOutput() != (ParamActive *)0)
return ~((uintb)0);
list<PcodeOp *>::const_iterator iter,enditer;
enditer = data.endOp(CPUI_RETURN);
uintb consumeVal = 0;
for(iter=data.beginOp(CPUI_RETURN);iter!=enditer;++iter) {
PcodeOp *returnOp = *iter;
if (returnOp->isDead()) continue;
if (returnOp->numInput() > 1) {
Varnode *vn = returnOp->getIn(1);
consumeVal |= minimalmask(vn->getNZMask());
}
}
return consumeVal;
}
int4 ActionDeadCode::apply(Funcdata &data)
{
@ -3355,6 +3411,7 @@ int4 ActionDeadCode::apply(Funcdata &data)
list<PcodeOp *>::const_iterator iter;
PcodeOp *op;
Varnode *vn;
uintb returnConsume;
vector<Varnode *> worklist;
VarnodeLocSet::const_iterator viter,endviter;
const AddrSpaceManager *manage = data.getArch();
@ -3383,13 +3440,42 @@ int4 ActionDeadCode::apply(Funcdata &data)
}
}
returnConsume = gatherConsumedReturn(data);
for(iter=data.beginOpAlive();iter!=data.endOpAlive();++iter) {
op = *iter;
op->clearIndirectSource();
if (op->isCall() || (!op->isAssignment())) {
for(i=0;i<op->numInput();++i)
pushConsumed(~((uintb)0),op->getIn(i),worklist);
if (op->isCall()) {
if (op->code() == CPUI_CALLOTHER) {
for(i=0;i<op->numInput();++i)
pushConsumed(~((uintb)0),op->getIn(i),worklist);
}
// Postpone setting consumption on CALL and CALLIND inputs
if (!op->isAssignment())
continue;
}
else if (!op->isAssignment()) {
OpCode opc = op->code();
if (opc == CPUI_RETURN) {
pushConsumed(~((uintb)0),op->getIn(0),worklist);
for(i=1;i<op->numInput();++i)
pushConsumed(returnConsume,op->getIn(i),worklist);
}
else if (opc == CPUI_BRANCHIND) {
JumpTable *jt = data.findJumpTable(op);
uintb mask;
if (jt != (JumpTable *)0)
mask = jt->getSwitchVarConsume();
else
mask = ~((uintb)0);
pushConsumed(mask,op->getIn(0),worklist);
}
else {
for(i=0;i<op->numInput();++i)
pushConsumed(~((uintb)0),op->getIn(i),worklist);
}
// Postpone setting consumption on RETURN input
continue;
}
else {
for(i=0;i<op->numInput();++i) {
@ -3399,21 +3485,17 @@ int4 ActionDeadCode::apply(Funcdata &data)
}
}
vn = op->getOut();
if ((vn!=(Varnode *)0)&&(vn->isAutoLive()))
if (vn->isAutoLive())
pushConsumed(~((uintb)0),vn,worklist);
}
// Mark consumption of call parameters
for(i=0;i<data.numCalls();++i)
markConsumedParameters(data.getCallSpecs(i),worklist);
// Propagate the consume flags
while(!worklist.empty())
propagateConsumed(worklist);
// while(!worklist.empty()) {
// vn = worklist.back();
// worklist.pop_back();
// op = vn->Def();
// for(i=0;i<op->numInput();++i) {
// vn = op->Input(i);
// push_consumed(0x3fffffff,vn,worklist);
// }
// }
for(i=0;i<manage->numSpaces();++i) {
spc = manage->getSpace(i);
@ -3768,7 +3850,7 @@ int4 ActionOutputPrototype::apply(Funcdata &data)
{
ProtoParameter *outparam = data.getFuncProto().getOutput();
if ((!outparam->isTypeLocked())||outparam->isSizeTypeLocked()) {
PcodeOp *op = data.canonicalReturnOp();
PcodeOp *op = data.getFirstReturnOp();
vector<Varnode *> vnlist;
if (op != (PcodeOp *)0) {
for(int4 i=1;i<op->numInput();++i)
@ -4113,14 +4195,6 @@ bool ActionInferTypes::propagateGoodEdge(PcodeOp *op,int4 inslot,int4 outslot,Va
break;
case CPUI_COPY:
if ((inslot!=-1)&&(outslot!=-1)) return false; // Must propagate input <-> output
if (metain == TYPE_BOOL) {
if (inslot != -1) return false;
Varnode *othervn = op->getIn(outslot);
if (!othervn->isConstant()) return false;
uintb val = othervn->getOffset();
if (val > 1)
return false;
}
break;
case CPUI_MULTIEQUAL:
if ((inslot!=-1)&&(outslot!=-1)) return false; // Must propagate input <-> output
@ -4128,24 +4202,15 @@ bool ActionInferTypes::propagateGoodEdge(PcodeOp *op,int4 inslot,int4 outslot,Va
case CPUI_INT_LESS:
case CPUI_INT_LESSEQUAL:
if ((inslot==-1)||(outslot==-1)) return false; // Must propagate input <-> input
if (metain == TYPE_BOOL) return false;
break;
case CPUI_INT_EQUAL:
case CPUI_INT_NOTEQUAL:
if ((inslot==-1)||(outslot==-1)) return false; // Must propagate input <-> input
if (metain == TYPE_BOOL) { // Only propagate bool to 0 or 1 const
Varnode *othervn = op->getIn(outslot);
if (!othervn->isConstant()) return false;
uintb val = othervn->getOffset();
if (val > 1)
return false;
}
break;
case CPUI_LOAD:
case CPUI_STORE:
if ((inslot==0)||(outslot==0)) return false; // Don't propagate along this edge
if (invn->isSpacebase()) return false;
if (metain == TYPE_BOOL) return false;
break;
case CPUI_PTRADD:
if ((inslot==2)||(outslot==2)) return false; // Don't propagate along this edge
@ -4203,10 +4268,14 @@ bool ActionInferTypes::propagateTypeEdge(TypeFactory *typegrp,PcodeOp *op,int4 i
if (outvn->isAnnotation()) return false;
if (outvn->isTypeLock()) return false; // Can't propagate through typelock
invn = (inslot==-1) ? op->getOut() : op->getIn(inslot);
Datatype *alttype = invn->getTempType();
if (!propagateGoodEdge(op,inslot,outslot,invn))
return false;
Datatype *alttype = invn->getTempType();
if (alttype->getMetatype() == TYPE_BOOL) { // Only propagate boolean
if (outvn->getNZMask() > 1) // If we know output can only take boolean values
return false;
}
switch(op->code()) {
case CPUI_INDIRECT:
case CPUI_COPY:
@ -4334,7 +4403,7 @@ void PropagationState::step(void)
inslot = op->getSlot(vn);
return;
}
if (op == vn->getDef())
if (inslot == -1)
op = (PcodeOp *)0;
else
op = vn->getDef();
@ -4489,6 +4558,72 @@ void ActionInferTypes::propagateSpacebaseRef(Funcdata &data,Varnode *spcvn)
}
}
/// Return the CPUI_RETURN op with the most specialized data-type, which is not
/// dead and is not a special halt.
/// \param data is the function
/// \return the representative CPUI_RETURN op or NULL
PcodeOp *ActionInferTypes::canonicalReturnOp(Funcdata &data)
{
PcodeOp *res = (PcodeOp *)0;
Datatype *bestdt = (Datatype *)0;
list<PcodeOp *>::const_iterator iter,iterend;
iterend = data.endOp(CPUI_RETURN);
for(iter=data.beginOp(CPUI_RETURN);iter!=iterend;++iter) {
PcodeOp *retop = *iter;
if (retop->isDead()) continue;
if (retop->getHaltType()!=0) continue;
if (retop->numInput() > 1) {
Varnode *vn = retop->getIn(1);
Datatype *ct = vn->getTempType();
if (bestdt == (Datatype *)0) {
res = retop;
bestdt = ct;
}
else if (ct->typeOrder(*bestdt) < 0) {
res = retop;
bestdt = ct;
}
}
}
return res;
}
/// \brief Give data-types a chance to propagate between CPUI_RETURN operations.
///
/// Since a function is intended to return a single data-type, data-types effectively
/// propagate between the input Varnodes of the CPUI_RETURN ops, if there is more than one.
void ActionInferTypes::propagateAcrossReturns(Funcdata &data)
{
PcodeOp *op = canonicalReturnOp(data);
if (op == (PcodeOp *)0) return;
TypeFactory *typegrp = data.getArch()->types;
Varnode *baseVn = op->getIn(1);
Datatype *ct = baseVn->getTempType();
int4 baseSize = baseVn->getSize();
bool isBool = ct->getMetatype() == TYPE_BOOL;
list<PcodeOp *>::const_iterator iter,iterend;
iterend = data.endOp(CPUI_RETURN);
for(iter=data.beginOp(CPUI_RETURN);iter!=iterend;++iter) {
PcodeOp *retop = *iter;
if (retop == op) continue;
if (retop->isDead()) continue;
if (retop->getHaltType()!=0) continue;
if (retop->numInput() > 1) {
Varnode *vn = retop->getIn(1);
if (vn->getSize() != baseSize) continue;
if (isBool && vn->getNZMask() > 1) continue; // Don't propagate bool if value is not necessarily 0 or 1
if (vn->getTempType() == ct) continue; // Already propagated
vn->setTempType(ct);
#ifdef TYPEPROP_DEBUG
propagationDebug(typegrp->getArch(),vn,ct,retop,1,(Varnode *)0);
#endif
propagateOneType(typegrp, vn);
}
}
}
int4 ActionInferTypes::apply(Funcdata &data)
{
@ -4517,6 +4652,7 @@ int4 ActionInferTypes::apply(Funcdata &data)
if ((!vn->isWritten())&&(vn->hasNoDescend())) continue;
propagateOneType(typegrp,vn);
}
propagateAcrossReturns(data);
AddrSpace *spcid = data.getScopeLocal()->getSpaceId();
Varnode *spcvn = data.findSpacebaseInput(spcid);
if (spcvn != (Varnode *)0)

View File

@ -541,6 +541,8 @@ class ActionDeadCode : public Action {
static void pushConsumed(uintb val,Varnode *vn,vector<Varnode *> &worklist);
static void propagateConsumed(vector<Varnode *> &worklist);
static bool neverConsumed(Varnode *vn,Funcdata &data);
static void markConsumedParameters(FuncCallSpecs *fc,vector<Varnode *> &worklist);
static uintb gatherConsumedReturn(Funcdata &data);
public:
ActionDeadCode(const string &g) : Action(0,"deadcode",g) {} ///< Constructor
virtual Action *clone(const ActionGroupList &grouplist) const {
@ -929,6 +931,8 @@ class ActionInferTypes : public Action {
static void propagateOneType(TypeFactory *typegrp,Varnode *vn);
static void propagateRef(Funcdata &data,Varnode *vn,const Address &addr);
static void propagateSpacebaseRef(Funcdata &data,Varnode *spcvn);
static PcodeOp *canonicalReturnOp(Funcdata &data);
static void propagateAcrossReturns(Funcdata &data);
public:
ActionInferTypes(const string &g) : Action(0,"infertypes",g) {} ///< Constructor
virtual void reset(Funcdata &data) { localcount = 0; }

View File

@ -405,7 +405,7 @@ void Funcdata::clearCallSpecs(void)
FuncCallSpecs *Funcdata::getCallSpecs(const PcodeOp *op) const
{ // Get FuncCallSpecs from CALL op
{
int4 i;
const Varnode *vn;

View File

@ -399,7 +399,7 @@ public:
PcodeOp *newOp(int4 inputs,const SeqNum &sq); /// Allocate a new PcodeOp with sequence number
PcodeOp *newOpBefore(PcodeOp *follow,OpCode opc,Varnode *in1,Varnode *in2,Varnode *in3=(Varnode *)0);
PcodeOp *cloneOp(const PcodeOp *op,const SeqNum &seq); /// Clone a PcodeOp into \b this function
PcodeOp *canonicalReturnOp(void) const; /// Find a representative CPUI_RETURN op for \b this function
PcodeOp *getFirstReturnOp(void) const; /// Find a representative CPUI_RETURN op for \b this function
PcodeOp *newIndirectOp(PcodeOp *indeffect,const Address &addr,int4 size,uint4 extraFlags);
PcodeOp *newIndirectCreation(PcodeOp *indeffect,const Address &addr,int4 size,bool possibleout);
void markIndirectCreation(PcodeOp *indop,bool possibleOutput); ///< Convert CPUI_INDIRECT into an \e indirect \e creation

View File

@ -610,8 +610,8 @@ void Funcdata::installSwitchDefaults(void)
PcodeOp *indop = jt->getIndirectOp();
BlockBasic *ind = indop->getParent();
// Mark any switch blocks default edge
if (jt->getMostCommon() != ~((uint4)0)) // If a mostcommon was found
ind->setDefaultSwitch(jt->getMostCommon());
if (jt->getDefaultBlock() != -1) // If a default case is present
ind->setDefaultSwitch(jt->getDefaultBlock());
}
}

View File

@ -577,37 +577,20 @@ PcodeOp *Funcdata::cloneOp(const PcodeOp *op,const SeqNum &seq)
return newop;
}
/// Return the CPUI_RETURN op with the most specialized data-type, which is not
/// dead and is not a special halt. If HighVariables are not available, just
/// return the first CPUI_RETURN op.
/// \return the representative CPUI_RETURN op or NULL
PcodeOp *Funcdata::canonicalReturnOp(void) const
/// Return the first CPUI_RETURN operation that is not dead or an artificial halt
/// \return a representative CPUI_RETURN op or NULL if there are none
PcodeOp *Funcdata::getFirstReturnOp(void) const
{
bool hasnohigh = !isHighOn();
PcodeOp *res = (PcodeOp *)0;
Datatype *bestdt = (Datatype *)0;
list<PcodeOp *>::const_iterator iter,iterend;
iterend = endOp(CPUI_RETURN);
for(iter=beginOp(CPUI_RETURN);iter!=iterend;++iter) {
PcodeOp *retop = *iter;
if (retop->isDead()) continue;
if (retop->getHaltType()!=0) continue;
if (retop->numInput() > 1) {
if (hasnohigh) return retop;
Varnode *vn = retop->getIn(1);
Datatype *ct = vn->getHigh()->getType();
if (bestdt == (Datatype *)0) {
res = retop;
bestdt = ct;
}
else if (ct->typeOrder(*bestdt) < 0) {
res = retop;
bestdt = ct;
}
}
return retop;
}
return res;
return (PcodeOp *)0;
}
/// \brief Create new PcodeOp with 2 or 3 given operands

File diff suppressed because it is too large

View File

@ -13,7 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Abstract jump table, we do not specify how addresses are encoded in table
/// \file jumptable.hh
/// \brief Classes to support jump-tables and their recovery
#ifndef __CPUI_JUMPTABLE__
#define __CPUI_JUMPTABLE__
@ -23,62 +24,84 @@
class EmulateFunction;
struct JumptableThunkError : public LowlevelError { // Thunk that looks like a jumptable
/// Initialize the error with an explanatory string
JumptableThunkError(const string &s) : LowlevelError(s) {}
/// \brief Exception thrown for a thunk mechanism that looks like a jump-table
struct JumptableThunkError : public LowlevelError {
JumptableThunkError(const string &s) : LowlevelError(s) {} ///< Construct with an explanatory string
};
struct JumptableNotReachableError : public LowlevelError { // There are no legal flows to the switch
JumptableNotReachableError(const string &s) : LowlevelError(s) {}
/// \brief Exception thrown if there are no legal flows to a switch
struct JumptableNotReachableError : public LowlevelError {
JumptableNotReachableError(const string &s) : LowlevelError(s) {} ///< Constructor
};
/// \brief A description of where and how data was loaded from memory
///
/// This is a generic table description, giving the starting address
/// of the table, the size of an entry, and number of entries.
class LoadTable {
friend class EmulateFunction;
Address addr; // Starting address of table
int4 size; // Size of table entry
int4 num; // Number of entries in table;
Address addr; ///< Starting address of table
int4 size; ///< Size of table entry
int4 num; ///< Number of entries in table;
public:
LoadTable(void) {} // For use with restoreXml
LoadTable(const Address &ad,int4 sz) { addr = ad, size = sz; num = 1; }
LoadTable(const Address &ad,int4 sz,int4 nm) { addr = ad; size = sz; num = nm; }
bool operator<(const LoadTable &op2) const { return (addr < op2.addr); }
void saveXml(ostream &s) const;
void restoreXml(const Element *el,Architecture *glb);
static void collapseTable(vector<LoadTable> &table);
LoadTable(void) {} ///< Constructor for use with restoreXml
LoadTable(const Address &ad,int4 sz) { addr = ad, size = sz; num = 1; } ///< Constructor for a single entry table
LoadTable(const Address &ad,int4 sz,int4 nm) { addr = ad; size = sz; num = nm; } ///< Construct a full table
bool operator<(const LoadTable &op2) const { return (addr < op2.addr); } ///< Compare \b this with another table by address
void saveXml(ostream &s) const; ///< Save a description of \b this as a \<loadtable> XML tag
void restoreXml(const Element *el,Architecture *glb); ///< Read in \b this table from a \<loadtable> XML description
static void collapseTable(vector<LoadTable> &table); ///< Collapse a sequence of table descriptions
};
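A hedged usage sketch for LoadTable (it compiles only against the decompiler headers; 'spc' is an assumed address space, and the expected result assumes collapseTable() merges contiguous same-size entries):
void exampleCollapse(AddrSpace *spc) {
  vector<LoadTable> recs;
  recs.push_back(LoadTable(Address(spc,0x2000),4));  // one 4-byte entry at 0x2000
  recs.push_back(LoadTable(Address(spc,0x2004),4));  // the contiguous next entry
  recs.push_back(LoadTable(Address(spc,0x2008),4));
  LoadTable::collapseTable(recs);                    // expected: a single record covering all three entries
}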
/// \brief All paths from a (putative) switch variable to the CPUI_BRANCHIND
///
/// This is a container for intersecting paths during the construction of a
/// JumpModel. It contains every PcodeOp from some starting Varnode through
/// all paths to a specific BRANCHIND. The paths can split and rejoin. This also
/// keeps track of Varnodes that are present on \e all paths, as these are the
/// potential switch variables for the model.
class PathMeld {
/// \brief A PcodeOp in the path set associated with the last Varnode in the intersection
///
/// This links a PcodeOp to the point where the flow path to it split from the common path
struct RootedOp {
PcodeOp *op;
int4 rootVn;
RootedOp(PcodeOp *o,int4 root) { op = o; rootVn = root; }
PcodeOp *op; ///< An op in the container
int4 rootVn; ///< The index, within commonVn, of the Varnode at the split point
RootedOp(PcodeOp *o,int4 root) { op = o; rootVn = root; } ///< Constructor
};
vector<Varnode *> commonVn; // Varnodes in common with all paths
vector<RootedOp> opMeld; // All the ops for the melded paths
vector<Varnode *> commonVn; ///< Varnodes in common with all paths
vector<RootedOp> opMeld; ///< All the ops for the melded paths
void internalIntersect(vector<int4> &parentMap);
int4 meldOps(const vector<PcodeOp *> &path,int4 cutOff,const vector<int4> &parentMap);
void truncatePaths(int4 cutPoint);
public:
void set(const PathMeld &op2);
void set(const vector<PcodeOp *> &path,const vector<int4> &slot);
void set(PcodeOp *op,Varnode *vn);
void append(const PathMeld &op2);
void clear(void);
void meld(vector<PcodeOp *> &path,vector<int4> &slot);
int4 numCommonVarnode(void) const { return commonVn.size(); }
int4 numOps(void) const { return opMeld.size(); }
Varnode *getVarnode(int4 i) const { return commonVn[i]; }
Varnode *getOpParent(int4 i) const { return commonVn[ opMeld[i].rootVn ]; }
PcodeOp *getOp(int4 i) const { return opMeld[i].op; }
PcodeOp *getEarliestOp(int4 pos) const;
bool empty(void) const { return commonVn.empty(); }
void set(const PathMeld &op2); ///< Copy paths from another container
void set(const vector<PcodeOp *> &path,const vector<int4> &slot); ///< Initialize \b this to be a single path
void set(PcodeOp *op,Varnode *vn); ///< Initialize \b this container to a single node "path"
void append(const PathMeld &op2); ///< Append a new set of paths to \b this set of paths
void clear(void); ///< Clear \b this to be an empty container
void meld(vector<PcodeOp *> &path,vector<int4> &slot); ///< Meld a new path into \b this container
void markPaths(bool val,int4 startVarnode); ///< Mark the PcodeOps along paths from the given start Varnode
int4 numCommonVarnode(void) const { return commonVn.size(); } ///< Return the number of Varnodes common to all paths
int4 numOps(void) const { return opMeld.size(); } ///< Return the number of PcodeOps across all paths
Varnode *getVarnode(int4 i) const { return commonVn[i]; } ///< Get the i-th common Varnode
Varnode *getOpParent(int4 i) const { return commonVn[ opMeld[i].rootVn ]; } ///< Get the split-point for the i-th PcodeOp
PcodeOp *getOp(int4 i) const { return opMeld[i].op; } ///< Get the i-th PcodeOp
PcodeOp *getEarliestOp(int4 pos) const; ///< Find \e earliest PcodeOp that has a specific common Varnode as input
bool empty(void) const { return commonVn.empty(); } ///< Return \b true if \b this container holds no paths
};
/// \brief A light-weight emulator to calculate switch targets from switch variables
///
/// We assume we only have to store memory state for individual Varnodes and that dynamic
/// LOADs are resolved from the LoadImage. BRANCH and CBRANCH emulation will fail; there can
/// be only one execution path, although there can be multiple data-flow paths.
class EmulateFunction : public EmulatePcodeOp {
Funcdata *fd;
map<Varnode *,uintb> varnodeMap; // Lightweight memory state based on Varnodes
bool collectloads;
vector<LoadTable> loadpoints;
Funcdata *fd; ///< The function being emulated
map<Varnode *,uintb> varnodeMap; ///< Light-weight memory state based on Varnodes
bool collectloads; ///< Set to \b true if the emulator collects individual LOAD addresses
vector<LoadTable> loadpoints; ///< The set of collected LOAD records
virtual void executeLoad(void);
virtual void executeBranch(void);
virtual void executeBranchind(void);
@ -87,65 +110,81 @@ class EmulateFunction : public EmulatePcodeOp {
virtual void executeCallother(void);
virtual void fallthruOp(void);
public:
EmulateFunction(Funcdata *f);
void setLoadCollect(bool val) { collectloads = val; }
EmulateFunction(Funcdata *f); ///< Constructor
void setLoadCollect(bool val) { collectloads = val; } ///< Set whether we collect LOAD information
virtual void setExecuteAddress(const Address &addr);
virtual uintb getVarnodeValue(Varnode *vn) const;
virtual void setVarnodeValue(Varnode *vn,uintb val);
uintb emulatePath(uintb val,const PathMeld &pathMeld,PcodeOp *startop,Varnode *startvn);
void collectLoadPoints(vector<LoadTable> &res) const;
void collectLoadPoints(vector<LoadTable> &res) const; ///< Recover any LOAD table descriptions
};
class FlowInfo;
class JumpTable;
/// \brief A (putative) switch variable Varnode and a constraint imposed by a CBRANCH
///
/// The record constrains a specific Varnode. If the associated CBRANCH is followed
/// along the path that reaches the switch's BRANCHIND, then we have an explicit
/// description of the possible values the Varnode can hold.
class GuardRecord {
PcodeOp *cbranch; // instruction branching around switch
int4 indpath; // branch going to switch
CircleRange range; // range of values which goto switch
Varnode *vn; // Varnode being restricted
Varnode *baseVn; // Value being (quasi)copied to vn
int4 bitsPreserved; // Number of bits copied (all other bits are zero)
PcodeOp *cbranch; ///< The CBRANCH PcodeOp that branches around the switch
PcodeOp *readOp; ///< The immediate PcodeOp causing the restriction
int4 indpath; ///< Specific CBRANCH path going to the switch
CircleRange range; ///< Range of values causing the CBRANCH to take the path to the switch
Varnode *vn; ///< The Varnode being restricted
Varnode *baseVn; ///< Value being (quasi)copied to the Varnode
int4 bitsPreserved; ///< Number of bits copied (all other bits are zero)
public:
GuardRecord(PcodeOp *op,int4 path,const CircleRange &rng,Varnode *v);
PcodeOp *getBranch(void) const { return cbranch; }
int4 getPath(void) const { return indpath; }
const CircleRange &getRange(void) const { return range; }
bool isClear(void) const { return (cbranch == (PcodeOp *)0); }
void clear(void) { cbranch = (PcodeOp *)0; }
GuardRecord(PcodeOp *bOp,PcodeOp *rOp,int4 path,const CircleRange &rng,Varnode *v); ///< Constructor
PcodeOp *getBranch(void) const { return cbranch; } ///< Get the CBRANCH associated with \b this guard
PcodeOp *getReadOp(void) const { return readOp; } ///< Get the PcodeOp immediately causing the restriction
int4 getPath(void) const { return indpath; } ///< Get the specific path index going towards the switch
const CircleRange &getRange(void) const { return range; } ///< Get the range of values causing the switch path to be taken
void clear(void) { cbranch = (PcodeOp *)0; } ///< Mark \b this guard as unused
int4 valueMatch(Varnode *vn2,Varnode *baseVn2,int4 bitsPreserved2) const;
static int4 oneOffMatch(PcodeOp *op1,PcodeOp *op2);
static Varnode *quasiCopy(Varnode *vn,int4 &bitsPreserved,bool noWholeValue);
static Varnode *quasiCopy(Varnode *vn,int4 &bitsPreserved);
};
// This class represents a set of switch variables, and the values that they can take
/// \brief An iterator over values a switch variable can take
///
/// This iterator is intended to provide the start value for emulation
/// of a jump-table model to obtain the associated jump-table destination.
/// Each value can be associated with a starting Varnode and PcodeOp in
/// the function being emulated, via getStartVarnode() and getStartOp().
class JumpValues {
public:
virtual ~JumpValues(void) {}
virtual void truncate(int4 nm)=0;
virtual uintb getSize(void) const=0;
virtual bool contains(uintb val) const=0;
virtual void truncate(int4 nm)=0; ///< Truncate the number of values to the given number
virtual uintb getSize(void) const=0; ///< Return the number of values the variables can take
virtual bool contains(uintb val) const=0; ///< Return \b true if the given value is in the set of possible values
/// \brief Initialize \b this for iterating over the set of possible values
///
/// \return \b true if there are any values to iterate over
virtual bool initializeForReading(void) const=0;
virtual bool next(void) const=0;
virtual uintb getValue(void) const=0;
virtual Varnode *getStartVarnode(void) const=0;
virtual PcodeOp *getStartOp(void) const=0;
virtual bool isReversible(void) const=0; // Can the current value be reversed to get a label
virtual JumpValues *clone(void) const=0;
virtual bool next(void) const=0; ///< Advance the iterator, return \b true if there is another value
virtual uintb getValue(void) const=0; ///< Get the current value
virtual Varnode *getStartVarnode(void) const=0; ///< Get the Varnode associated with the current value
virtual PcodeOp *getStartOp(void) const=0; ///< Get the PcodeOp associated with the current value
virtual bool isReversible(void) const=0; ///< Return \b true if the current value can be reversed to get a label
virtual JumpValues *clone(void) const=0; ///< Clone \b this iterator
};
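A hedged sketch of the iterator protocol declared above (not from the patch); walkValues() is a hypothetical helper:
void walkValues(const JumpValues *vals) {
  if (!vals->initializeForReading()) return;   // nothing to iterate
  do {
    uintb v = vals->getValue();                // current possible switch-variable value
    (void)v;   // a model would emulate from vals->getStartOp()/getStartVarnode() with this value
  } while (vals->next());                      // advance; returns false when exhausted
}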
// This class implements a single entry switch variable that can take a range of values
/// \brief A single-entry switch variable that can take a range of values
class JumpValuesRange : public JumpValues {
protected:
CircleRange range; // Acceptable range of values for normalvn
Varnode *normqvn;
PcodeOp *startop;
mutable uintb curval;
CircleRange range; ///< Acceptable range of values for the normalized switch variable
Varnode *normqvn; ///< Varnode representing the normalized switch variable
PcodeOp *startop; ///< First PcodeOp in the jump-table calculation
mutable uintb curval; ///< The current value pointed to by the iterator
public:
void setRange(const CircleRange &rng) { range = rng; }
void setStartVn(Varnode *vn) { normqvn = vn; }
void setStartOp(PcodeOp *op) { startop = op; }
virtual void truncate(int4 nm); ///< Truncate the number of values to the given number
void setRange(const CircleRange &rng) { range = rng; } ///< Set the range of values explicitly
void setStartVn(Varnode *vn) { normqvn = vn; } ///< Set the normalized switch Varnode explicitly
void setStartOp(PcodeOp *op) { startop = op; } ///< Set the starting PcodeOp explicitly
virtual void truncate(int4 nm);
virtual uintb getSize(void) const;
virtual bool contains(uintb val) const;
virtual bool initializeForReading(void) const;
@ -157,17 +196,19 @@ public:
virtual JumpValues *clone(void) const;
};
// This class extends having a single entry switch variable with range and
// adds a second entry point that takes only a single value
class JumpValuesRangeDefault : public JumpValuesRange { // Range like model1, but with extra default value
uintb extravalue;
Varnode *extravn;
PcodeOp *extraop;
mutable bool lastvalue;
/// \brief A jump-table starting range with two possible execution paths
///
/// This extends the basic JumpValuesRange having a single entry switch variable and
/// adds a second entry point that takes only a single value. This value comes last in the iteration.
class JumpValuesRangeDefault : public JumpValuesRange {
uintb extravalue; ///< The extra value
Varnode *extravn; ///< The starting Varnode associated with the extra value
PcodeOp *extraop; ///< The starting PcodeOp associated with the extra value
mutable bool lastvalue; ///< \b true if the extra value has been visited by the iterator
public:
void setExtraValue(uintb val) { extravalue = val; }
void setDefaultVn(Varnode *vn) { extravn = vn; }
void setDefaultOp(PcodeOp *op) { extraop = op; }
void setExtraValue(uintb val) { extravalue = val; } ///< Set the extra value explicitly
void setDefaultVn(Varnode *vn) { extravn = vn; } ///< Set the associated start Varnode
void setDefaultOp(PcodeOp *op) { extraop = op; } ///< Set the associated start PcodeOp
virtual uintb getSize(void) const;
virtual bool contains(uintb val) const;
virtual bool initializeForReading(void) const;
@ -178,61 +219,138 @@ public:
virtual JumpValues *clone(void) const;
};
// This class represents the entire recovery process, recognizing the model, tracing
// from the switch entry to the address, and folding in guards
/// \brief A jump-table execution model
///
/// This class holds details of the model and recovers these details in various stages.
/// The model concepts include:
/// - Address Table, the set of destination addresses the jump-table can produce.
/// - Normalized Switch Variable, the Varnode with the most restricted set of values used
/// by the model to produce the destination addresses.
/// - Unnormalized Switch Variable, the Varnode being switched on, as seen in the decompiler output.
/// - Case labels, switch variable values associated with specific destination addresses.
/// - Guards, CBRANCH ops that enforce the normalized switch variable's value range.
class JumpModel {
protected:
JumpTable *jumptable; // The jumptable that is building this model
JumpTable *jumptable; ///< The jump-table that is building \b this model
public:
JumpModel(JumpTable *jt) { jumptable = jt; }
virtual ~JumpModel(void) {}
virtual bool isOverride(void) const=0;
virtual int4 getTableSize(void) const=0;
JumpModel(JumpTable *jt) { jumptable = jt; } ///< Construct given a parent jump-table
virtual ~JumpModel(void) {} ///< Destructor
virtual bool isOverride(void) const=0; ///< Return \b true if \b this model was manually overridden
virtual int4 getTableSize(void) const=0; ///< Return the number of entries in the address table
/// \brief Attempt to recover details of the model, given a specific BRANCHIND
///
/// This generally recovers the normalized switch variable and any guards.
/// \param fd is the function containing the switch
/// \param indop is the given BRANCHIND
/// \param matchsize is the expected number of address table entries to recover, or 0 for no expectation
/// \param maxtablesize is the maximum number of address table entries to allow in the model
/// \return \b true if details of the model were successfully recovered
virtual bool recoverModel(Funcdata *fd,PcodeOp *indop,uint4 matchsize,uint4 maxtablesize)=0;
/// \brief Construct the explicit list of target addresses (the Address Table) from \b this model
///
/// The addresses produced all come from the BRANCHIND and may not be deduped. Alternate guard
/// destinations are not yet included.
/// \param fd is the function containing the switch
/// \param indop is the root BRANCHIND of the switch
/// \param addresstable will hold the list of Addresses
/// \param loadpoints if non-null will hold LOAD table information used by the model
virtual void buildAddresses(Funcdata *fd,PcodeOp *indop,vector<Address> &addresstable,vector<LoadTable> *loadpoints) const=0;
/// \brief Recover the unnormalized switch variable
///
/// The normalized switch variable must already be recovered. The amount of normalization between
/// the two switch variables can be restricted.
/// \param maxaddsub is a restriction on arithmetic operations
/// \param maxleftright is a restriction on shift operations
/// \param maxext is a restriction on extension operations
virtual void findUnnormalized(uint4 maxaddsub,uint4 maxleftright,uint4 maxext)=0;
/// \brief Recover \e case labels associated with the Address table
///
/// The unnormalized switch variable must already be recovered. Values that the normalized
/// switch variable can hold are walked back to obtain the values that the unnormalized switch
/// variable would hold. Labels are returned in the order provided by the normalized switch
/// variable iterator JumpValues.
/// \param fd is the function containing the switch
/// \param addresstable is the address table (used to label code blocks with bad or missing labels)
/// \param label will hold recovered labels in JumpValues order
/// \param orig is the JumpModel to use for the JumpValues iterator
virtual void buildLabels(Funcdata *fd,vector<Address> &addresstable,vector<uintb> &label,const JumpModel *orig) const=0;
virtual void foldInNormalization(Funcdata *fd,PcodeOp *indop)=0;
/// \brief Do normalization of the given switch specific to \b this model.
///
/// The PcodeOp machinery is removed so it looks like the CPUI_BRANCHIND simply takes the
/// switch variable as an input Varnode and automatically interprets its values to reach
/// the correct destination.
/// \param fd is the function containing the switch
/// \param indop is the given switch as a CPUI_BRANCHIND
/// \return the Varnode holding the final unnormalized switch variable
virtual Varnode *foldInNormalization(Funcdata *fd,PcodeOp *indop)=0;
/// \brief Eliminate any \e guard code involved in computing the switch destination
///
/// We now think of the BRANCHIND as encompassing any guard function.
/// \param fd is the function containing the switch
/// \param jump is the JumpTable owning \b this model.
virtual bool foldInGuards(Funcdata *fd,JumpTable *jump)=0;
/// \brief Perform a sanity check on recovered addresses
///
/// Individual addresses are checked against the function or its program to determine
/// if they are reasonable. This method can optionally remove addresses from the table.
/// If it does so, the underlying model is changed to reflect the removal.
/// \param fd is the function containing the switch
/// \param indop is the root BRANCHIND of the switch
/// \param addresstable is the list of recovered Addresses, which may be modified
/// \return \b true if there are (at least some) reasonable addresses in the table
virtual bool sanityCheck(Funcdata *fd,PcodeOp *indop,vector<Address> &addresstable)=0;
virtual JumpModel *clone(JumpTable *jt) const=0;
virtual void clear(void) {}; // Clear any non-permanent aspects of the model
virtual void saveXml(ostream &s) const {} // For use with override models
virtual void restoreXml(const Element *el,Architecture *glb) {} // For use with override models
virtual JumpModel *clone(JumpTable *jt) const=0; ///< Clone \b this model
virtual void clear(void) {} ///< Clear any non-permanent aspects of the model
virtual void saveXml(ostream &s) const {} ///< Save this model as an XML tag
virtual void restoreXml(const Element *el,Architecture *glb) {} ///< Restore \b this model from an XML tag
};
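To ground the concepts listed above, a hedged source-level illustration (generic C++, not drawn from any particular binary): 'x' is the unnormalized switch variable, the compiler typically guards the normalized value (x - 10) against the table size before the indirect jump, the address table holds one destination per case, and 10..13 are the case labels:
int describe(int x) {
  switch (x) {
    case 10: return 0;
    case 11: return 1;
    case 12: return 2;
    case 13: return 3;
    default: return -1;   // default destination, reached through the guard
  }
}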
// This class treats the branch indirection variable as the switch variable, and recovers
// its possible values from the existing block structure
/// \brief A trivial jump-table model, where the BRANCHIND input Varnode is the switch variable
///
/// This class treats the input Varnode to the BRANCHIND as the switch variable, and recovers
/// its possible values from the existing block structure. This is used when the flow following
/// fork recovers destination addresses, but the switch normalization action is unable to recover
/// the model.
class JumpModelTrivial : public JumpModel {
uint4 size;
uint4 size; ///< Number of addresses in the table as reported by the JumpTable
public:
JumpModelTrivial(JumpTable *jt) : JumpModel(jt) { size = 0; }
JumpModelTrivial(JumpTable *jt) : JumpModel(jt) { size = 0; } ///< Construct given a parent JumpTable
virtual bool isOverride(void) const { return false; }
virtual int4 getTableSize(void) const { return size; }
virtual bool recoverModel(Funcdata *fd,PcodeOp *indop,uint4 matchsize,uint4 maxtablesize);
virtual void buildAddresses(Funcdata *fd,PcodeOp *indop,vector<Address> &addresstable,vector<LoadTable> *loadpoints) const;
virtual void findUnnormalized(uint4 maxaddsub,uint4 maxleftright,uint4 maxext) {}
virtual void buildLabels(Funcdata *fd,vector<Address> &addresstable,vector<uintb> &label,const JumpModel *orig) const;
virtual void foldInNormalization(Funcdata *fd,PcodeOp *indop) {}
virtual Varnode *foldInNormalization(Funcdata *fd,PcodeOp *indop) { return (Varnode *)0; }
virtual bool foldInGuards(Funcdata *fd,JumpTable *jump) { return false; }
virtual bool sanityCheck(Funcdata *fd,PcodeOp *indop,vector<Address> &addresstable) { return true; }
virtual JumpModel *clone(JumpTable *jt) const;
};
// This is the basic switch model. In brief
// 1) Straight-line calculation from switch variable to BRANCHIND
// 2) Switch variable is bounded by one or more "guards" that branch around the BRANCHIND
// 3) Recover unnormalized switch from bounded switch, through some basic transforms
/// \brief The basic switch model
///
/// This is the most common model:
/// - A straight-line calculation from switch variable to BRANCHIND
/// - The switch variable is bounded by one or more \e guards that branch around the BRANCHIND
/// - The unnormalized switch variable is recovered from the normalized variable through some basic transforms
class JumpBasic : public JumpModel {
protected:
JumpValuesRange *jrange;
PathMeld pathMeld; // Set of PcodeOps and Varnodes producing the final switch addresses
vector<GuardRecord> selectguards;
int4 varnodeIndex; // Position of the normalized switch varnode within PathMeld
Varnode *normalvn; // The normalized switch varnode
Varnode *switchvn; // The unnormalized switch varnode
static bool isprune(Varnode *vn);
static bool ispoint(Varnode *vn);
JumpValuesRange *jrange; ///< Range of values for the (normalized) switch variable
PathMeld pathMeld; ///< Set of PcodeOps and Varnodes producing the final target addresses
vector<GuardRecord> selectguards; ///< Any guards associated with \b this model
int4 varnodeIndex; ///< Position of the normalized switch Varnode within PathMeld
Varnode *normalvn; ///< Normalized switch Varnode
Varnode *switchvn; ///< Unnormalized switch Varnode
static bool isprune(Varnode *vn); ///< Do we prune here in our depth-first search for the normalized switch variable
static bool ispoint(Varnode *vn); ///< Is it possible for the given Varnode to be a switch variable?
static int4 getStride(Varnode *vn); ///< Get the step/stride associated with the Varnode
static uintb backup2Switch(Funcdata *fd,uintb output,Varnode *outvn,Varnode *invn);
void findDeterminingVarnodes(PcodeOp *op,int4 slot);
@ -241,11 +359,23 @@ protected:
void findSmallestNormal(uint4 matchsize);
void findNormalized(Funcdata *fd,BlockBasic *rootbl,int4 pathout,uint4 matchsize,uint4 maxtablesize);
void markFoldableGuards();
void markModel(bool val); ///< Mark (or unmark) all PcodeOps involved in the model
bool flowsOnlyToModel(Varnode *vn,PcodeOp *trailOp); ///< Check if the given Varnode flows to anything other than \b this model
/// \brief Eliminate the given guard to \b this switch
///
/// We \e disarm the guard instructions by making the guard condition
/// always \b false. If the simplification removes the unusable branches,
/// we are left with only one path through the switch.
/// \param fd is the function containing the switch
/// \param guard is a description of the particular guard mechanism
/// \param jump is the JumpTable owning \b this model
/// \return \b true if a change was made to data-flow
virtual bool foldInOneGuard(Funcdata *fd,GuardRecord &guard,JumpTable *jump);
public:
JumpBasic(JumpTable *jt) : JumpModel(jt) { jrange = (JumpValuesRange *)0; }
const PathMeld &getPathMeld(void) const { return pathMeld; }
const JumpValuesRange *getValueRange(void) const { return jrange; }
JumpBasic(JumpTable *jt) : JumpModel(jt) { jrange = (JumpValuesRange *)0; } ///< Construct given a parent JumpTable
const PathMeld &getPathMeld(void) const { return pathMeld; } ///< Get the set of possible paths to the switch
const JumpValuesRange *getValueRange(void) const { return jrange; } ///< Get the normalized value iterator
virtual ~JumpBasic(void);
virtual bool isOverride(void) const { return false; }
virtual int4 getTableSize(void) const { return jrange->getSize(); }
@ -253,54 +383,63 @@ public:
virtual void buildAddresses(Funcdata *fd,PcodeOp *indop,vector<Address> &addresstable,vector<LoadTable> *loadpoints) const;
virtual void findUnnormalized(uint4 maxaddsub,uint4 maxleftright,uint4 maxext);
virtual void buildLabels(Funcdata *fd,vector<Address> &addresstable,vector<uintb> &label,const JumpModel *orig) const;
virtual void foldInNormalization(Funcdata *fd,PcodeOp *indop);
virtual Varnode *foldInNormalization(Funcdata *fd,PcodeOp *indop);
virtual bool foldInGuards(Funcdata *fd,JumpTable *jump);
virtual bool sanityCheck(Funcdata *fd,PcodeOp *indop,vector<Address> &addresstable);
virtual JumpModel *clone(JumpTable *jt) const;
virtual void clear(void);
};
// This model expects two paths to the switch, 1 from a default value, 1 from the other values that hit the switch
// If A is the guarding control-flow block, C is the block setting the default value, and S the switch block itself,
// We expect one of the following situations:
// A -> C or S and C -> S
// A -> C or D and C -> S D -> S
// C -> S and S -> A A -> S or "out of loop", i.e. S is in a loop, and the guard block doubles as the loop condition
/// \brief A basic jump-table model with an added default address path
///
/// This model expects two paths to the switch: one from a default value and one from the other values that hit the switch.
/// If A is the guarding control-flow block, C is the block setting the default value, and S the switch block itself,
/// We expect one of the following situations:
/// - A -> C or S and C -> S
/// - A -> C or D and C -> S D -> S
/// - C -> S and S -> A A -> S or "out of loop", i.e. S is in a loop, and the guard block doubles as the loop condition
///
/// This builds on the analysis performed for JumpBasic, which fails because there are too many paths
/// to the BRANCHIND, preventing the guards from being interpreted properly. This class expects to reuse
/// the PathMeld calculation from JumpBasic.
class JumpBasic2 : public JumpBasic {
Varnode *extravn;
PathMeld origPathMeld;
Varnode *extravn; ///< The extra Varnode holding the default value
PathMeld origPathMeld; ///< The set of paths that produce non-default addresses
bool checkNormalDominance(void) const;
virtual bool foldInOneGuard(Funcdata *fd,GuardRecord &guard,JumpTable *jump);
public:
JumpBasic2(JumpTable *jt) : JumpBasic(jt) {}
void initializeStart(const PathMeld &pathMeld);
JumpBasic2(JumpTable *jt) : JumpBasic(jt) {} ///< Constructor
void initializeStart(const PathMeld &pathMeld); ///< Pass in the prior PathMeld calculation
virtual bool recoverModel(Funcdata *fd,PcodeOp *indop,uint4 matchsize,uint4 maxtablesize);
virtual void findUnnormalized(uint4 maxaddsub,uint4 maxleftright,uint4 maxext);
virtual JumpModel *clone(JumpTable *jt) const;
virtual void clear(void);
};
// This is the basic model for manually specifying the list of addresses the switch goes to
// It tries to repurpose some of the analysis that JumpBasic does to recover what the switch variable
// is, but will revert to the trivial model if it can't find a suitable switch variable
/// \brief A basic jump-table model incorporating manual override information
///
/// The list of potential target addresses produced by the BRANCHIND is not recovered by \b this
/// model, but must be provided explicitly via setAddresses().
/// The model tries to repurpose some of the analysis that JumpBasic does to recover the switch variable.
/// But it will revert to the trivial model if it can't find a suitable switch variable.
class JumpBasicOverride : public JumpBasic {
set<Address> adset; // Absolute address table (manually specified)
vector<uintb> values; // Normalized switch variable values associated with addresses
vector<Address> addrtable; // Address associated with each value
uintb startingvalue; // Possible start for guessing values that match addresses
Address normaddress; // Dynamic info for recovering normalized switch variable
uint8 hash; // if (hash==0) there is no normalized switch (use trivial model)
bool istrivial; // true if we use a trivial value model
set<Address> adset; ///< Absolute address table (manually specified)
vector<uintb> values; ///< Normalized switch variable values associated with addresses
vector<Address> addrtable; ///< Address associated with each value
uintb startingvalue; ///< Possible start for guessing values that match addresses
Address normaddress; ///< Dynamic info for recovering normalized switch variable
uint8 hash; ///< if (hash==0) there is no normalized switch (use trivial model)
bool istrivial; ///< \b true if we use a trivial value model
int4 findStartOp(Varnode *vn);
int4 trialNorm(Funcdata *fd,Varnode *trialvn,uint4 tolerance);
void setupTrivial(void);
Varnode *findLikelyNorm(void);
void clearCopySpecific(void);
public:
JumpBasicOverride(JumpTable *jt);
void setAddresses(const vector<Address> &adtable);
void setNorm(const Address &addr,uintb h) { normaddress = addr; hash = h; }
void setStartingValue(uintb val) { startingvalue = val; }
JumpBasicOverride(JumpTable *jt); ///< Constructor
void setAddresses(const vector<Address> &adtable); ///< Manually set the address table for \b this model
void setNorm(const Address &addr,uintb h) { normaddress = addr; hash = h; } ///< Set the normalized switch variable
void setStartingValue(uintb val) { startingvalue = val; } ///< Set the starting value for the normalized range
virtual bool isOverride(void) const { return true; }
virtual int4 getTableSize(void) const { return addrtable.size(); }
virtual bool recoverModel(Funcdata *fd,PcodeOp *indop,uint4 matchsize,uint4 maxtablesize);
@ -318,22 +457,25 @@ public:
class JumpAssistOp;
// This model looks for a special "jumpassist" pseudo-op near the branch site, which contains
// p-code models describing how to parse a jump-table for case labels and addresses.
// It views the switch table calculation as a two-stage process:
// case2index: convert the switchvar to an index into a table
// index2address: convert the index to an address
// The pseudo-op holds:
// the table address, size (number of indices)
// exemplar p-code for inverting the case2index part of the calculation
// exemplar p-code for calculating index2address
/// \brief A jump-table model assisted by pseudo-op directives in the code
///
/// This model looks for a special \e jumpassist pseudo-op near the branch site, which contains
/// p-code models describing how to parse a jump-table for case labels and addresses.
/// It views the switch table calculation as a two-stage process:
/// - case2index: convert the switchvar to an index into a table
/// - index2address: convert the index to an address
///
/// The pseudo-op holds:
/// - the table address, size (number of indices)
/// - exemplar p-code for inverting the case2index part of the calculation
/// - exemplar p-code for calculating index2address
class JumpAssisted : public JumpModel {
PcodeOp *assistOp;
JumpAssistOp *userop;
int4 sizeIndices; // Total number of indices in the table (not including the defaultaddress)
Varnode *switchvn; // The switch variable
PcodeOp *assistOp; ///< The \e jumpassist PcodeOp
JumpAssistOp *userop; ///< The \e jumpassist p-code models
int4 sizeIndices; ///< Total number of indices in the table (not including the defaultaddress)
Varnode *switchvn; ///< The switch variable
public:
JumpAssisted(JumpTable *jt) : JumpModel(jt) { assistOp = (PcodeOp *)0; switchvn = (Varnode *)0; sizeIndices=0; }
JumpAssisted(JumpTable *jt) : JumpModel(jt) { assistOp = (PcodeOp *)0; switchvn = (Varnode *)0; sizeIndices=0; } ///< Constructor
// virtual ~JumpAssisted(void);
virtual bool isOverride(void) const { return false; }
virtual int4 getTableSize(void) const { return sizeIndices+1; }
@ -341,71 +483,106 @@ public:
virtual void buildAddresses(Funcdata *fd,PcodeOp *indop,vector<Address> &addresstable,vector<LoadTable> *loadpoints) const;
virtual void findUnnormalized(uint4 maxaddsub,uint4 maxleftright,uint4 maxext) {}
virtual void buildLabels(Funcdata *fd,vector<Address> &addresstable,vector<uintb> &label,const JumpModel *orig) const;
virtual void foldInNormalization(Funcdata *fd,PcodeOp *indop);
virtual Varnode *foldInNormalization(Funcdata *fd,PcodeOp *indop);
virtual bool foldInGuards(Funcdata *fd,JumpTable *jump);
virtual bool sanityCheck(Funcdata *fd,PcodeOp *indop,vector<Address> &addresstable) { return true; }
virtual JumpModel *clone(JumpTable *jt) const;
virtual void clear(void) { assistOp = (PcodeOp *)0; switchvn = (Varnode *)0; }
};
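A hedged sketch of the two-stage lookup a jumpassist pseudo-op describes; the names and the relative-offset table layout are assumptions for illustration only:
#include <cstdint>
uintptr_t index2address(const uint32_t *table,uintptr_t tableBase,
                        int switchvar,int caseBase,int entryCount,
                        uintptr_t defaultAddr) {
  int index = switchvar - caseBase;         // case2index: map the case value to a table index
  if (index < 0 || index >= entryCount)
    return defaultAddr;                     // out of range: take the default address
  return tableBase + table[index];          // index2address: table holds relative offsets
}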
/// \brief A map from values to control-flow targets within a function
///
/// A JumpTable is attached to a specific CPUI_BRANCHIND and encapsulates all
/// the information necessary to model the indirect jump as a \e switch statement.
/// It knows how to map from specific switch variable values to the destination
/// \e case block and how to label the value.
class JumpTable {
Architecture *glb; // Architecture under which this jumptable operates
JumpModel *jmodel,*origmodel;
vector<Address> addresstable; // Raw addresses in the jumptable
vector<uint4> blocktable; // Addresses converted to basic blocks
vector<uintb> label;
vector<LoadTable> loadpoints;
Address opaddress; // Absolute address of op
PcodeOp *indirect; // INDIRECT op referring to this jump table
uint4 mostcommon; // Most common position in table
uint4 maxtablesize; // Maximum table size we allow to be built (sanity check)
uint4 maxaddsub; // Maximum ADDs or SUBs to normalize
uint4 maxleftright; // Maximum shifts to normalize
uint4 maxext; // Maximum extensions to normalize
int4 recoverystage; // 0=no stages, 1=needs additional stage, 2=complete
bool collectloads;
void recoverModel(Funcdata *fd);
void trivialSwitchOver(void);
void sanityCheck(Funcdata *fd);
uint4 block2Position(const FlowBlock *bl) const;
static bool isReachable(PcodeOp *op);
/// \brief An address table index and its corresponding out-edge
struct IndexPair {
int4 blockPosition; ///< Out-edge index for the basic-block
int4 addressIndex; ///< Index of the address targeting the basic-block
IndexPair(int4 pos,int4 index) { blockPosition = pos; addressIndex = index; } ///< Constructor
bool operator<(const IndexPair &op2) const; ///< Compare by position then by index
static bool compareByPosition(const IndexPair &op1,const IndexPair &op2); ///< Compare just by position
};
Architecture *glb; ///< Architecture under which this jump-table operates
JumpModel *jmodel; ///< Current model of how the jump table is implemented in code
JumpModel *origmodel; ///< Initial jump table model, which may be incomplete
vector<Address> addresstable; ///< Raw addresses in the jump-table
vector<IndexPair> block2addr; ///< Map from basic-blocks to address table index
vector<uintb> label; ///< The case label for each explicit target
vector<LoadTable> loadpoints; ///< Any recovered in-memory data for the jump-table
Address opaddress; ///< Absolute address of the BRANCHIND jump
PcodeOp *indirect; ///< CPUI_BRANCHIND linked to \b this jump-table
uintb switchVarConsume; ///< Bits of the switch variable being consumed
int4 defaultBlock; ///< The out-edge corresponding to the \e default switch destination (-1 = undefined)
int4 lastBlock; ///< Block out-edge corresponding to last entry in the address table
uint4 maxtablesize; ///< Maximum table size we allow to be built (sanity check)
uint4 maxaddsub; ///< Maximum ADDs or SUBs to normalize
uint4 maxleftright; ///< Maximum shifts to normalize
uint4 maxext; ///< Maximum extensions to normalize
int4 recoverystage; ///< 0=no stages recovered, 1=additional stage needed, 2=complete
bool collectloads; ///< Set to \b true if information about in-memory model data is/should be collected
void recoverModel(Funcdata *fd); ///< Attempt recovery of the jump-table model
void trivialSwitchOver(void); ///< Switch \b this table over to a trivial model
void sanityCheck(Funcdata *fd); ///< Perform sanity check on recovered address targets
int4 block2Position(const FlowBlock *bl) const; ///< Convert a basic-block to an out-edge index from the switch.
static bool isReachable(PcodeOp *op); ///< Check if the given PcodeOp still seems reachable in its function
public:
JumpTable(Architecture *g,Address ad=Address());
JumpTable(const JumpTable *op2);
~JumpTable(void);
bool isSwitchedOver(void) const { return !blocktable.empty(); }
bool isRecovered(void) const { return !addresstable.empty(); }
bool isLabelled(void) const { return !label.empty(); }
bool isOverride(void) const;
bool isPossibleMultistage(void) const { return (addresstable.size()==1); }
int4 getStage(void) const { return recoverystage; }
int4 numEntries(void) const { return addresstable.size(); }
int4 getMostCommon(void) const { return mostcommon; }
const Address &getOpAddress(void) const { return opaddress; }
PcodeOp *getIndirectOp(void) const { return indirect; }
void setIndirectOp(PcodeOp *ind) { opaddress = ind->getAddr(); indirect = ind; }
void setMaxTableSize(uint4 val) { maxtablesize = val; }
JumpTable(Architecture *g,Address ad=Address()); ///< Constructor
JumpTable(const JumpTable *op2); ///< Copy constructor
~JumpTable(void); ///< Destructor
bool isRecovered(void) const { return !addresstable.empty(); } ///< Return \b true if a model has been recovered
bool isLabelled(void) const { return !label.empty(); } ///< Return \b true if \e case labels are computed
bool isOverride(void) const; ///< Return \b true if \b this table was manually overridden
bool isPossibleMultistage(void) const { return (addresstable.size()==1); } ///< Return \b true if this could be multi-staged
int4 getStage(void) const { return recoverystage; } ///< Return what stage of recovery this jump-table is in.
int4 numEntries(void) const { return addresstable.size(); } ///< Return the size of the address table for \b this jump-table
uintb getSwitchVarConsume(void) const { return switchVarConsume; } ///< Get bits of switch variable consumed by \b this table
int4 getDefaultBlock(void) const { return defaultBlock; } ///< Get the out-edge corresponding to the \e default switch destination
const Address &getOpAddress(void) const { return opaddress; } ///< Get the address of the BRANCHIND for the switch
PcodeOp *getIndirectOp(void) const { return indirect; } ///< Get the BRANCHIND PcodeOp
void setIndirectOp(PcodeOp *ind) { opaddress = ind->getAddr(); indirect = ind; } ///< Set the BRANCHIND PcodeOp
void setMaxTableSize(uint4 val) { maxtablesize = val; } ///< Set the maximum entries allowed in the address table
void setNormMax(uint4 maddsub,uint4 mleftright,uint4 mext) {
maxaddsub = maddsub; maxleftright = mleftright; maxext = mext; }
maxaddsub = maddsub; maxleftright = mleftright; maxext = mext; } ///< Set the switch variable normalization model restrictions
void setOverride(const vector<Address> &addrtable,const Address &naddr,uintb h,uintb sv);
int4 numIndicesByBlock(const FlowBlock *bl) const;
int4 getIndexByBlock(const FlowBlock *bl,int4 i) const;
Address getAddressByIndex(int4 index) const { return addresstable[index]; }
void setMostCommonIndex(uint4 tableind);
void setMostCommonBlock(uint4 bl) { mostcommon = bl; }
void setLoadCollect(bool val) { collectloads = val; }
void addBlockToSwitch(BlockBasic *bl,uintb lab);
void switchOver(const FlowInfo &flow);
uintb getLabelByIndex(int4 index) const { return label[index]; }
void foldInNormalization(Funcdata *fd) { jmodel->foldInNormalization(fd,indirect); }
bool foldInGuards(Funcdata *fd) { return jmodel->foldInGuards(fd,this); }
void recoverAddresses(Funcdata *fd);
void recoverMultistage(Funcdata *fd);
bool recoverLabels(Funcdata *fd);
bool checkForMultistage(Funcdata *fd);
void clear(void);
void saveXml(ostream &s) const;
void restoreXml(const Element *el);
Address getAddressByIndex(int4 i) const { return addresstable[i]; } ///< Get the i-th address table entry
void setLastAsMostCommon(void); ///< Set the most common jump-table target to be the last address in the table
void setDefaultBlock(int4 bl) { defaultBlock = bl; } ///< Set out-edge of the switch destination considered to be \e default
void setLoadCollect(bool val) { collectloads = val; } ///< Set whether LOAD records should be collected
void addBlockToSwitch(BlockBasic *bl,uintb lab); ///< Force a given basic-block to be a switch destination
void switchOver(const FlowInfo &flow); ///< Convert absolute addresses to block indices
uintb getLabelByIndex(int4 index) const { return label[index]; } ///< Given a \e case index, get its label
void foldInNormalization(Funcdata *fd); ///< Hide the normalization code for the switch
bool foldInGuards(Funcdata *fd) { return jmodel->foldInGuards(fd,this); } ///< Hide any guard code for \b this switch
void recoverAddresses(Funcdata *fd); ///< Recover the raw jump-table addresses (the address table)
void recoverMultistage(Funcdata *fd); ///< Recover jump-table addresses keeping track of a possible previous stage
bool recoverLabels(Funcdata *fd); ///< Recover the case labels for \b this jump-table
bool checkForMultistage(Funcdata *fd); ///< Check if this jump-table requires an additional recovery stage
void clear(void); ///< Clear instance specific data for \b this jump-table
void saveXml(ostream &s) const; ///< Save \b this jump-table as a \<jumptable> XML tag
void restoreXml(const Element *el); ///< Recover \b this jump-table from a \<jumptable> XML tag
};
/// \param op2 is the other IndexPair to compare with \b this
/// \return \b true if \b this is ordered before the other IndexPair
inline bool JumpTable::IndexPair::operator<(const IndexPair &op2) const
{
if (blockPosition != op2.blockPosition) return (blockPosition < op2.blockPosition);
return (addressIndex < op2.addressIndex);
}
/// \param op1 is the first IndexPair to compare
/// \param op2 is the second IndexPair to compare
/// \return \b true if op1 is ordered before op2
inline bool JumpTable::IndexPair::compareByPosition(const IndexPair &op1,const IndexPair &op2)
{
return (op1.blockPosition < op2.blockPosition);
}
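// Illustrative sketch only (hypothetical helper, not an actual JumpTable method): once block2addr
// is kept sorted by IndexPair::operator<, every address-table index feeding a given out-edge
// position can be gathered with a binary search on the position alone (std::lower_bound assumed).
// The class exposes this lookup through numIndicesByBlock() and getIndexByBlock().
//
//   void gatherIndicesByBlock(const vector<IndexPair> &block2addr,int4 pos,vector<int4> &res)
//   {
//     vector<IndexPair>::const_iterator iter;
//     iter = lower_bound(block2addr.begin(),block2addr.end(),IndexPair(pos,0));
//     for(;iter!=block2addr.end() && (*iter).blockPosition == pos;++iter)
//       res.push_back((*iter).addressIndex);    // Collect each address index targeting the block
//   }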
#endif

View File

@ -81,6 +81,35 @@ PcodeOp::PcodeOp(int4 s,const SeqNum &sq) : start(sq),inrefs(s)
inrefs[i] = (Varnode *)0;
}
/// \brief Find the slot for a given Varnode, which may take up multiple input slots
///
/// In the rare case that \b this PcodeOp takes the same Varnode as input multiple times,
/// use the specific descendant iterator producing \b this PcodeOp to work out the corresponding slot.
/// Every slot containing the given Varnode will be produced exactly once over the course of iteration.
/// \param vn is the given Varnode
/// \param firstSlot is the first instance of the Varnode in \b this input list
/// \param iter is the specific descendant iterator producing \b this
/// \return the slot corresponding to the iterator
int4 PcodeOp::getRepeatSlot(const Varnode *vn,int4 firstSlot,list<PcodeOp *>::const_iterator iter) const
{
int4 count = 1;
for(list<PcodeOp *>::const_iterator oiter=vn->beginDescend();oiter != iter;++oiter) {
if ((*oiter) == this)
count += 1;
}
if (count == 1) return firstSlot;
int4 recount = 1;
for(int4 i=firstSlot+1;i<inrefs.size();++i) {
if (inrefs[i] == vn) {
recount += 1;
if (recount == count)
return i;
}
}
return -1;
}
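// Illustrative usage pattern (sketch only, assuming a Varnode vn being traced): getSlot() always
// returns the first slot containing vn, so code walking vn's descendant list can disambiguate an
// op that reads vn in more than one slot by also passing the iterator that produced the op.
//
//   list<PcodeOp *>::const_iterator iter;
//   for(iter=vn->beginDescend();iter!=vn->endDescend();++iter) {
//     PcodeOp *op = *iter;
//     int4 slot = op->getSlot(vn);               // First slot containing vn
//     slot = op->getRepeatSlot(vn,slot,iter);    // Adjusts the slot only if op reads vn more than once
//     // ... process (op,slot) ...
//   }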
/// Can this be collapsed to a copy op, i.e. are all inputs constants
/// \return \b true if this op can be collapsed
bool PcodeOp::isCollapsible(void) const

View File

@ -155,6 +155,7 @@ public:
list<PcodeOp *>::iterator getBasicIter(void) const { return basiciter; } ///< Get position within basic block
/// \brief Get the slot number of the indicated input varnode
int4 getSlot(const Varnode *vn) const { int4 i,n; n=inrefs.size(); for(i=0;i<n;++i) if (inrefs[i]==vn) break; return i; }
int4 getRepeatSlot(const Varnode *vn,int4 firstSlot,list<PcodeOp *>::const_iterator iter) const;
/// \brief Get the evaluation type of this op
uint4 getEvalType(void) const { return (flags&(PcodeOp::unary|PcodeOp::binary|PcodeOp::special)); }
/// \brief Get type which indicates unusual halt in control-flow
@ -218,7 +219,6 @@ public:
PcodeOp *target(void) const; ///< Return starting op for instruction associated with this op
uintb getNZMaskLocal(bool cliploop) const; ///< Calculate known zero bits for output to this op
int4 compareOrder(const PcodeOp *bop) const; ///< Compare the control-flow order of this and \e bop
void push(PrintLanguage *lng) const { opcode->push(lng,this); } ///< Push this op as a display token
void printRaw(ostream &s) const { opcode->printRaw(s,this); } ///< Print raw info about this op to stream
const string &getOpName(void) const { return opcode->getName(); } ///< Return the name of this op
void printDebug(ostream &s) const; ///< Print debug description of this op to stream

View File

@ -591,11 +591,11 @@ void PrintC::opReturn(const PcodeOp *op)
pushAtom(Atom("",blanktoken,EmitXml::no_color));
}
void PrintC::opIntZext(const PcodeOp *op)
void PrintC::opIntZext(const PcodeOp *op,const PcodeOp *readOp)
{
if (castStrategy->isZextCast(op->getOut()->getHigh()->getType(),op->getIn(0)->getHigh()->getType())) {
if (isExtensionCastImplied(op))
if (option_hide_exts && castStrategy->isExtensionCastImplied(op,readOp))
opHiddenFunc(op);
else
opTypeCast(op);
@ -604,11 +604,11 @@ void PrintC::opIntZext(const PcodeOp *op)
opFunc(op);
}
void PrintC::opIntSext(const PcodeOp *op)
void PrintC::opIntSext(const PcodeOp *op,const PcodeOp *readOp)
{
if (castStrategy->isSextCast(op->getOut()->getHigh()->getType(),op->getIn(0)->getHigh()->getType())) {
if (isExtensionCastImplied(op))
if (option_hide_exts && castStrategy->isExtensionCastImplied(op,readOp))
opHiddenFunc(op);
else
opTypeCast(op);
@ -1282,60 +1282,6 @@ bool PrintC::printCharacterConstant(ostream &s,const Address &addr,int4 charsize
return res;
}
/// \brief Is the given ZEXT/SEXT cast implied by the expression it's in
///
/// We know that the given ZEXT or SEXT op can be viewed as a natural \e cast operation.
/// Sometimes such a cast is implied by the expression it's in, and the cast itself
/// doesn't need to be printed.
/// \param op is the given ZEXT or SEXT PcodeOp
/// \return \b true if the op as a cast does not need to be printed
bool PrintC::isExtensionCastImplied(const PcodeOp *op) const
{
if (!option_hide_exts)
return false; // If hiding extensions is not on, we must always print extension
const Varnode *outVn = op->getOut();
if (outVn->isExplicit()) {
}
else {
type_metatype metatype = outVn->getHigh()->getType()->getMetatype();
list<PcodeOp *>::const_iterator iter;
for(iter=outVn->beginDescend();iter!=outVn->endDescend();++iter) {
PcodeOp *expOp = *iter;
Varnode *otherVn;
int4 slot;
switch(expOp->code()) {
case CPUI_PTRADD:
break;
case CPUI_INT_ADD:
case CPUI_INT_SUB:
case CPUI_INT_MULT:
case CPUI_INT_DIV:
case CPUI_INT_AND:
case CPUI_INT_OR:
case CPUI_INT_XOR:
case CPUI_INT_LESS:
case CPUI_INT_LESSEQUAL:
case CPUI_INT_SLESS:
case CPUI_INT_SLESSEQUAL:
slot = expOp->getSlot(outVn);
otherVn = expOp->getIn(1-slot);
// Check if the expression involves an explicit variable of the right integer type
if (!otherVn->isExplicit())
return false;
if (otherVn->getHigh()->getType()->getMetatype() != metatype)
return false;
break;
default:
return false;
}
}
return true; // Everything is integer promotion
}
return false;
}
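// Example of the intent (illustrative only): suppose a 2-byte ushort_var is zero-extended to
// 4 bytes and the extension's only reader is a 4-byte addition with an explicit int variable.
// Without hiding, the extension prints as an explicit cast of ushort_var to the wider type, but
// C's integer promotion already widens ushort_var in
//     result = ushort_var + int_var;
// so when every reader is an arithmetic or comparison op whose other operand is an explicit
// variable of the same integer metatype, the cast token is redundant and can be left unprinted.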
/// \brief Push a single character constant to the RPN stack
///
/// For C, a character constant is usually emitted as the character in single quotes.
@ -1864,8 +1810,11 @@ void PrintC::emitPrototypeOutput(const FuncProto *proto,
PcodeOp *op;
Varnode *vn;
if (fd != (const Funcdata *)0)
op = fd->canonicalReturnOp();
if (fd != (const Funcdata *)0) {
op = fd->getFirstReturnOp();
if (op != (PcodeOp *)0 && op->numInput() < 2)
op = (PcodeOp *)0;
}
else
op = (PcodeOp *)0;
@ -2150,7 +2099,7 @@ void PrintC::emitExpression(const PcodeOp *op)
// If BRANCHIND, print switch( )
// If CALL, CALLIND, CALLOTHER print call
// If RETURN, print return ( )
op->push(this);
op->getOpcode()->push(this,op,(PcodeOp *)0);
recurse();
}

View File

@ -159,7 +159,6 @@ protected:
void opHiddenFunc(const PcodeOp *op); ///< Push the given p-code op as a hidden token
static bool hasCharTerminator(uint1 *buffer,int4 size,int4 charsize);
bool printCharacterConstant(ostream &s,const Address &addr,int4 charsize) const;
bool isExtensionCastImplied(const PcodeOp *op) const;
virtual void pushConstant(uintb val,const Datatype *ct,
const Varnode *vn,const PcodeOp *op);
virtual bool pushEquate(uintb val,int4 sz,const EquateSymbol *sym,
@ -238,8 +237,8 @@ public:
virtual void opIntSlessEqual(const PcodeOp *op) { opBinary(&less_equal,op); }
virtual void opIntLess(const PcodeOp *op) { opBinary(&less_than,op); }
virtual void opIntLessEqual(const PcodeOp *op) { opBinary(&less_equal,op); }
virtual void opIntZext(const PcodeOp *op);
virtual void opIntSext(const PcodeOp *op);
virtual void opIntZext(const PcodeOp *op,const PcodeOp *readOp);
virtual void opIntSext(const PcodeOp *op,const PcodeOp *readOp);
virtual void opIntAdd(const PcodeOp *op) { opBinary(&binary_plus,op); }
virtual void opIntSub(const PcodeOp *op) { opBinary(&binary_minus,op); }
virtual void opIntCarry(const PcodeOp *op) { opFunc(op); }

View File

@ -648,8 +648,10 @@ void PrintLanguage::recurse(void)
mods = nodepend.back().vnmod;
nodepend.pop_back();
pending -= 1;
if (vn->isImplied())
vn->getDef()->push(this);
if (vn->isImplied()) {
const PcodeOp *defOp = vn->getDef();
defOp->getOpcode()->push(this,defOp,op);
}
else
pushVnExplicit(vn,op);
pending = nodepend.size();

View File

@ -486,8 +486,8 @@ public:
virtual void opIntSlessEqual(const PcodeOp *op)=0; ///< Emit a INT_SLESSEQUAL operator
virtual void opIntLess(const PcodeOp *op)=0; ///< Emit a INT_LESS operator
virtual void opIntLessEqual(const PcodeOp *op)=0; ///< Emit a INT_LESSEQUAL operator
virtual void opIntZext(const PcodeOp *op)=0; ///< Emit a INT_ZEXT operator
virtual void opIntSext(const PcodeOp *op)=0; ///< Emit a INT_SEXT operator
virtual void opIntZext(const PcodeOp *op,const PcodeOp *readOp)=0; ///< Emit a INT_ZEXT operator
virtual void opIntSext(const PcodeOp *op,const PcodeOp *readOp)=0; ///< Emit a INT_SEXT operator
virtual void opIntAdd(const PcodeOp *op)=0; ///< Emit a INT_ADD operator
virtual void opIntSub(const PcodeOp *op)=0; ///< Emit a INT_SUB operator
virtual void opIntCarry(const PcodeOp *op)=0; ///< Emit a INT_CARRY operator

View File

@ -1066,8 +1066,9 @@ Varnode *CircleRange::pullBack(PcodeOp *op,Varnode **constMarkup,bool usenzmask)
CircleRange nzrange;
if (!nzrange.setNZMask(res->getNZMask(),res->getSize()))
return res;
if (0!=intersect(nzrange))
return (Varnode *)0;
intersect(nzrange);
// If the intersect does not succeed (i.e. produces 2 pieces) the original range is
// preserved and we still consider this pullback successful.
}
return res;
}

View File

@ -873,8 +873,10 @@ Varnode *RulePullsubMulti::buildSubpiece(Varnode *basevn,uint4 outsize,uint4 shi
data.opSetOpcode(new_op,CPUI_SUBPIECE);
if (usetmp)
outvn = data.newUniqueOut(outsize,new_op);
else
else {
smalladdr1.renormalize(outsize);
outvn = data.newVarnodeOut(outsize,smalladdr1,new_op);
}
data.opSetInput(new_op,basevn,0);
data.opSetInput(new_op,data.newConstant(4,shift),1);
@ -943,6 +945,26 @@ int4 RulePullsubMulti::applyOp(PcodeOp *op,Funcdata &data)
Varnode *outvn = op->getOut();
if (outvn->isPrecisLo()||outvn->isPrecisHi()) return 0; // Don't pull apart a double precision object
// Make sure we don't add new SUBPIECE ops that aren't going to cancel in some way
int4 branches = mult->numInput();
uintb consume = calc_mask(newSize) << 8*minByte;
consume = ~consume; // Check for use of bits outside of what gets truncated later
for(int4 i=0;i<branches;++i) {
Varnode *inVn = mult->getIn(i);
if ((consume & inVn->getConsume()) != 0) { // Check if bits not truncated are still used
// Check if there's an extension that matches the truncation
if (minByte == 0 && inVn->isWritten()) {
PcodeOp *defOp = inVn->getDef();
OpCode opc = defOp->code();
if (opc == CPUI_INT_ZEXT || opc == CPUI_INT_SEXT) {
if (newSize == defOp->getIn(0)->getSize())
continue; // We have matching extension, so new SUBPIECE will cancel anyway
}
}
return 0;
}
}
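// Worked example of the consume mask (illustrative): pulling a 2-byte logical value starting at
// byte 1 of a 4-byte MULTIEQUAL gives
//     calc_mask(2) << 8*1  =  0x00ffff00          (bits the new SUBPIECE will keep)
//     consume = ~0x00ffff00                       (low 4 bytes: 0xff0000ff)
// A branch is rejected when any of those discarded bits are still consumed, unless the
// ZEXT/SEXT escape above applies (minByte == 0 and the extension input matches newSize).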
Address smalladdr2;
if (!vn->getSpace()->isBigEndian())
smalladdr2 = vn->getAddr()+minByte;
@ -950,7 +972,6 @@ int4 RulePullsubMulti::applyOp(PcodeOp *op,Funcdata &data)
smalladdr2 = vn->getAddr()+(vn->getSize()-maxByte-1);
vector<Varnode *> params;
int4 branches = mult->numInput();
for(int4 i=0;i<branches;++i) {
Varnode *vn_piece = mult->getIn(i);
@ -964,6 +985,7 @@ int4 RulePullsubMulti::applyOp(PcodeOp *op,Funcdata &data)
}
// Build new multiequal near original multiequal
PcodeOp *new_multi = data.newOp(params.size(),mult->getAddr());
smalladdr2.renormalize(newSize);
Varnode *new_vn = data.newVarnodeOut(newSize,smalladdr2,new_multi);
data.opSetOpcode(new_multi,CPUI_MULTIEQUAL);
data.opSetAllInput(new_multi,params);
@ -1003,6 +1025,10 @@ int4 RulePullsubIndirect::applyOp(PcodeOp *op,Funcdata &data)
Varnode *outvn = op->getOut();
if (outvn->isPrecisLo()||outvn->isPrecisHi()) return 0; // Don't pull apart double precision object
uintb consume = calc_mask(newSize) << 8 * minByte;
consume = ~consume;
if ((consume & indir->getIn(0)->getConsume())!=0) return 0;
Varnode *small2;
Address smalladdr2;
PcodeOp *new_ind;
@ -2005,6 +2031,7 @@ int4 RuleLeftRight::applyOp(PcodeOp *op,Funcdata &data)
addr = addr + isa;
data.opUnsetInput(op,0);
data.opUnsetOutput(leftshift);
addr.renormalize(tsz);
Varnode *newvn = data.newVarnodeOut(tsz,addr,leftshift);
data.opSetOpcode(leftshift,CPUI_SUBPIECE);
data.opSetInput(leftshift, data.newConstant( leftshift->getIn(1)->getSize(), 0), 1);
@ -2483,6 +2510,7 @@ int4 RuleZextEliminate::applyOp(PcodeOp *op,Funcdata &data)
if (!vn2->isConstant()) return 0;
zext = vn1->getDef();
if (!zext->getIn(0)->isHeritageKnown()) return 0;
if (vn1->loneDescend() != op) return 0; // Make sure extension is not used for anything else
smallsize = zext->getIn(0)->getSize();
val = vn2->getOffset();
if ((val>>(8*smallsize))==0) { // Is zero extension unnecessary
@ -7456,6 +7484,7 @@ Varnode *RulePtrFlow::truncatePointer(AddrSpace *spc,PcodeOp *op,Varnode *vn,int
Address addr = vn->getAddr();
if (addr.isBigEndian())
addr = addr + (vn->getSize() - spc->getAddrSize());
addr.renormalize(spc->getAddrSize());
newvn = data.newVarnodeOut(spc->getAddrSize(),addr,truncop);
}
data.opSetInput(op,newvn,slot);

View File

@ -193,38 +193,6 @@ SubvariableFlow::ReplaceOp *SubvariableFlow::createOpDown(OpCode opc,int4 numpar
return rop;
}
/// \brief Convert a new INDIRECT op into the logically trimmed variant of the given original PcodeOp
///
/// This method assumes the original op was an \e indirect \e creation. The input and output
/// Varnode for the new INDIRECT are not provided by this method. It only provides the
/// \e indirect \e effect input and patches up any active parameter recovery process.
/// \param newop is the new INDIRECT op to convert
/// \param oldop is the original INDIRECT op
/// \param out is the subgraph output variable node of the new INDIRECT
void SubvariableFlow::patchIndirect(PcodeOp *newop,PcodeOp *oldop, ReplaceVarnode *out)
{
PcodeOp *indop = PcodeOp::getOpFromConst(oldop->getIn(1)->getAddr());
bool possibleout = !oldop->getIn(0)->isIndirectZero();
Varnode *outvn = getReplaceVarnode(out);
fd->opSetOutput(newop,outvn);
fd->opSetOpcode(newop, CPUI_INDIRECT);
fd->opSetInput(newop,fd->newConstant(outvn->getSize(),0),0);
fd->opSetInput(newop,fd->newVarnodeIop(indop),1);
fd->markIndirectCreation(newop,possibleout);
fd->opInsertBefore(newop, indop);
FuncCallSpecs *fc = fd->getCallSpecs(indop);
if (fc == (FuncCallSpecs *)0) return;
if (fc->isOutputActive()) {
ParamActive *active = fc->getActiveOutput();
int4 trial = active->whichTrial( out->vn->getAddr(), out->vn->getSize() );
if (trial < 0)
throw LowlevelError("Cannot trim output trial to subflow");
Address addr = getReplacementAddress(out);
active->shrink(trial,addr,flowsize);
}
}
/// \brief Determine if the given subgraph variable can act as a parameter to the given CALL op
///
/// We assume the variable flows as a parameter to the CALL. If the CALL doesn't lock the parameter
@ -238,14 +206,18 @@ bool SubvariableFlow::tryCallPull(PcodeOp *op,ReplaceVarnode *rvn,int4 slot)
{
if (slot == 0) return false;
if (!aggressive) {
if ((rvn->vn->getConsume()&~rvn->mask)!=0) // If there's something outside the mask being consumed
return false; // Don't truncate
}
FuncCallSpecs *fc = fd->getCallSpecs(op);
if (fc == (FuncCallSpecs *)0) return false;
if (fc->isInputActive()) return false; // Don't trim while in the middle of figuring out params
if (fc->isInputLocked() && (!fc->isDotdotdot())) return false;
patchlist.push_back(PatchRecord());
patchlist.back().type = 2;
patchlist.back().pullop = op;
patchlist.back().type = PatchRecord::parameter_patch;
patchlist.back().patchOp = op;
patchlist.back().in1 = rvn;
patchlist.back().slot = slot;
pullcount += 1; // A true terminal modification
@ -265,6 +237,10 @@ bool SubvariableFlow::tryReturnPull(PcodeOp *op,ReplaceVarnode *rvn,int4 slot)
{
if (slot == 0) return false; // Don't deal with actual return address container
if (fd->getFuncProto().isOutputLocked()) return false;
if (!aggressive) {
if ((rvn->vn->getConsume()&~rvn->mask)!=0) // If there's something outside the mask being consumed
return false; // Don't truncate
}
if (!returnsTraversed) {
// If we plan to truncate the size of a return variable, we need to propagate the logical size to any other
@ -283,12 +259,21 @@ bool SubvariableFlow::tryReturnPull(PcodeOp *op,ReplaceVarnode *rvn,int4 slot)
return false;
if (inworklist)
worklist.push_back(rep);
else if (retvn->isConstant() && retop != op) {
// Trace won't revisit this RETURN, so we need to generate patch now
patchlist.push_back(PatchRecord());
patchlist.back().type = PatchRecord::parameter_patch;
patchlist.back().patchOp = retop;
patchlist.back().in1 = rep;
patchlist.back().slot = slot;
pullcount += 1;
}
}
returnsTraversed = true;
}
patchlist.push_back(PatchRecord());
patchlist.back().type = 2;
patchlist.back().pullop = op;
patchlist.back().type = PatchRecord::parameter_patch;
patchlist.back().patchOp = op;
patchlist.back().in1 = rvn;
patchlist.back().slot = slot;
pullcount += 1; // A true terminal modification
@ -302,24 +287,44 @@ bool SubvariableFlow::tryReturnPull(PcodeOp *op,ReplaceVarnode *rvn,int4 slot)
/// \param op is the given INDIRECT
/// \param rvn is the given subgraph variable acting as the output of the INDIRECT
/// \return \b true if we can successfully trim the value to its logical size
bool SubvariableFlow::tryCallReturnPull(PcodeOp *op,ReplaceVarnode *rvn)
bool SubvariableFlow::tryCallReturnPush(PcodeOp *op,ReplaceVarnode *rvn)
{
if (!op->isIndirectCreation()) return false;
PcodeOp *indop = PcodeOp::getOpFromConst(op->getIn(1)->getAddr());
FuncCallSpecs *fc = fd->getCallSpecs(indop);
if (!aggressive) {
if ((rvn->vn->getConsume()&~rvn->mask)!=0) // If there's something outside the mask being consumed
return false; // Don't truncate
}
if ((rvn->mask & 1) == 0) return false; // Verify the logical value is the least significant part
if (bitsize < 8) return false; // Make sure logical value is at least a byte
FuncCallSpecs *fc = fd->getCallSpecs(op);
if (fc == (FuncCallSpecs *)0) return false;
if (fc->isOutputLocked()) return false;
if (fc->isOutputActive()) {
ParamActive *active = fc->getActiveOutput();
int4 trial = active->whichTrial( rvn->vn->getAddr(), rvn->vn->getSize() );
if (trial < 0) return false;
Address newaddr = getReplacementAddress(rvn);
if (!active->testShrink(trial,newaddr, flowsize ))
return false;
}
createOp(CPUI_INDIRECT,2,rvn);
if (fc->isOutputActive()) return false; // Don't trim while in the middle of figuring out return value
addPush(op,rvn);
// pullcount += 1; // This is a push NOT a pull
return true;
}
/// \brief Determine if the subgraph variable can act as a switch variable for the given BRANCHIND
///
/// We query the JumpTable associated with the BRANCHIND to see if its switch variable
/// can be trimmed as indicated by the logical flow.
/// \param op is the given BRANCHIND op
/// \param rvn is the subgraph variable flowing to the BRANCHIND
/// \return \b true if the switch variable can be successfully trimmed to its logical size
bool SubvariableFlow::trySwitchPull(PcodeOp *op,ReplaceVarnode *rvn)
{
if ((rvn->mask & 1) == 0) return false; // Logical value must be justified
if ((rvn->vn->getConsume()&~rvn->mask)!=0) // If there's something outside the mask being consumed
return false; // we can't trim
patchlist.push_back(PatchRecord());
patchlist.back().type = PatchRecord::parameter_patch;
patchlist.back().patchOp = op;
patchlist.back().in1 = rvn;
patchlist.back().slot = 0;
pullcount += 1; // A true terminal modification
return true;
}
@ -339,14 +344,14 @@ bool SubvariableFlow::traceForward(ReplaceVarnode *rvn)
bool booldir;
int4 dcount = 0;
int4 hcount = 0;
int4 callcount = 0;
list<PcodeOp *>::const_iterator iter,enditer;
iter = rvn->vn->beginDescend();
enditer = rvn->vn->endDescend();
while(iter != enditer) {
op = *iter++;
for(iter = rvn->vn->beginDescend();iter != enditer;++iter) {
op = *iter;
outvn = op->getOut();
if ((outvn!=(Varnode *)0)&&(outvn->isMark()))
if ((outvn!=(Varnode *)0)&&outvn->isMark()&&!op->isCall())
continue;
dcount += 1; // Count this descendant
slot = op->getSlot(rvn->vn);
@ -472,7 +477,15 @@ bool SubvariableFlow::traceForward(ReplaceVarnode *rvn)
sa = (int4)op->getIn(1)->getOffset() * 8;
newmask = (rvn->mask >> sa) & calc_mask(outvn->getSize());
if (newmask == 0) break; // subvar is set to zero, truncate flow
if (rvn->mask != (newmask << sa)) return false;
if (rvn->mask != (newmask << sa)) { // Some kind of truncation of the logical value
if (flowsize > (sa + outvn->getSize()) && (rvn->mask & 1) != 0) {
// Only a piece of the logical value remains
addTerminalPatchSameOp(op, rvn, 0);
hcount += 1;
break;
}
return false;
}
if (((newmask & 1)!=0)&&(outvn->getSize()==flowsize)) {
addTerminalPatch(op,rvn);
hcount += 1; // Dealt with this descendant
@ -549,15 +562,20 @@ bool SubvariableFlow::traceForward(ReplaceVarnode *rvn)
break;
case CPUI_CALL:
case CPUI_CALLIND:
if (!aggressive) return false;
callcount += 1;
if (callcount > 1)
slot = op->getRepeatSlot(rvn->vn, slot, iter);
if (!tryCallPull(op,rvn,slot)) return false;
hcount += 1; // Dealt with this descendant
break;
case CPUI_RETURN:
if (!aggressive) return false;
if (!tryReturnPull(op,rvn,slot)) return false;
hcount += 1;
break;
case CPUI_BRANCHIND:
if (!trySwitchPull(op, rvn)) return false;
hcount += 1;
break;
case CPUI_BOOL_NEGATE:
case CPUI_BOOL_AND:
case CPUI_BOOL_OR:
@ -632,8 +650,13 @@ bool SubvariableFlow::traceBackward(ReplaceVarnode *rvn)
return true;
case CPUI_INT_ZEXT:
case CPUI_INT_SEXT:
if ((rvn->mask & calc_mask(op->getIn(0)->getSize())) != rvn->mask)
if ((rvn->mask & calc_mask(op->getIn(0)->getSize())) != rvn->mask) {
if ((rvn->mask & 1)!=0 && flowsize > op->getIn(0)->getSize()) {
addPush(op,rvn);
return true;
}
break; // Check if subvariable comes through extension
}
rop = createOp(CPUI_COPY,1,rvn);
if (!createLink(rop,rvn->mask,0,op->getIn(0))) return false;
return true;
@ -723,12 +746,10 @@ bool SubvariableFlow::traceBackward(ReplaceVarnode *rvn)
return true;
}
break;
case CPUI_INDIRECT:
// TODO: This assumes that the INDIRECT is CALL-based. Add STORE-based logic.
if (aggressive) {
if (tryCallReturnPull(op,rvn))
return true;
}
case CPUI_CALL:
case CPUI_CALLIND:
if (tryCallReturnPush(op,rvn))
return true;
break;
case CPUI_INT_EQUAL:
case CPUI_INT_NOTEQUAL:
@ -773,14 +794,14 @@ bool SubvariableFlow::traceForwardSext(ReplaceVarnode *rvn)
int4 slot;
int4 dcount = 0;
int4 hcount = 0;
int4 callcount = 0;
list<PcodeOp *>::const_iterator iter,enditer;
iter = rvn->vn->beginDescend();
enditer = rvn->vn->endDescend();
while(iter != enditer) {
op = *iter++;
for(iter=rvn->vn->beginDescend();iter != enditer;++iter) {
op = *iter;
outvn = op->getOut();
if ((outvn!=(Varnode *)0)&&(outvn->isMark()))
if ((outvn!=(Varnode *)0)&&outvn->isMark()&&!op->isCall())
continue;
dcount += 1; // Count this descendant
slot = op->getSlot(rvn->vn);
@ -828,15 +849,20 @@ bool SubvariableFlow::traceForwardSext(ReplaceVarnode *rvn)
break;
case CPUI_CALL:
case CPUI_CALLIND:
if (!aggressive) return false;
callcount += 1;
if (callcount > 1)
slot = op->getRepeatSlot(rvn->vn, slot, iter);
if (!tryCallPull(op,rvn,slot)) return false;
hcount += 1; // Dealt with this descendant
break;
case CPUI_RETURN:
if (!aggressive) return false;
if (!tryReturnPull(op,rvn,slot)) return false;
hcount += 1;
break;
case CPUI_BRANCHIND:
if (!trySwitchPull(op,rvn)) return false;
hcount += 1;
break;
default:
return false;
}
@ -871,6 +897,13 @@ bool SubvariableFlow::traceBackwardSext(ReplaceVarnode *rvn)
if (!createLink(rop,rvn->mask,i,op->getIn(i))) // Same inputs and mask
return false;
return true;
case CPUI_INT_ZEXT:
if (op->getIn(0)->getSize() < flowsize) {
// zero extension from a smaller size still acts as a sign extension
addPush(op,rvn);
return true;
}
break;
case CPUI_INT_SEXT:
if (flowsize != op->getIn(0)->getSize()) return false;
rop = createOp(CPUI_COPY,1,rvn);
@ -885,11 +918,10 @@ bool SubvariableFlow::traceBackwardSext(ReplaceVarnode *rvn)
if (rop->input.size()==1)
addConstant(rop,calc_mask(op->getIn(1)->getSize()),1,op->getIn(1)->getOffset()); // Preserve the shift amount
return true;
case CPUI_INDIRECT:
if (aggressive) {
if (tryCallReturnPull(op,rvn))
return true;
}
case CPUI_CALL:
case CPUI_CALLIND:
if (tryCallReturnPush(op,rvn))
return true;
break;
default:
break;
@ -1006,6 +1038,21 @@ void SubvariableFlow::createNewOut(ReplaceOp *rop,uintb mask)
res->def = rop;
}
/// \brief Mark an operation where original data-flow is being pushed into a subgraph variable
///
/// The operation is not manipulating the logical value, but it produces a variable containing
/// the logical value. The original op will not change but will just produce a smaller value.
/// \param pushOp is the operation to mark
/// \param rvn is the output variable holding the logical value
void SubvariableFlow::addPush(PcodeOp *pushOp,ReplaceVarnode *rvn)
{
patchlist.push_front(PatchRecord()); // Push to the front of the patch list
patchlist.front().type = PatchRecord::push_patch;
patchlist.front().patchOp = pushOp;
patchlist.front().in1 = rvn;
}
/// \brief Mark an operation where a subgraph variable is naturally copied into the original data-flow
///
/// If the operations naturally takes the given logical value as input but the output
@ -1017,8 +1064,8 @@ void SubvariableFlow::addTerminalPatch(PcodeOp *pullop,ReplaceVarnode *rvn)
{
patchlist.push_back(PatchRecord());
patchlist.back().type = 0; // Ultimately gets converted to a COPY
patchlist.back().pullop = pullop; // Operation pulling the variable out
patchlist.back().type = PatchRecord::copy_patch; // Ultimately gets converted to a COPY
patchlist.back().patchOp = pullop; // Operation pulling the variable out
patchlist.back().in1 = rvn; // Point in container flow for pull
pullcount += 1; // a true terminal modification
}
@ -1035,8 +1082,8 @@ void SubvariableFlow::addTerminalPatchSameOp(PcodeOp *pullop,ReplaceVarnode *rvn
{
patchlist.push_back(PatchRecord());
patchlist.back().type = 2; // Keep the original op, just change input
patchlist.back().pullop = pullop; // Operation pulling the variable out
patchlist.back().type = PatchRecord::parameter_patch; // Keep the original op, just change input
patchlist.back().patchOp = pullop; // Operation pulling the variable out
patchlist.back().in1 = rvn; // Point in container flow for pull
patchlist.back().slot = slot;
pullcount += 1; // a true terminal modification
@ -1053,8 +1100,8 @@ void SubvariableFlow::addBooleanPatch(PcodeOp *pullop,ReplaceVarnode *rvn,int4 s
{
patchlist.push_back(PatchRecord());
patchlist.back().type = 2; // Make no change to the operator, just put in the new input
patchlist.back().pullop = pullop; // Operation pulling the variable out
patchlist.back().type = PatchRecord::parameter_patch; // Make no change to the operator, just put in the new input
patchlist.back().patchOp = pullop; // Operation pulling the variable out
patchlist.back().in1 = rvn; // Point in container flow for pull
patchlist.back().slot = slot;
// this is not a true modification
@ -1071,9 +1118,9 @@ void SubvariableFlow::addSuggestedPatch(ReplaceVarnode *rvn,PcodeOp *pushop,int4
{
patchlist.push_back(PatchRecord());
patchlist.back().type = 3;
patchlist.back().type = PatchRecord::extension_patch;
patchlist.back().in1 = rvn;
patchlist.back().pullop = pushop;
patchlist.back().patchOp = pushop;
if (sa == -1)
sa = leastsigbit_set(rvn->mask);
patchlist.back().slot = sa;
@ -1091,8 +1138,8 @@ void SubvariableFlow::addComparePatch(ReplaceVarnode *in1,ReplaceVarnode *in2,Pc
{
patchlist.push_back(PatchRecord());
patchlist.back().type = 1;
patchlist.back().pullop = op;
patchlist.back().type = PatchRecord::compare_patch;
patchlist.back().patchOp = op;
patchlist.back().in1 = in1;
patchlist.back().in2 = in2;
pullcount += 1;
@ -1152,6 +1199,7 @@ Address SubvariableFlow::getReplacementAddress(ReplaceVarnode *rvn) const
addr = addr + (rvn->vn->getSize() - flowsize - sa);
else
addr = addr + sa;
addr.renormalize(flowsize);
return addr;
}
@ -1271,27 +1319,39 @@ bool SubvariableFlow::doTrace(void)
void SubvariableFlow::doReplacement(void)
{
list<PatchRecord>::iterator piter;
list<ReplaceOp>::iterator iter;
// Do up front processing of the call return patches, which will be at the front of the list
for(piter=patchlist.begin();piter!=patchlist.end();++piter) {
if ((*piter).type != PatchRecord::push_patch) break;
PcodeOp *pushOp = (*piter).patchOp;
Varnode *newVn = getReplaceVarnode((*piter).in1);
Varnode *oldVn = pushOp->getOut();
fd->opSetOutput(pushOp, newVn);
// Create placeholder defining op for old Varnode, until dead code cleans it up
PcodeOp *newZext = fd->newOp(1, pushOp->getAddr());
fd->opSetOpcode(newZext, CPUI_INT_ZEXT);
fd->opSetInput(newZext,newVn,0);
fd->opSetOutput(newZext,oldVn);
fd->opInsertAfter(newZext, pushOp);
}
// Define all the outputs first
for(iter=oplist.begin();iter!=oplist.end();++iter) {
PcodeOp *newop = fd->newOp((*iter).numparams,(*iter).op->getAddr());
(*iter).replacement = newop;
if ((*iter).opc == CPUI_INDIRECT) {
patchIndirect( newop, (*iter).op, (*iter).output );
}
else {
fd->opSetOpcode(newop,(*iter).opc);
ReplaceVarnode *rout = (*iter).output;
// if (rout != (ReplaceVarnode *)0) {
// if (rout->replacement == (Varnode *)0)
// rout->replacement = fd->newUniqueOut(flowsize,newop);
// else
// fd->opSetOutput(newop,rout->replacement);
// }
fd->opSetOutput(newop,getReplaceVarnode(rout));
fd->opInsertAfter(newop,(*iter).op);
}
fd->opSetOpcode(newop,(*iter).opc);
ReplaceVarnode *rout = (*iter).output;
// if (rout != (ReplaceVarnode *)0) {
// if (rout->replacement == (Varnode *)0)
// rout->replacement = fd->newUniqueOut(flowsize,newop);
// else
// fd->opSetOutput(newop,rout->replacement);
// }
fd->opSetOutput(newop,getReplaceVarnode(rout));
fd->opInsertAfter(newop,(*iter).op);
}
// Set all the inputs
@ -1303,51 +1363,55 @@ void SubvariableFlow::doReplacement(void)
// These are operations that carry flow from the small variable into an existing
// variable of the correct size
list<PatchRecord>::iterator piter;
for(piter=patchlist.begin();piter!=patchlist.end();++piter) {
PcodeOp *pullop = (*piter).pullop;
int4 type = (*piter).type;
if (type == 0) {
for(;piter!=patchlist.end();++piter) {
PcodeOp *pullop = (*piter).patchOp;
switch((*piter).type) {
case PatchRecord::copy_patch:
while(pullop->numInput() > 1)
fd->opRemoveInput(pullop,pullop->numInput()-1);
fd->opSetInput(pullop,getReplaceVarnode((*piter).in1),0);
fd->opSetOpcode(pullop,CPUI_COPY);
}
else if (type == 1) { // A comparison
break;
case PatchRecord::compare_patch:
fd->opSetInput(pullop,getReplaceVarnode((*piter).in1),0);
fd->opSetInput(pullop,getReplaceVarnode((*piter).in2),1);
}
else if (type == 2) { // A call parameter or return value
break;
case PatchRecord::parameter_patch:
fd->opSetInput(pullop,getReplaceVarnode((*piter).in1),(*piter).slot);
}
else if (type == 3) {
// These are operations that flow the small variable into a bigger variable but
// where all the remaining bits are zero
int4 sa = (*piter).slot;
vector<Varnode *> invec;
Varnode *inVn = getReplaceVarnode((*piter).in1);
int4 outSize = pullop->getOut()->getSize();
if (sa == 0) {
invec.push_back( inVn );
OpCode opc = (inVn->getSize() == outSize) ? CPUI_COPY : CPUI_INT_ZEXT;
fd->opSetOpcode( pullop, opc );
fd->opSetAllInput(pullop,invec);
}
else {
if (inVn->getSize() != outSize) {
PcodeOp *zextop = fd->newOp(1,pullop->getAddr());
fd->opSetOpcode( zextop, CPUI_INT_ZEXT );
Varnode *zextout = fd->newUniqueOut(outSize,zextop);
fd->opSetInput(zextop,inVn,0);
fd->opInsertBefore(zextop,pullop);
invec.push_back(zextout);
}
else
break;
case PatchRecord::extension_patch:
{
// These are operations that flow the small variable into a bigger variable but
// where all the remaining bits are zero
int4 sa = (*piter).slot;
vector<Varnode *> invec;
Varnode *inVn = getReplaceVarnode((*piter).in1);
int4 outSize = pullop->getOut()->getSize();
if (sa == 0) {
invec.push_back(inVn);
invec.push_back(fd->newConstant(4,sa));
fd->opSetAllInput(pullop,invec);
fd->opSetOpcode( pullop, CPUI_INT_LEFT);
OpCode opc = (inVn->getSize() == outSize) ? CPUI_COPY : CPUI_INT_ZEXT;
fd->opSetOpcode(pullop, opc);
fd->opSetAllInput(pullop, invec);
}
else {
if (inVn->getSize() != outSize) {
PcodeOp *zextop = fd->newOp(1, pullop->getAddr());
fd->opSetOpcode(zextop, CPUI_INT_ZEXT);
Varnode *zextout = fd->newUniqueOut(outSize, zextop);
fd->opSetInput(zextop, inVn, 0);
fd->opInsertBefore(zextop, pullop);
invec.push_back(zextout);
}
else
invec.push_back(inVn);
invec.push_back(fd->newConstant(4, sa));
fd->opSetAllInput(pullop, invec);
fd->opSetOpcode(pullop, CPUI_INT_LEFT);
}
break;
}
case PatchRecord::push_patch:
break; // Shouldn't see these here, handled earlier
}
}
}

View File

@ -62,14 +62,22 @@ class SubvariableFlow {
/// \brief Operation with a new logical value as (part of) input, but output Varnode is unchanged
class PatchRecord {
friend class SubvariableFlow;
int4 type; ///< 0=COPY 1=compare 2=call 3=AND/SHIFT
PcodeOp *pullop; ///< Op being affected
/// The possible types of patches on ops being performed
enum patchtype {
copy_patch, ///< Turn op into a COPY of the logical value
compare_patch, ///< Turn compare op inputs into logical values
parameter_patch, ///< Convert a CALL/CALLIND/RETURN/BRANCHIND parameter into logical value
extension_patch, ///< Convert op into something that copies/extends logical value, adding zero bits
push_patch ///< Convert an operator output to the logical value
};
patchtype type; ///< The type of \b this patch
PcodeOp *patchOp; ///< Op being affected
ReplaceVarnode *in1; ///< The logical variable input
ReplaceVarnode *in2; ///< (optional second parameter)
int4 slot; ///< slot being affected or other parameter
};
int4 flowsize; ///< Size of the lgoical data-flow in bytes
int4 flowsize; ///< Size of the logical data-flow in bytes
int4 bitsize; ///< Number of bits in logical variable
bool returnsTraversed; ///< Have we tried to flow logical value across CPUI_RETURNs
bool aggressive; ///< Do we "know" initial seed point must be a sub variable
@ -87,16 +95,17 @@ class SubvariableFlow {
ReplaceVarnode *setReplacement(Varnode *vn,uintb mask,bool &inworklist);
ReplaceOp *createOp(OpCode opc,int4 numparam,ReplaceVarnode *outrvn);
ReplaceOp *createOpDown(OpCode opc,int4 numparam,PcodeOp *op,ReplaceVarnode *inrvn,int4 slot);
void patchIndirect(PcodeOp *newop,PcodeOp *oldop,ReplaceVarnode *out);
bool tryCallPull(PcodeOp *op,ReplaceVarnode *rvn,int4 slot);
bool tryReturnPull(PcodeOp *op,ReplaceVarnode *rvn,int4 slot);
bool tryCallReturnPull(PcodeOp *op,ReplaceVarnode *rvn);
bool tryCallReturnPush(PcodeOp *op,ReplaceVarnode *rvn);
bool trySwitchPull(PcodeOp *op,ReplaceVarnode *rvn);
bool traceForward(ReplaceVarnode *rvn); ///< Trace the logical data-flow forward for the given subgraph variable
bool traceBackward(ReplaceVarnode *rvn); ///< Trace the logical data-flow backward for the given subgraph variable
bool traceForwardSext(ReplaceVarnode *rvn); ///< Trace logical data-flow forward assuming sign-extensions
bool traceBackwardSext(ReplaceVarnode *rvn); ///< Trace logical data-flow backward assuming sign-extensions
bool createLink(ReplaceOp *rop,uintb mask,int4 slot,Varnode *vn);
bool createCompareBridge(PcodeOp *op,ReplaceVarnode *inrvn,int4 slot,Varnode *othervn);
void addPush(PcodeOp *pushOp,ReplaceVarnode *rvn);
void addTerminalPatch(PcodeOp *pullop,ReplaceVarnode *rvn);
void addTerminalPatchSameOp(PcodeOp *pullop,ReplaceVarnode *rvn,int4 slot);
void addBooleanPatch(PcodeOp *pullop,ReplaceVarnode *rvn,int4 slot);

View File

@ -196,6 +196,7 @@ void TransformVar::createReplacement(Funcdata *fd)
if (vn->getSpace()->isBigEndian())
bytePos = vn->getSize() - bytePos - byteSize;
Address addr = vn->getAddr() + bytePos;
addr.renormalize(byteSize);
if (def == (TransformOp *)0)
replacement = fd->newVarnode(byteSize,addr);
else

View File

@ -118,6 +118,41 @@ void SpacebaseSpace::restoreXml(const Element *el)
contain = getManager()->getSpaceByName(el->getAttributeValue("contain"));
}
/// The \e join space range maps to the underlying pieces in a natural endian aware way.
/// Given an offset in the range, figure out what address it is mapping to.
/// The particular piece is passed back as an index, and the Address is returned.
/// \param offset is the offset within \b this range to map
/// \param pos will hold the passed back piece index
/// \return the Address mapped to
Address JoinRecord::getEquivalentAddress(uintb offset,int4 &pos) const
{
if (offset < unified.offset)
return Address(); // offset comes before this range
int4 smallOff = (int4)(offset - unified.offset);
if (pieces[0].space->isBigEndian()) {
for(pos=0;pos<pieces.size();++pos) {
int4 pieceSize = pieces[pos].size;
if (smallOff < pieceSize)
break;
smallOff -= pieceSize;
}
if (pos == pieces.size())
return Address(); // offset comes after this range
}
else {
for (pos = pieces.size() - 1; pos >= 0; --pos) {
int4 pieceSize = pieces[pos].size;
if (smallOff < pieceSize)
break;
smallOff -= pieceSize;
}
if (pos < 0)
return Address(); // offset comes after this range
}
return Address(pieces[pos].space,pieces[pos].offset + smallOff);
}
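// Worked example (illustrative, hypothetical 4-byte pieces r1:r0 with unified.offset == 0):
// pieces[0] is the most significant piece, so for big-endian pieces offsets 0..3 fall in r1 and
// offsets 4..7 fall in r0; getEquivalentAddress(5,pos) returns r0+1 with pos == 1.  Little-endian
// pieces are walked from the back, so the same offset 5 would instead land at r1+1 with pos == 0.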
/// Allow sorting on JoinRecords so that a collection of pieces can be quickly mapped to
/// its logical whole, specified with a join address
bool JoinRecord::operator<(const JoinRecord &op2) const
@ -612,6 +647,30 @@ JoinRecord *AddrSpaceManager::findAddJoin(const vector<VarnodeData> &pieces,uint
return splitlist.back();
}
/// Given a specific \e offset into the \e join address space, recover the JoinRecord that
/// contains the offset, as a range in the \e join address space. If there is no existing
/// record, null is returned.
/// \param offset is an offset into the join space
/// \return the JoinRecord containing that offset or null
JoinRecord *AddrSpaceManager::findJoinInternal(uintb offset) const
{
int4 min=0;
int4 max=splitlist.size()-1;
while(min<=max) { // Binary search
int4 mid = (min+max)/2;
JoinRecord *rec = splitlist[mid];
uintb val = rec->unified.offset;
if (val + rec->unified.size <= offset)
min = mid + 1;
else if (val > offset)
max = mid - 1;
else
return rec;
}
return (JoinRecord *)0;
}
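// The binary search assumes splitlist is sorted by unified.offset with non-overlapping ranges;
// a record matches exactly when  unified.offset <= offset < unified.offset + unified.size.
// For example (illustrative), with records covering [0x100,0x108) and [0x200,0x210), an offset of
// 0x204 returns the second record, while 0x108 or 0x1ff return null.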
/// Given a specific \e offset into the \e join address space, recover the JoinRecord that
/// lists the pieces corresponding to that offset. The offset must originally have come from
/// a JoinRecord returned by \b findAddJoin, otherwise this method throws an exception.
@ -619,7 +678,7 @@ JoinRecord *AddrSpaceManager::findAddJoin(const vector<VarnodeData> &pieces,uint
/// \return the JoinRecord for that offset
JoinRecord *AddrSpaceManager::findJoin(uintb offset) const
{ // Find a split record given the unified (join space) offset
{
int4 min=0;
int4 max=splitlist.size()-1;
while(min<=max) { // Binary search
@ -733,6 +792,48 @@ Address AddrSpaceManager::constructJoinAddress(const Translate *translate,
return join->getUnified().getAddr();
}
/// If an Address in the \e join AddressSpace is shifted from its original offset, it may no
/// longer have a valid JoinRecord. The shift or size change may even make the address of
/// one of the pieces a more natural representation. Given a new Address and size, this method
/// decides if there is a matching JoinRecord. If not, it either constructs a new JoinRecord or
/// computes the address within the containing piece. The given Address is changed if necessary
/// either to the offset corresponding to the new JoinRecord or to a normal \e non-join Address.
/// \param addr is the given Address
/// \param size is the size of the range in bytes
void AddrSpaceManager::renormalizeJoinAddress(Address &addr,int4 size)
{
JoinRecord *joinRecord = findJoinInternal(addr.getOffset());
if (joinRecord == (JoinRecord *)0)
throw LowlevelError("Join address not covered by a JoinRecord");
if (addr.getOffset() == joinRecord->unified.offset && size == joinRecord->unified.size)
return; // JoinRecord matches perfectly, no change necessary
int4 pos1;
Address addr1 = joinRecord->getEquivalentAddress(addr.getOffset(), pos1);
int4 pos2;
Address addr2 = joinRecord->getEquivalentAddress(addr.getOffset() + (size-1), pos2);
if (addr2.isInvalid())
throw LowlevelError("Join address range not covered");
if (pos1 == pos2) {
addr = addr1;
return;
}
vector<VarnodeData> newPieces;
newPieces.push_back(joinRecord->pieces[pos1]);
int4 sizeTrunc1 = (int4)(addr1.getOffset() - joinRecord->pieces[pos1].offset);
pos1 += 1;
while(pos1 <= pos2) {
newPieces.push_back(joinRecord->pieces[pos1]);
pos1 += 1;
}
int4 sizeTrunc2 = joinRecord->pieces[pos2].size - (int4)(addr2.getOffset() - joinRecord->pieces[pos2].offset) - 1;
newPieces.front().offset = addr1.getOffset();
newPieces.front().size -= sizeTrunc1;
newPieces.back().size -= sizeTrunc2;
JoinRecord *newJoinRecord = findAddJoin(newPieces, size);
addr = Address(newJoinRecord->unified.space,newJoinRecord->unified.offset);
}
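// Worked example (illustrative, hypothetical 4-byte registers r1:r0 joined at offset 0, size 8):
//   - renormalizing (join,0) with size 8 matches the record exactly, so the address is unchanged;
//   - a subrange falling entirely inside one piece collapses to that piece's own address, e.g. the
//     least significant half becomes an address inside r0;
//   - a subrange straddling the piece boundary builds a new JoinRecord from the trimmed pieces via
//     findAddJoin() and rewrites addr to that record's offset in the join space.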
/// This constructs only a shell for the Translate object. It
/// won't be usable until it is initialized for a specific processor
/// The main entry point for this is the Translate::initialize method,

View File

@ -201,6 +201,7 @@ public:
bool isFloatExtension(void) const { return (pieces.size() == 1); } ///< Does this record extend a float varnode
const VarnodeData &getPiece(int4 i) const { return pieces[i]; } ///< Get the i-th piece
const VarnodeData &getUnified(void) const { return unified; } ///< Get the Varnode whole
Address getEquivalentAddress(uintb offset,int4 &pos) const; ///< Given an offset in the \e join space, get the equivalent address of the piece
bool operator<(const JoinRecord &op2) const; ///< Compare records lexicographically by pieces
};
@ -242,6 +243,7 @@ protected:
void copySpaces(const AddrSpaceManager *op2); ///< Copy spaces from another manager
void addSpacebasePointer(SpacebaseSpace *basespace,const VarnodeData &ptrdata,int4 truncSize,bool stackGrowth); ///< Set the base register of a spacebase space
void insertResolver(AddrSpace *spc,AddressResolver *rsolv); ///< Override the base resolver for a space
JoinRecord *findJoinInternal(uintb offset) const; ///< Find JoinRecord for \e offset in the join space
public:
AddrSpaceManager(void); ///< Construct an empty address space manager
virtual ~AddrSpaceManager(void); ///< Destroy the manager
@ -272,6 +274,9 @@ public:
/// \brief Build a logical whole from register pairs
Address constructJoinAddress(const Translate *translate,const Address &hiaddr,int4 hisz,const Address &loaddr,int4 losz);
/// \brief Make sure a possibly offset \e join address has a proper JoinRecord
void renormalizeJoinAddress(Address &addr,int4 size);
};
/// \brief The interface to a translation engine for a processor.

View File

@ -115,6 +115,7 @@ public:
virtual Datatype *clone(void) const=0; ///< Clone the data-type
virtual void saveXml(ostream &s) const; ///< Serialize the data-type to XML
int4 typeOrder(const Datatype &op) const { if (this==&op) return 0; return compare(op,10); } ///< Order this with -op- datatype
int4 typeOrderBool(const Datatype &op) const; ///< Order \b this with -op-, treating \e bool data-type as special
void saveXmlBasic(ostream &s) const; ///< Save basic data-type properties
void saveXmlRef(ostream &s) const; ///< Write an XML reference of \b this to stream
};
@ -447,4 +448,19 @@ public:
void cacheCoreTypes(void); ///< Cache common types
};
/// Order data-types, with special handling of the \e bool data-type. Data-types are compared
/// using the normal ordering, but \e bool is ordered after all other data-types. A return value
/// of 0 indicates the data-types are the same, -1 indicates that \b this is preferred (ordered earlier),
/// and 1 indicates \b this is ordered later.
/// \param op is the other data-type to compare with \b this
/// \return -1, 0, or 1
inline int4 Datatype::typeOrderBool(const Datatype &op) const
{
if (this == &op) return 0;
if (metatype == TYPE_BOOL) return 1; // Never prefer bool over other data-types
if (op.metatype == TYPE_BOOL) return -1;
return compare(op,10);
}
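// Minimal usage sketch (assumes a TypeFactory instance named types; illustrative only):
//   Datatype *b = types.getBase(1,TYPE_BOOL);
//   Datatype *i = types.getBase(4,TYPE_INT);
//   b->typeOrderBool(*i);     // returns 1  : bool is never preferred over another data-type
//   i->typeOrderBool(*b);     // returns -1 : the int4 data-type is ordered (preferred) first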
#endif

View File

@ -123,7 +123,7 @@ public:
/// Given a specific language and PcodeOp, emit the expression rooted at the operation.
/// \param lng is the PrintLanguage to emit
/// \param op is the specific PcodeOp
virtual void push(PrintLanguage *lng,const PcodeOp *op) const=0;
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const=0;
/// \brief Print (for debugging purposes) \b this specific PcodeOp to the stream
///
@ -207,7 +207,7 @@ public:
TypeOpCopy(TypeFactory *t); ///< Constructor
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opCopy(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opCopy(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -218,7 +218,7 @@ public:
// virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opLoad(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opLoad(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -228,7 +228,7 @@ public:
TypeOpStore(TypeFactory *t); ///< Constructor
// virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opStore(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opStore(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -236,7 +236,7 @@ public:
class TypeOpBranch : public TypeOp {
public:
TypeOpBranch(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opBranch(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opBranch(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -245,7 +245,7 @@ class TypeOpCbranch : public TypeOp {
public:
TypeOpCbranch(TypeFactory *t); ///< Constructor
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opCbranch(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opCbranch(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -253,7 +253,7 @@ public:
class TypeOpBranchind : public TypeOp {
public:
TypeOpBranchind(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opBranchind(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opBranchind(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -261,7 +261,7 @@ public:
class TypeOpCall : public TypeOp {
public:
TypeOpCall(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opCall(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opCall(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual Datatype *getOutputLocal(const PcodeOp *op) const;
@ -271,7 +271,7 @@ public:
class TypeOpCallind : public TypeOp {
public:
TypeOpCallind(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opCallind(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opCallind(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual Datatype *getOutputLocal(const PcodeOp *op) const;
@ -281,7 +281,7 @@ public:
class TypeOpCallother : public TypeOp {
public:
TypeOpCallother(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opCallother(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opCallother(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
virtual string getOperatorName(const PcodeOp *op) const;
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
@ -292,7 +292,7 @@ public:
class TypeOpReturn : public TypeOp {
public:
TypeOpReturn(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opReturn(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opReturn(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
};
@ -301,7 +301,7 @@ public:
class TypeOpEqual : public TypeOpBinary {
public:
TypeOpEqual(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntEqual(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntEqual(op); }
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
};
@ -309,7 +309,7 @@ public:
class TypeOpNotEqual : public TypeOpBinary {
public:
TypeOpNotEqual(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntNotEqual(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntNotEqual(op); }
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
};
@ -317,7 +317,7 @@ public:
class TypeOpIntSless : public TypeOpBinary {
public:
TypeOpIntSless(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntSless(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntSless(op); }
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
};
@ -325,7 +325,7 @@ public:
class TypeOpIntSlessEqual : public TypeOpBinary {
public:
TypeOpIntSlessEqual(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntSlessEqual(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntSlessEqual(op); }
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
};
@ -333,7 +333,7 @@ public:
class TypeOpIntLess : public TypeOpBinary {
public:
TypeOpIntLess(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntLess(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntLess(op); }
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
};
@ -341,7 +341,7 @@ public:
class TypeOpIntLessEqual : public TypeOpBinary {
public:
TypeOpIntLessEqual(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntLessEqual(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntLessEqual(op); }
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
};
@ -349,7 +349,7 @@ public:
class TypeOpIntZext : public TypeOpFunc {
public:
TypeOpIntZext(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntZext(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntZext(op,readOp); }
virtual string getOperatorName(const PcodeOp *op) const;
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
};
@ -358,7 +358,7 @@ public:
class TypeOpIntSext : public TypeOpFunc {
public:
TypeOpIntSext(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntSext(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntSext(op,readOp); }
virtual string getOperatorName(const PcodeOp *op) const;
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
};
@ -367,7 +367,7 @@ public:
class TypeOpIntAdd : public TypeOpBinary {
public:
TypeOpIntAdd(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntAdd(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntAdd(op); }
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
};
@ -375,7 +375,7 @@ public:
class TypeOpIntSub : public TypeOpBinary {
public:
TypeOpIntSub(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntSub(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntSub(op); }
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
};
@ -383,7 +383,7 @@ public:
class TypeOpIntCarry : public TypeOpFunc {
public:
TypeOpIntCarry(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntCarry(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntCarry(op); }
virtual string getOperatorName(const PcodeOp *op) const;
};
@ -391,7 +391,7 @@ public:
class TypeOpIntScarry : public TypeOpFunc {
public:
TypeOpIntScarry(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntScarry(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntScarry(op); }
virtual string getOperatorName(const PcodeOp *op) const;
};
@ -399,7 +399,7 @@ public:
class TypeOpIntSborrow : public TypeOpFunc {
public:
TypeOpIntSborrow(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntSborrow(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntSborrow(op); }
virtual string getOperatorName(const PcodeOp *op) const;
};
@ -407,7 +407,7 @@ public:
class TypeOpInt2Comp : public TypeOpUnary {
public:
TypeOpInt2Comp(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opInt2Comp(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opInt2Comp(op); }
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
};
@ -415,7 +415,7 @@ public:
class TypeOpIntNegate : public TypeOpUnary {
public:
TypeOpIntNegate(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntNegate(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntNegate(op); }
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
};
@ -423,7 +423,7 @@ public:
class TypeOpIntXor : public TypeOpBinary {
public:
TypeOpIntXor(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntXor(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntXor(op); }
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
};
@ -431,7 +431,7 @@ public:
class TypeOpIntAnd : public TypeOpBinary {
public:
TypeOpIntAnd(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntAnd(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntAnd(op); }
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
};
@ -439,7 +439,7 @@ public:
class TypeOpIntOr : public TypeOpBinary {
public:
TypeOpIntOr(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntOr(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntOr(op); }
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
};
@ -447,7 +447,7 @@ public:
class TypeOpIntLeft : public TypeOpBinary {
public:
TypeOpIntLeft(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntLeft(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntLeft(op); }
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
};
@ -456,7 +456,7 @@ public:
class TypeOpIntRight : public TypeOpBinary {
public:
TypeOpIntRight(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntRight(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntRight(op); }
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
@ -466,7 +466,7 @@ public:
class TypeOpIntSright : public TypeOpBinary {
public:
TypeOpIntSright(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntSright(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntSright(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
@ -477,7 +477,7 @@ public:
class TypeOpIntMult : public TypeOpBinary {
public:
TypeOpIntMult(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntMult(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntMult(op); }
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
};
@ -485,7 +485,7 @@ public:
class TypeOpIntDiv : public TypeOpBinary {
public:
TypeOpIntDiv(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntDiv(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntDiv(op); }
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
};
@ -493,7 +493,7 @@ public:
class TypeOpIntSdiv : public TypeOpBinary {
public:
TypeOpIntSdiv(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntSdiv(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntSdiv(op); }
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
};
@ -501,7 +501,7 @@ public:
class TypeOpIntRem : public TypeOpBinary {
public:
TypeOpIntRem(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntRem(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntRem(op); }
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
};
@ -509,7 +509,7 @@ public:
class TypeOpIntSrem : public TypeOpBinary {
public:
TypeOpIntSrem(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIntSrem(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIntSrem(op); }
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
};
@ -517,161 +517,161 @@ public:
class TypeOpBoolNegate : public TypeOpUnary {
public:
TypeOpBoolNegate(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opBoolNegate(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opBoolNegate(op); }
};
/// \brief Information about the BOOL_XOR op-code
class TypeOpBoolXor : public TypeOpBinary {
public:
TypeOpBoolXor(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opBoolXor(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opBoolXor(op); }
};
/// \brief Information about the BOOL_AND op-code
class TypeOpBoolAnd : public TypeOpBinary {
public:
TypeOpBoolAnd(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opBoolAnd(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opBoolAnd(op); }
};
/// \brief Information about the BOOL_OR op-code
class TypeOpBoolOr : public TypeOpBinary {
public:
TypeOpBoolOr(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opBoolOr(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opBoolOr(op); }
};
/// \brief Information about the FLOAT_EQUAL op-code
class TypeOpFloatEqual : public TypeOpBinary {
public:
TypeOpFloatEqual(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatEqual(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatEqual(op); }
};
/// \brief Information about the FLOAT_NOTEQUAL op-code
class TypeOpFloatNotEqual : public TypeOpBinary {
public:
TypeOpFloatNotEqual(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatNotEqual(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatNotEqual(op); }
};
/// \brief Information about the FLOAT_LESS op-code
class TypeOpFloatLess : public TypeOpBinary {
public:
TypeOpFloatLess(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatLess(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatLess(op); }
};
/// \brief Information about the FLOAT_LESSEQUAL op-code
class TypeOpFloatLessEqual : public TypeOpBinary {
public:
TypeOpFloatLessEqual(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatLessEqual(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatLessEqual(op); }
};
/// \brief Information about the FLOAT_NAN op-code
class TypeOpFloatNan : public TypeOpFunc {
public:
TypeOpFloatNan(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatNan(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatNan(op); }
};
/// \brief Information about the FLOAT_ADD op-code
class TypeOpFloatAdd : public TypeOpBinary {
public:
TypeOpFloatAdd(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatAdd(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatAdd(op); }
};
/// \brief Information about the FLOAT_DIV op-code
class TypeOpFloatDiv : public TypeOpBinary {
public:
TypeOpFloatDiv(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatDiv(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatDiv(op); }
};
/// \brief Information about the FLOAT_MULT op-code
class TypeOpFloatMult : public TypeOpBinary {
public:
TypeOpFloatMult(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatMult(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatMult(op); }
};
/// \brief Information about the FLOAT_SUB op-code
class TypeOpFloatSub : public TypeOpBinary {
public:
TypeOpFloatSub(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatSub(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatSub(op); }
};
/// \brief Information about the FLOAT_NEG op-code
class TypeOpFloatNeg : public TypeOpUnary {
public:
TypeOpFloatNeg(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatNeg(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatNeg(op); }
};
/// \brief Information about the FLOAT_ABS op-code
class TypeOpFloatAbs : public TypeOpFunc {
public:
TypeOpFloatAbs(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatAbs(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatAbs(op); }
};
/// \brief Information about the FLOAT_SQRT op-code
class TypeOpFloatSqrt : public TypeOpFunc {
public:
TypeOpFloatSqrt(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatSqrt(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatSqrt(op); }
};
/// \brief Information about the FLOAT_INT2FLOAT op-code
class TypeOpFloatInt2Float : public TypeOpFunc {
public:
TypeOpFloatInt2Float(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatInt2Float(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatInt2Float(op); }
};
/// \brief Information about the FLOAT_FLOAT2FLOAT op-code
class TypeOpFloatFloat2Float : public TypeOpFunc {
public:
TypeOpFloatFloat2Float(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatFloat2Float(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatFloat2Float(op); }
};
/// \brief Information about the FLOAT_TRUNC op-code
class TypeOpFloatTrunc : public TypeOpFunc {
public:
TypeOpFloatTrunc(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatTrunc(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatTrunc(op); }
};
/// \brief Information about the FLOAT_CEIL op-code
class TypeOpFloatCeil : public TypeOpFunc {
public:
TypeOpFloatCeil(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatCeil(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatCeil(op); }
};
/// \brief Information about the FLOAT_FLOOR op-code
class TypeOpFloatFloor : public TypeOpFunc {
public:
TypeOpFloatFloor(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatFloor(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatFloor(op); }
};
/// \brief Information about the FLOAT_ROUND op-code
class TypeOpFloatRound : public TypeOpFunc {
public:
TypeOpFloatRound(TypeFactory *t,const Translate *trans); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opFloatRound(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opFloatRound(op); }
};
/// \brief Information about the MULTIEQUAL op-code
class TypeOpMulti : public TypeOp {
public:
TypeOpMulti(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opMultiequal(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opMultiequal(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -680,7 +680,7 @@ class TypeOpIndirect : public TypeOp {
public:
TypeOpIndirect(TypeFactory *t); ///< Constructor
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opIndirect(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opIndirect(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -690,7 +690,7 @@ public:
TypeOpPiece(TypeFactory *t); ///< Constructor
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
virtual string getOperatorName(const PcodeOp *op) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opPiece(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opPiece(op); }
};
/// \brief Information about the SUBPIECE op-code
@ -701,7 +701,7 @@ public:
// virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
virtual string getOperatorName(const PcodeOp *op) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opSubpiece(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opSubpiece(op); }
};
/// \brief Information about the CAST op-code
@ -710,7 +710,7 @@ public:
TypeOpCast(TypeFactory *t); ///< Constructor
// We don't care what types are cast
// So no input and output requirements
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opCast(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opCast(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -722,7 +722,7 @@ public:
virtual Datatype *getOutputLocal(const PcodeOp *op) const;
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opPtradd(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opPtradd(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -734,7 +734,7 @@ public:
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opPtrsub(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opPtrsub(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -753,7 +753,7 @@ public:
// virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const;
virtual Datatype *getOutputToken(const PcodeOp *op,CastStrategy *castStrategy) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opSegmentOp(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opSegmentOp(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -765,7 +765,7 @@ public:
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const { return (Datatype *)0; } // Never needs casting
virtual Datatype *getOutputLocal(const PcodeOp *op) const;
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opCpoolRefOp(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opCpoolRefOp(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -774,7 +774,7 @@ class TypeOpNew : public TypeOp {
public:
TypeOpNew(TypeFactory *t); ///< Constructor
virtual Datatype *getInputCast(const PcodeOp *op,int4 slot,const CastStrategy *castStrategy) const { return (Datatype *)0; } // Never needs casting
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opNewOp(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opNewOp(op); }
virtual void printRaw(ostream &s,const PcodeOp *op);
};
@ -783,7 +783,7 @@ class TypeOpInsert : public TypeOpFunc {
public:
TypeOpInsert(TypeFactory *t); ///< Constructor
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opInsertOp(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opInsertOp(op); }
};
/// \brief Information about the EXTRACT op-code
@ -791,14 +791,14 @@ class TypeOpExtract : public TypeOpFunc {
public:
TypeOpExtract(TypeFactory *t); ///< Constructor
virtual Datatype *getInputLocal(const PcodeOp *op,int4 slot) const;
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opExtractOp(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opExtractOp(op); }
};
/// \brief Information about the POPCOUNT op-code
class TypeOpPopcount : public TypeOpFunc {
public:
TypeOpPopcount(TypeFactory *t); ///< Constructor
virtual void push(PrintLanguage *lng,const PcodeOp *op) const { lng->opPopcountOp(op); }
virtual void push(PrintLanguage *lng,const PcodeOp *op,const PcodeOp *readOp) const { lng->opPopcountOp(op); }
};
#endif

@ -112,7 +112,12 @@ void HighVariable::updateFlags(void) const
highflags &= ~flagsdirty; // Clear the dirty flag
}
/// Using Datatype::typeOrder, find the member Varnode with the most specific data-type.
/// Find the member Varnode with the most \e specialized data-type, handling \e bool specially.
/// Boolean data-types are \e specialized in the data-type lattice, but not all byte values are boolean values.
/// Within the Varnode/PcodeOp tree, the \e bool data-type can only propagate to a Varnode if it is verified to
/// only take the boolean values 0 and 1. Since the representative stands for the data-type of all
/// instances, the HighVariable cannot be boolean if any single instance is not boolean, even though \e bool
/// is more specialized. This method uses Datatype::typeOrderBool() to implement the special handling.
/// \return the representative member
Varnode *HighVariable::getTypeRepresentative(void) const
@ -129,7 +134,7 @@ Varnode *HighVariable::getTypeRepresentative(void) const
if (vn->isTypeLock())
rep = vn;
}
else if (0>vn->getType()->typeOrder(*rep->getType()))
else if (0>vn->getType()->typeOrderBool(*rep->getType()))
rep = vn;
}
return rep;
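
The doc-comment above explains the reasoning behind this change: \e bool is more specialized in the data-type lattice, but a single non-boolean instance must prevent the whole HighVariable from becoming boolean. The stand-alone C++ sketch below illustrates only that ordering idea; MiniType, the specificity field, and the typeOrder/typeOrderBool/pickRepresentative functions are hypothetical names invented for the illustration and are not the decompiler's actual API or the real implementation of Datatype::typeOrderBool().

#include <iostream>
#include <vector>

// Hypothetical stand-ins for illustration only (not decompiler classes).
enum MetaType { META_BOOL, META_INT };

struct MiniType {
  MetaType meta;        // coarse kind of the data-type
  int specificity;      // higher = more specialized in the lattice
};

// Ordinary ordering: a negative result means 'a' is preferred over 'b'.
int typeOrder(const MiniType &a, const MiniType &b) {
  return b.specificity - a.specificity;
}

// Bool-aware ordering: a bool type never wins against a non-bool type,
// because one non-bool instance rules out bool for the whole variable.
int typeOrderBool(const MiniType &a, const MiniType &b) {
  if (a.meta == META_BOOL && b.meta != META_BOOL) return 1;   // 'a' loses
  if (b.meta == META_BOOL && a.meta != META_BOOL) return -1;  // 'a' wins
  return typeOrder(a, b);
}

// Pick a representative the same way the loop in the hunk above does.
const MiniType *pickRepresentative(const std::vector<MiniType> &instances) {
  const MiniType *rep = &instances.front();      // assumes at least one instance
  for (const MiniType &vn : instances) {
    if (typeOrderBool(vn, *rep) < 0)             // strictly preferred over current rep
      rep = &vn;
  }
  return rep;
}

int main() {
  // One instance is bool (more specialized), another is a plain integer.
  std::vector<MiniType> instances = { {META_BOOL, 10}, {META_INT, 5} };
  const MiniType *rep = pickRepresentative(instances);
  // Prints "non-bool": the single non-bool instance prevents a bool representative.
  std::cout << (rep->meta == META_BOOL ? "bool" : "non-bool") << std::endl;
  return 0;
}

With the plain typeOrder() comparison the bool instance would win on specificity alone; switching to the bool-aware comparison changes the outcome, which is the intent of the typeOrder() to typeOrderBool() change shown in the hunk above.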