fixup! ARM/Decoder: Various fixes

* Use C++14 std::integer_sequence.
* Split ArmDecoder::Instruction into ArmInstruction and ThumbInstruction.
* Make Cond, Register, ShiftType and SignExtendRotation enum classes.
* ThumbMatcher is no longer allocated on the heap.
* JitX64::CondManager now uses Cond instead of SkyEye's ConditionCode.
* Add utility functions IsValidArmReg and MakeRegList.
MerryMage 2016-04-05 08:41:21 +01:00
parent d9cf6474cf
commit 9555adb4f1
18 changed files with 592 additions and 504 deletions

@@ -5,6 +5,7 @@
 #include <algorithm>
 #include <cstddef>
 #include <cstring>
+#include <utility>
 
 #include <boost/optional.hpp>
@@ -17,21 +18,13 @@
 namespace ArmDecoder {
 
 namespace Impl {
-    // std::integer_sequence and std::make_integer_sequence are only available in C++14
-
-    /// This type represents a sequence of integers
-    template<size_t ...>
-    struct integer_sequence {};
-
-    /// This metafunction generates a sequence of integers from 0..N
-    template<size_t N, size_t ...seq>
-    struct make_integer_sequence : make_integer_sequence<N - 1, N - 1, seq...> {};
-
-    // Internal implementation for make_integer_sequence
-    template<size_t ...seq>
-    struct make_integer_sequence<0, seq...> {
-        typedef integer_sequence<seq...> type;
-    };
+    // Internal implementation for call
+    template<size_t ...seq, typename Container, typename ...Args>
+    void call_impl(std::integer_sequence<size_t, seq...>, Visitor* v, void (Visitor::*fn)(Args...), const Container& list) {
+        using FunctionArgTypes = typename std::tuple<Args...>;
+        // Here we static_cast each element in list to the corresponding argument type for fn.
+        (v->*fn)(static_cast<typename std::tuple_element<seq, FunctionArgTypes>::type>(std::get<seq>(list))...);
+    }
 
     /**
      * This function takes a member function of Visitor and calls it with the parameters specified in list.
@@ -40,20 +33,14 @@ namespace Impl {
      * @param fn Member function to call on v.
      * @param list List of arguments that will be splatted.
      */
-    template<size_t NumArgs, typename Function, typename Container>
-    void call(Visitor* v, Function fn, const Container& list) {
-        call_impl(typename make_integer_sequence<NumArgs>::type(), v, fn, list);
-    }
-
-    // Internal implementation for call
-    template<size_t ...seq, typename Function, typename Container>
-    void call_impl(integer_sequence<seq...>, Visitor* v, Function fn, const Container& list) {
-        (v->*fn)(std::get<seq>(list)...);
+    template<size_t NumArgs, typename Container, typename ...Args>
+    void call(Visitor* v, void (Visitor::*fn)(Args...), const Container& list) {
+        call_impl(typename std::index_sequence_for<Args...>{}, v, fn, list);
     }
 
     /// Function has NumArgs arguments
     template<size_t NumArgs, typename Function>
-    struct MatcherImpl : Matcher {
+    struct MatcherImpl : ArmMatcher {
         std::array<u32, NumArgs> masks = {};
         std::array<size_t, NumArgs> shifts = {};
         Function fn = nullptr;
@@ -68,7 +55,7 @@ namespace Impl {
 }
 
 template<size_t NumArgs, typename Function>
-static std::unique_ptr<Matcher> MakeMatcher(const char format[32], Function fn) {
+static std::unique_ptr<ArmMatcher> MakeMatcher(const char format[32], Function fn) {
     auto ret = Common::make_unique<Impl::MatcherImpl<NumArgs, Function>>();
     ret->fn = fn;
     ret->masks.fill(0);
@@ -111,10 +98,10 @@ static std::unique_ptr<Matcher> MakeMatcher(const char format[32], Function fn)
     ASSERT(arg == NumArgs - 1);
 
-    return std::unique_ptr<Matcher>(std::move(ret));
+    return std::unique_ptr<ArmMatcher>(std::move(ret));
 }
 
-static const std::array<Instruction, 221> arm_instruction_table = {{
+static const std::array<ArmInstruction, 221> arm_instruction_table = {{
     // Branch instructions
     { "BLX (immediate)", MakeMatcher<2>("1111101hvvvvvvvvvvvvvvvvvvvvvvvv", &Visitor::BLX_imm) }, // ARMv5
     { "BLX (register)",  MakeMatcher<2>("cccc000100101111111111110011mmmm", &Visitor::BLX_reg) }, // ARMv5
@@ -387,12 +374,11 @@ static const std::array<Instruction, 221> arm_instruction_table = {{
     { "SRS", MakeMatcher<0>("0000011--0-00000000000000001----", &Visitor::SRS) }, // ARMv6
 }};
 
-boost::optional<const Instruction&> DecodeArm(u32 i) {
-    auto iterator = std::find_if(arm_instruction_table.cbegin(), arm_instruction_table.cend(), [i](const Instruction& instruction) {
-        return instruction.Match(i);
-    });
+boost::optional<const ArmInstruction&> DecodeArm(u32 i) {
+    auto iterator = std::find_if(arm_instruction_table.cbegin(), arm_instruction_table.cend(),
+                                 [i](const auto& instruction) { return instruction.Match(i); });
 
-    return iterator != arm_instruction_table.cend() ? boost::make_optional<const Instruction&>(*iterator) : boost::none;
+    return iterator != arm_instruction_table.cend() ? boost::make_optional<const ArmInstruction&>(*iterator) : boost::none;
 }
 
 } // namespace ArmDecoder
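Editor's note: the new `call`/`call_impl` pair is the heart of the ARM matcher — `std::index_sequence_for<Args...>` expands to one index per visitor parameter, and each raw field extracted from the instruction word is `static_cast` to the parameter type the member function expects. A minimal standalone sketch of the same technique, compilable as C++14 (the `Visitor` and `BLX_imm` signature here are simplified stand-ins, not the project's actual headers):

```cpp
#include <array>
#include <cstdio>
#include <tuple>
#include <utility>

// Stand-in for the decoder's Visitor; BLX_imm here takes a bool flag and an immediate.
struct Visitor {
    void BLX_imm(bool H, int imm24) { std::printf("H=%d imm24=%d\n", H, imm24); }
};

// Mirrors Impl::call_impl: splat the container into the member-function call,
// static_casting each raw field to the corresponding parameter type of fn.
template<std::size_t ...seq, typename Container, typename ...Args>
void call_impl(std::integer_sequence<std::size_t, seq...>, Visitor* v,
               void (Visitor::*fn)(Args...), const Container& list) {
    using FunctionArgTypes = std::tuple<Args...>;
    (v->*fn)(static_cast<typename std::tuple_element<seq, FunctionArgTypes>::type>(std::get<seq>(list))...);
}

// Mirrors Impl::call: index_sequence_for generates 0..N-1 from the parameter pack.
template<typename Container, typename ...Args>
void call(Visitor* v, void (Visitor::*fn)(Args...), const Container& list) {
    call_impl(std::index_sequence_for<Args...>{}, v, fn, list);
}

int main() {
    Visitor visitor;
    std::array<unsigned, 2> fields{{1u, 42u}};  // raw bitfields, as a matcher would extract them
    call(&visitor, &Visitor::BLX_imm, fields);  // prints "H=1 imm24=42"
}
```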

@@ -10,57 +10,142 @@
 #include <boost/optional.hpp>
 
+#include "common/assert.h"
 #include "common/common_types.h"
 #include "common/common_funcs.h"
 
 namespace ArmDecoder {
 
-// This is a generic ARMv6 decoder using double dispatch.
+// This is a generic ARMv6K decoder using double dispatch.
 
-class Instruction;
+class ArmInstruction;
+class ThumbInstruction;
 class Visitor;
 
-boost::optional<const Instruction&> DecodeArm(u32 instruction);
-boost::optional<const Instruction&> DecodeThumb(u16 instruction);
+/**
+ * This function identifies an ARM instruction and returns the relevant ArmInstruction.
+ * Returns boost::none if the instruction could not be decoded.
+ */
+boost::optional<const ArmInstruction&> DecodeArm(u32 instruction);
+
+/**
+ * This function identifies a Thumb instruction and returns the relevant ThumbInstruction.
+ * Returns boost::none if the instruction could not be decoded.
+ */
+boost::optional<const ThumbInstruction&> DecodeThumb(u16 instruction);
 
-struct Matcher {
+/// INTERNAL
+struct ArmMatcher {
     u32 bit_mask;
     u32 expected;
-    FORCE_INLINE bool Match(u32 x) const {
+    bool Match(u32 x) const {
         return (x & bit_mask) == expected;
     }
     virtual void visit(Visitor* v, u32 inst) = 0;
 };
 
-class Instruction {
-private:
-    const std::unique_ptr<Matcher> matcher;
-
+/**
+ * This structure represents a decoder for a specific ARM instruction.
+ * Calling Visit calls the relevant function on Visitor.
+ */
+class ArmInstruction final {
 public:
-    Instruction(const char* const name, std::unique_ptr<Matcher> matcher) : matcher(std::move(matcher)), name(name) {}
+    ArmInstruction(const char* const name, std::unique_ptr<ArmMatcher> matcher) : name(name), matcher(std::move(matcher)) {}
 
-    const char* const name;
+    const char* const Name() const {
+        return name;
+    }
 
-    FORCE_INLINE bool Match(u32 instruction) const {
-        return (instruction & matcher->bit_mask) == matcher->expected;
-    }
+    bool Match(u32 instruction) const {
+        return matcher->Match(instruction);
+    }
 
-    FORCE_INLINE void Visit(Visitor* v, u32 instruction) const {
+    void Visit(Visitor* v, u32 instruction) const {
         matcher->visit(v, instruction);
     }
+
+private:
+    const char* const name;
+    const std::unique_ptr<ArmMatcher> matcher;
+};
+
+/// INTERNAL
+struct ThumbMatcher {
+    u16 bit_mask;
+    u16 expected;
+    bool Match(u16 x) const {
+        return (x & bit_mask) == expected;
+    }
+    std::function<void(Visitor*, u16 inst)> visit;
+};
+
+/**
+ * This structure represents a decoder for a specific Thumb instruction.
+ * Calling Visit calls the relevant function on Visitor.
+ */
+class ThumbInstruction final {
+public:
+    ThumbInstruction(const char* const name, ThumbMatcher&& matcher) : name(name), matcher(std::move(matcher)) {}
+
+    const char* const Name() const {
+        return name;
+    }
+
+    bool Match(u32 instruction) const {
+        return matcher.Match(instruction);
+    }
+
+    void Visit(Visitor* v, u16 instruction) const {
+        matcher.visit(v, instruction);
+    }
+
+private:
+    const char* const name;
+    const ThumbMatcher matcher;
+};
+
+enum class Cond {
+    EQ, NE, CS, CC, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV
 };
 
-using Cond = u8;
 using Imm4 = u32;
 using Imm5 = u32;
 using Imm8 = u32;
 using Imm11 = u32;
 using Imm12 = u32;
 using Imm24 = u32;
-using Register = int;
 using RegisterList = u16;
-using ShiftType = int;
-using SignExtendRotation = int;
+
+enum class Register {
+    R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, R13, R14, R15,
+    SP = R13,
+    LR = R14,
+    PC = R15,
+    INVALID_REG = 99
+};
+
+static Register operator+ (Register arm_reg, int number) {
+    ASSERT(arm_reg != Register::INVALID_REG);
+
+    int value = static_cast<int>(arm_reg) + number;
+    ASSERT(value >= 0 && value <= 15);
+
+    return static_cast<Register>(value);
+}
+
+enum class ShiftType {
+    LSL,
+    LSR,
+    ASR,
+    ROR ///< RRX falls under this too
+};
+
+enum class SignExtendRotation {
+    ROR_0,  ///< ROR #0 or omitted
+    ROR_8,  ///< ROR #8
+    ROR_16, ///< ROR #16
+    ROR_24  ///< ROR #24
+};
 
 class Visitor {
 public:
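Editor's note: replacing `using Register = int` and `using Cond = u8` with enum classes is what lets the compiler catch swapped or mistyped arguments. A small self-contained illustration of the typed register arithmetic the header defines (local stand-ins; `INVALID_REG` handling omitted, `assert` used in place of the project's `ASSERT`):

```cpp
#include <cassert>

enum class Register {
    R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, R13, R14, R15,
    SP = R13, LR = R14, PC = R15
};

// Same shape as the operator+ the header adds: step through the register
// file while checking the result is still a valid ARM register.
static Register operator+(Register r, int n) {
    int value = static_cast<int>(r) + n;
    assert(value >= 0 && value <= 15);
    return static_cast<Register>(value);
}

int main() {
    Register base = Register::R4;
    assert(base + 2 == Register::R6);        // arithmetic stays typed
    assert(Register::SP == Register::R13);   // aliases compare equal
    // Register r = 3;                       // does not compile: no implicit int -> Register
}
```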

@@ -16,24 +16,11 @@
 namespace ArmDecoder {
 
-namespace Impl {
-    struct MatcherImpl : Matcher {
-        MatcherImpl(u32 mask, u32 expect, std::function<void(Visitor* v, u32 instruction)> fn) : fn(fn) {
-            bit_mask = mask;
-            expected = expect;
-        }
-        std::function<void(Visitor* v, u32 instruction)> fn;
-        virtual void visit(Visitor *v, u32 inst) override {
-            fn(v, inst);
-        }
-    };
-}
-
-std::unique_ptr<Matcher> MakeMatcher(const char* str, std::function<void(Visitor* v, u32 instruction)> fn) {
+ThumbMatcher MakeMatcher(const char* const str, std::function<void(Visitor* v, u16 instruction)> fn) {
     ASSERT(strlen(str) == 16);
 
-    u32 mask = 0;
-    u32 expect = 0;
+    u16 mask = 0;
+    u16 expect = 0;
 
     for (int i = 0; i < 16; i++) {
         mask <<= 1;
@@ -55,401 +42,405 @@ std::unique_ptr<Matcher> MakeMatcher(const char* str, std::function<void(Visitor
         }
     }
 
-    return Common::make_unique<Impl::MatcherImpl>(mask, expect, fn);
+    return { mask, expect, fn };
 }
 
-template<size_t a, size_t b, typename T>
+template<size_t begin_bit, size_t end_bit, typename T>
 static constexpr T bits(T s){
-    return ((s << ((sizeof(s) * 8 - 1) - b)) >> (sizeof(s) * 8 - b + a - 1));
+    static_assert(begin_bit <= end_bit, "bit range must begin before it ends");
+    static_assert(begin_bit < sizeof(s) * 8, "begin_bit must be smaller than size of T");
+    static_assert(end_bit < sizeof(s) * 8, "end_bit must be smaller than size of T");
+
+    return (s >> begin_bit) & ((1 << (end_bit - begin_bit + 1)) - 1);
}
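Editor's note: the rewritten `bits<begin_bit, end_bit>` reads more naturally than the old double-shift trick — shift down to bit 0, then mask off `end_bit - begin_bit + 1` ones. A standalone check against a hand-assembled Thumb `ADD r0, r1, r2` (encoding 0x1888, worked out from the `000110oxxxxxxxxx` pattern below):

```cpp
#include <cassert>
#include <cstdint>

// Extracts bits [begin_bit, end_bit] (inclusive) of s, shifted down to bit 0 —
// the same contract as the rewritten bits<> helper in this commit.
template<std::size_t begin_bit, std::size_t end_bit, typename T>
static constexpr T bits(T s) {
    static_assert(begin_bit <= end_bit, "bit range must begin before it ends");
    return (s >> begin_bit) & ((1 << (end_bit - begin_bit + 1)) - 1);
}

int main() {
    std::uint16_t instruction = 0x1888;       // 0001100010001000: Thumb ADD r0, r1, r2
    assert((bits<9, 9>(instruction)) == 0);   // opcode bit: 0 means ADD
    assert((bits<6, 8>(instruction)) == 2);   // Rm = r2
    assert((bits<3, 5>(instruction)) == 1);   // Rn = r1
    assert((bits<0, 2>(instruction)) == 0);   // Rd = r0
}
```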
-static const std::array<Instruction, 27> thumb_instruction_table = { {
+static const std::array<ThumbInstruction, 27> thumb_instruction_table = { {
-    { "LSL/LSR/ASR", MakeMatcher("000ooxxxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "LSL/LSR/ASR", MakeMatcher("000ooxxxxxxxxxxx", [](Visitor* v, u16 instruction) {
         u32 opcode = bits<11, 12>(instruction);
         u32 imm5 = bits<6, 10>(instruction);
-        Register Rm = bits<3, 5>(instruction);
-        Register Rd = bits<0, 2>(instruction);
+        Register Rm = static_cast<Register>(bits<3, 5>(instruction));
+        Register Rd = static_cast<Register>(bits<0, 2>(instruction));
         switch (opcode) {
         case 0: // LSL <Rd>, <Rm>, #<imm5>
-            v->MOV_reg(0xE, /*S=*/true, Rd, imm5, 0b00, Rm);
+            v->MOV_reg(Cond::AL, /*S=*/true, Rd, imm5, ShiftType::LSL, Rm);
             break;
         case 1: // LSR <Rd>, <Rm>, #<imm5>
-            v->MOV_reg(0xE, /*S=*/true, Rd, imm5, 0b01, Rm);
+            v->MOV_reg(Cond::AL, /*S=*/true, Rd, imm5, ShiftType::LSR, Rm);
             break;
         case 2: // ASR <Rd>, <Rm>, #<imm5>
-            v->MOV_reg(0xE, /*S=*/true, Rd, imm5, 0b10, Rm);
+            v->MOV_reg(Cond::AL, /*S=*/true, Rd, imm5, ShiftType::ASR, Rm);
             break;
         default:
             UNREACHABLE();
         }
     })},
-    { "ADD/SUB_reg", MakeMatcher("000110oxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "ADD/SUB_reg", MakeMatcher("000110oxxxxxxxxx", [](Visitor* v, u16 instruction) {
         u32 opcode = bits<9, 9>(instruction);
-        Register Rm = bits<6, 8>(instruction);
-        Register Rn = bits<3, 5>(instruction);
-        Register Rd = bits<0, 2>(instruction);
+        Register Rm = static_cast<Register>(bits<6, 8>(instruction));
+        Register Rn = static_cast<Register>(bits<3, 5>(instruction));
+        Register Rd = static_cast<Register>(bits<0, 2>(instruction));
         switch (opcode) {
         case 0: // ADD <Rd>, <Rn>, <Rm>
-            v->ADD_reg(0xE, /*S=*/true, Rn, Rd, 0, 0, Rm);
+            v->ADD_reg(Cond::AL, /*S=*/true, Rn, Rd, 0, ShiftType::LSL, Rm);
             break;
         case 1: // SUB <Rd>, <Rn>, <Rm>
-            v->SUB_reg(0xE, /*S=*/true, Rn, Rd, 0, 0, Rm);
+            v->SUB_reg(Cond::AL, /*S=*/true, Rn, Rd, 0, ShiftType::LSL, Rm);
             break;
         default:
             UNREACHABLE();
         }
     })},
-    { "ADD/SUB_imm", MakeMatcher("000111oxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "ADD/SUB_imm", MakeMatcher("000111oxxxxxxxxx", [](Visitor* v, u16 instruction) {
         u32 opcode = bits<9, 9>(instruction);
         u32 imm3 = bits<6, 8>(instruction);
-        Register Rn = bits<3, 5>(instruction);
-        Register Rd = bits<0, 2>(instruction);
+        Register Rn = static_cast<Register>(bits<3, 5>(instruction));
+        Register Rd = static_cast<Register>(bits<0, 2>(instruction));
         switch (opcode) {
         case 0: // ADD <Rd>, <Rn>, #<imm3>
-            v->ADD_imm(0xE, /*S=*/true, Rn, Rd, 0, imm3);
+            v->ADD_imm(Cond::AL, /*S=*/true, Rn, Rd, 0, imm3);
             break;
         case 1: // SUB <Rd>, <Rn>, #<imm3>
-            v->SUB_imm(0xE, /*S=*/true, Rn, Rd, 0, imm3);
+            v->SUB_imm(Cond::AL, /*S=*/true, Rn, Rd, 0, imm3);
             break;
         default:
             UNREACHABLE();
         }
     })},
-    { "add/sub/cmp/mov_imm", MakeMatcher("001ooxxxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "add/sub/cmp/mov_imm", MakeMatcher("001ooxxxxxxxxxxx", [](Visitor* v, u16 instruction) {
         u32 opcode = bits<11, 12>(instruction);
-        Register Rd = bits<8, 10>(instruction);
+        Register Rd = static_cast<Register>(bits<8, 10>(instruction));
         u32 imm8 = bits<0, 7>(instruction);
         switch (opcode) {
         case 0: // MOV Rd, #imm8
-            v->MOV_imm(0xE, /*S=*/true, Rd, 0, imm8);
+            v->MOV_imm(Cond::AL, /*S=*/true, Rd, 0, imm8);
             break;
         case 1: // CMP Rn, #imm8
-            v->CMP_imm(0xE, Rd, 0, imm8);
+            v->CMP_imm(Cond::AL, Rd, 0, imm8);
             break;
         case 2: // ADD Rd, #imm8
-            v->ADD_imm(0xE, /*S=*/true, Rd, Rd, 0, imm8);
+            v->ADD_imm(Cond::AL, /*S=*/true, Rd, Rd, 0, imm8);
             break;
         case 3: // SUB Rd, #imm8
-            v->SUB_imm(0xE, /*S=*/true, Rd, Rd, 0, imm8);
+            v->SUB_imm(Cond::AL, /*S=*/true, Rd, Rd, 0, imm8);
             break;
         default:
             UNREACHABLE();
         }
     })},
-    { "data processing reg", MakeMatcher("010000ooooxxxxxx", [](Visitor* v, u32 instruction) {
+    { "data processing reg", MakeMatcher("010000ooooxxxxxx", [](Visitor* v, u16 instruction) {
         u32 opcode = bits<6, 9>(instruction);
-        Register Rm_Rs = bits<3, 5>(instruction);
-        Register Rd_Rn = bits<0, 2>(instruction);
+        Register Ra = static_cast<Register>(bits<3, 5>(instruction));
+        Register Rb = static_cast<Register>(bits<0, 2>(instruction));
         switch (opcode) {
         case 0: // AND Rd, Rm
-            v->AND_reg(0xE, /*S=*/true, Rd_Rn, Rd_Rn, 0, 0, Rm_Rs);
+            v->AND_reg(Cond::AL, /*S=*/true, Rb, Rb, 0, ShiftType::LSL, Ra);
             break;
         case 1: // EOR Rd, Rm
-            v->EOR_reg(0xE, /*S=*/true, Rd_Rn, Rd_Rn, 0, 0, Rm_Rs);
+            v->EOR_reg(Cond::AL, /*S=*/true, Rb, Rb, 0, ShiftType::LSL, Ra);
             break;
         case 2: // LSL Rd, Rs
-            v->MOV_rsr(0xE, /*S=*/true, Rd_Rn, Rm_Rs, 0b00, Rd_Rn);
+            v->MOV_rsr(Cond::AL, /*S=*/true, Rb, Ra, ShiftType::LSL, Rb);
             break;
         case 3: // LSR Rd, Rs
-            v->MOV_rsr(0xE, /*S=*/true, Rd_Rn, Rm_Rs, 0b01, Rd_Rn);
+            v->MOV_rsr(Cond::AL, /*S=*/true, Rb, Ra, ShiftType::LSR, Rb);
             break;
         case 4: // ASR Rd, Rs
-            v->MOV_rsr(0xE, /*S=*/true, Rd_Rn, Rm_Rs, 0b10, Rd_Rn);
+            v->MOV_rsr(Cond::AL, /*S=*/true, Rb, Ra, ShiftType::ASR, Rb);
            break;
         case 5: // ADC Rd, Rm
-            v->ADC_reg(0xE, /*S=*/true, Rd_Rn, Rd_Rn, 0, 0, Rm_Rs);
+            v->ADC_reg(Cond::AL, /*S=*/true, Rb, Rb, 0, ShiftType::LSL, Ra);
             break;
         case 6: // SBC Rd, Rm
-            v->SBC_reg(0xE, /*S=*/true, Rd_Rn, Rd_Rn, 0, 0, Rm_Rs);
+            v->SBC_reg(Cond::AL, /*S=*/true, Rb, Rb, 0, ShiftType::LSL, Ra);
             break;
         case 7: // ROR Rd, Rs
-            v->MOV_rsr(0xE, /*S=*/true, Rd_Rn, Rm_Rs, 0b11, Rd_Rn);
+            v->MOV_rsr(Cond::AL, /*S=*/true, Rb, Ra, ShiftType::ROR, Rb);
             break;
         case 8: // TST Rm, Rn
-            v->TST_reg(0xE, Rd_Rn, 0, 0, Rm_Rs);
+            v->TST_reg(Cond::AL, Rb, 0, ShiftType::LSL, Ra);
             break;
         case 9: // NEG Rd, Rm
-            v->RSB_imm(0xE, /*S=*/true, Rm_Rs, Rd_Rn, 0, 0);
+            v->RSB_imm(Cond::AL, /*S=*/true, Ra, Rb, 0, 0);
             break;
         case 10: // CMP Rm, Rn
-            v->CMP_reg(0xE, Rd_Rn, 0, 0, Rm_Rs);
+            v->CMP_reg(Cond::AL, Rb, 0, ShiftType::LSL, Ra);
             break;
         case 11: // CMN Rm, Rn
-            v->CMN_reg(0xE, Rd_Rn, 0, 0, Rm_Rs);
+            v->CMN_reg(Cond::AL, Rb, 0, ShiftType::LSL, Ra);
             break;
         case 12: // ORR Rd, Rm
-            v->ORR_reg(0xE, /*S=*/true, Rd_Rn, Rd_Rn, 0, 0, Rm_Rs);
+            v->ORR_reg(Cond::AL, /*S=*/true, Rb, Rb, 0, ShiftType::LSL, Ra);
             break;
         case 13: // MUL Rd, Rm
-            v->MUL(0xE, /*S=*/true, Rd_Rn, Rd_Rn, Rm_Rs);
+            v->MUL(Cond::AL, /*S=*/true, Rb, Rb, Ra);
             break;
         case 14: // BIC Rm, Rd
-            v->BIC_reg(0xE, /*S=*/true, Rd_Rn, Rd_Rn, 0, 0, Rm_Rs);
+            v->BIC_reg(Cond::AL, /*S=*/true, Rb, Rb, 0, ShiftType::LSL, Ra);
             break;
         case 15: // MVN Rd, Rm
-            v->MVN_reg(0xE, /*S=*/true, Rd_Rn, 0, 0, Rm_Rs);
+            v->MVN_reg(Cond::AL, /*S=*/true, Rb, 0, ShiftType::LSL, Ra);
             break;
         default:
             UNREACHABLE();
         }
     })},
-    { "special data processing", MakeMatcher("010001ooxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "special data processing", MakeMatcher("010001ooxxxxxxxx", [](Visitor* v, u16 instruction) {
         u32 opcode = bits<8, 9>(instruction);
-        Register Rm = bits<3, 6>(instruction);
-        Register Rd = bits<0, 2>(instruction) | (bits<7, 7>(instruction) << 3);
+        Register Rm = static_cast<Register>(bits<3, 6>(instruction));
+        Register Rd = static_cast<Register>(bits<0, 2>(instruction) | (bits<7, 7>(instruction) << 3));
         switch (opcode) {
         case 0: // ADD Rd, Rm
-            v->ADD_reg(0xE, /*S=*/false, Rd, Rd, 0, 0, Rm);
+            v->ADD_reg(Cond::AL, /*S=*/false, Rd, Rd, 0, ShiftType::LSL, Rm);
             break;
         case 1: // CMP Rm, Rn
-            v->CMP_reg(0xE, Rd, 0, 0, Rm);
+            v->CMP_reg(Cond::AL, Rd, 0, ShiftType::LSL, Rm);
             break;
         case 2: // MOV Rd, Rm
-            v->MOV_reg(0xE, /*S=*/false, Rd, 0, 0, Rm);
+            v->MOV_reg(Cond::AL, /*S=*/false, Rd, 0, ShiftType::LSL, Rm);
             break;
         default:
             UNREACHABLE();
         }
     })},
-    { "BLX/BX", MakeMatcher("01000111xxxxx000", [](Visitor* v, u32 instruction) {
+    { "BLX/BX", MakeMatcher("01000111xxxxx000", [](Visitor* v, u16 instruction) {
         bool L = bits<7, 7>(instruction);
-        Register Rm = bits<3, 6>(instruction);
+        Register Rm = static_cast<Register>(bits<3, 6>(instruction));
         if (!L) { // BX Rm
-            v->BX(0xE, Rm);
+            v->BX(Cond::AL, Rm);
         } else { // BLX Rm
-            v->BLX_reg(0xE, Rm);
+            v->BLX_reg(Cond::AL, Rm);
         }
     })},
-    { "load from literal pool", MakeMatcher("01001xxxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "load from literal pool", MakeMatcher("01001xxxxxxxxxxx", [](Visitor* v, u16 instruction) {
         // LDR Rd, [PC, #]
-        Register Rd = bits<8, 10>(instruction);
+        Register Rd = static_cast<Register>(bits<8, 10>(instruction));
         u32 imm8 = bits<0, 7>(instruction);
-        v->LDR_imm(0xE, /*P=*/1, /*U=*/1, /*W=*/0, 15, Rd, imm8 * 4);
+        v->LDR_imm(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Register::PC, Rd, imm8 * 4);
     })},
-    { "load/store reg offset", MakeMatcher("0101oooxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "load/store reg offset", MakeMatcher("0101oooxxxxxxxxx", [](Visitor* v, u16 instruction) {
         u32 opcode = bits<9, 11>(instruction);
-        Register Rm = bits<6, 8>(instruction);
-        Register Rn = bits<3, 5>(instruction);
-        Register Rd = bits<0, 2>(instruction);
+        Register Rm = static_cast<Register>(bits<6, 8>(instruction));
+        Register Rn = static_cast<Register>(bits<3, 5>(instruction));
+        Register Rd = static_cast<Register>(bits<0, 2>(instruction));
         switch (opcode) {
         case 0: // STR Rd, [Rn, Rm]
-            v->STR_reg(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, 0, 0, Rm);
+            v->STR_reg(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, 0, ShiftType::LSL, Rm);
             break;
         case 1: // STRH Rd, [Rn, Rm]
-            v->STRH_reg(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, Rm);
+            v->STRH_reg(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, Rm);
             break;
         case 2: // STRB Rd, [Rn, Rm]
-            v->STRB_reg(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, 0, 0, Rm);
+            v->STRB_reg(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, 0, ShiftType::LSL, Rm);
             break;
         case 3: // LDRSB Rd, [Rn, Rm]
-            v->LDRSB_reg(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, Rm);
+            v->LDRSB_reg(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, Rm);
             break;
         case 4: // LDR Rd, [Rn, Rm]
-            v->LDR_reg(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, 0, 0, Rm);
+            v->LDR_reg(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, 0, ShiftType::LSL, Rm);
             break;
         case 5: // LDRH Rd, [Rn, Rm]
-            v->LDRH_reg(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, Rm);
+            v->LDRH_reg(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, Rm);
             break;
         case 6: // LDRB Rd, [Rn, Rm]
-            v->LDRB_reg(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, 0, 0, Rm);
+            v->LDRB_reg(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, 0, ShiftType::LSL, Rm);
             break;
         case 7: // LDRSH Rd, [Rn, Rm]
-            v->LDRSH_reg(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, Rm);
+            v->LDRSH_reg(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, Rm);
             break;
         default:
             UNREACHABLE();
         }
     })},
-    { "STR(B)/LDR(B)_imm", MakeMatcher("011xxxxxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "STR(B)/LDR(B)_imm", MakeMatcher("011xxxxxxxxxxxxx", [](Visitor* v, u16 instruction) {
         u32 opc = bits<11, 12>(instruction);
-        Register offset = bits<6, 10>(instruction);
-        Register Rn = bits<3, 5>(instruction);
-        Register Rd = bits<0, 2>(instruction);
+        u32 offset = bits<6, 10>(instruction);
+        Register Rn = static_cast<Register>(bits<3, 5>(instruction));
+        Register Rd = static_cast<Register>(bits<0, 2>(instruction));
         switch (opc) {
         case 0: // STR Rd, [Rn, #offset]
-            v->STR_imm(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, offset * 4);
+            v->STR_imm(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, offset * 4);
             break;
         case 1: // LDR Rd, [Rn, #offset]
-            v->LDR_imm(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, offset * 4);
+            v->LDR_imm(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, offset * 4);
             break;
         case 2: // STRB Rd, [Rn, #offset]
-            v->STRB_imm(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, offset);
+            v->STRB_imm(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, offset);
             break;
         case 3: // LDRB Rd, [Rn, #offset]
-            v->LDRB_imm(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, offset);
+            v->LDRB_imm(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, offset);
             break;
         default:
             UNREACHABLE();
         }
     })},
-    { "STRH/LDRH_imm", MakeMatcher("1000xxxxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "STRH/LDRH_imm", MakeMatcher("1000xxxxxxxxxxxx", [](Visitor* v, u16 instruction) {
         bool L = bits<11, 11>(instruction);
-        Register offset = bits<6, 10>(instruction);
-        Register Rn = bits<3, 5>(instruction);
-        Register Rd = bits<0, 2>(instruction);
+        u32 offset = bits<6, 10>(instruction);
+        Register Rn = static_cast<Register>(bits<3, 5>(instruction));
+        Register Rd = static_cast<Register>(bits<0, 2>(instruction));
         if (!L) { // STRH Rd, [Rn, #offset]
-            v->STRH_imm(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, (offset * 2) >> 4, (offset * 2) & 0xF);
+            v->STRH_imm(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, (offset * 2) >> 4, (offset * 2) & 0xF);
         } else { // LDRH Rd, [Rn, #offset]
-            v->LDRH_imm(0xE, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, (offset * 2) >> 4, (offset * 2) & 0xF);
+            v->LDRH_imm(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Rn, Rd, (offset * 2) >> 4, (offset * 2) & 0xF);
         }
     })},
-    { "load/store stack", MakeMatcher("1001xxxxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "load/store stack", MakeMatcher("1001xxxxxxxxxxxx", [](Visitor* v, u16 instruction) {
         bool L = bits<11, 11>(instruction);
-        Register Rd = bits<8, 10>(instruction);
+        Register Rd = static_cast<Register>(bits<8, 10>(instruction));
         u32 offset = bits<0, 7>(instruction);
         if (!L) { // STR Rd, [SP, #offset]
-            v->STR_imm(0xE, /*P=*/1, /*U=*/1, /*W=*/0, 13, Rd, offset * 4);
+            v->STR_imm(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Register::SP, Rd, offset * 4);
         } else { // LDR Rd, [SP, #offset]
-            v->LDR_imm(0xE, /*P=*/1, /*U=*/1, /*W=*/0, 13, Rd, offset * 4);
+            v->LDR_imm(Cond::AL, /*P=*/1, /*U=*/1, /*W=*/0, Register::SP, Rd, offset * 4);
         }
     })},
-    { "add to sp/pc", MakeMatcher("1010oxxxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "add to sp/pc", MakeMatcher("1010oxxxxxxxxxxx", [](Visitor* v, u16 instruction) {
         // ADD Rd, PC/SP, #imm8
-        Register Rn = bits<11, 11>(instruction) ? 13 : 15;
-        Register Rd = bits<8, 10>(instruction);
+        Register Rn = bits<11, 11>(instruction) ? Register::SP : Register::PC;
+        Register Rd = static_cast<Register>(bits<8, 10>(instruction));
         u32 imm8 = bits<0, 7>(instruction);
-        v->ADD_imm(0xE, /*S=*/false, Rn, Rd, 0xF, imm8);
+        v->ADD_imm(Cond::AL, /*S=*/false, Rn, Rd, 0xF, imm8);
     })},
-    { "adjust stack ptr", MakeMatcher("10110000oxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "adjust stack ptr", MakeMatcher("10110000oxxxxxxx", [](Visitor* v, u16 instruction) {
         // SUB SP, SP, #<imm7*4>
         u32 opc = bits<7, 7>(instruction);
         u32 imm7 = bits<0, 6>(instruction);
         switch (opc) {
         case 0:
-            v->ADD_imm(0xE, /*S=*/false, 13, 13, 0xF, imm7);
+            v->ADD_imm(Cond::AL, /*S=*/false, Register::SP, Register::SP, 0xF, imm7);
             break;
         case 1:
-            v->SUB_imm(0xE, /*S=*/false, 13, 13, 0xF, imm7);
+            v->SUB_imm(Cond::AL, /*S=*/false, Register::SP, Register::SP, 0xF, imm7);
             break;
         default:
             UNREACHABLE();
         }
     })},
-    { "sign/zero extend", MakeMatcher("10110010ooxxxxxx", [](Visitor* v, u32 instruction) {
+    { "sign/zero extend", MakeMatcher("10110010ooxxxxxx", [](Visitor* v, u16 instruction) {
         u32 opc = bits<6, 7>(instruction);
-        Register Rm = bits<3, 5>(instruction);
-        Register Rd = bits<0, 2>(instruction);
+        Register Rm = static_cast<Register>(bits<3, 5>(instruction));
+        Register Rd = static_cast<Register>(bits<0, 2>(instruction));
         switch (opc) {
         case 0: // SXTH Rd, Rm
-            v->SXTH(0xE, Rd, 0, Rm);
+            v->SXTH(Cond::AL, Rd, SignExtendRotation::ROR_0, Rm);
             break;
         case 1: // SXTB Rd, Rm
-            v->SXTB(0xE, Rd, 0, Rm);
+            v->SXTB(Cond::AL, Rd, SignExtendRotation::ROR_0, Rm);
             break;
         case 2: // UXTH Rd, Rm
-            v->UXTH(0xE, Rd, 0, Rm);
+            v->UXTH(Cond::AL, Rd, SignExtendRotation::ROR_0, Rm);
             break;
         case 3: // UXTB Rd, Rm
-            v->UXTB(0xE, Rd, 0, Rm);
+            v->UXTB(Cond::AL, Rd, SignExtendRotation::ROR_0, Rm);
             break;
         default:
             UNREACHABLE();
         }
     })},
-    { "PUSH/POP_reglist", MakeMatcher("1011x10xxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "PUSH/POP_reglist", MakeMatcher("1011x10xxxxxxxxx", [](Visitor* v, u16 instruction) {
         bool L = bits<11, 11>(instruction);
-        bool R = bits<8, 8>(instruction);
+        u32 R = bits<8, 8>(instruction);
         u32 reglist = bits<0, 7>(instruction);
         if (!L) { // PUSH {reglist, <R>=LR}
             reglist |= R << 14;
             // Equivalent to STMDB SP!, {reglist}
-            v->STM(0xE, /*P=*/1, /*U=*/0, /*W=*/1, 13, reglist);
+            v->STM(Cond::AL, /*P=*/1, /*U=*/0, /*W=*/1, Register::SP, reglist);
         } else { // POP {reglist, <R>=PC}
             reglist |= R << 15;
             // Equivalent to LDMIA SP!, {reglist}
-            v->LDM(0xE, /*P=*/0, /*U=*/1, /*W=*/1, 13, reglist);
+            v->LDM(Cond::AL, /*P=*/0, /*U=*/1, /*W=*/1, Register::SP, reglist);
         }
     })},
-    { "SETEND", MakeMatcher("101101100101x000", [](Visitor* v, u32 instruction) {
+    { "SETEND", MakeMatcher("101101100101x000", [](Visitor* v, u16 instruction) {
         bool E = bits<3, 3>(instruction);
         v->SETEND(E);
     })},
-    { "change processor state", MakeMatcher("10110110011x0xxx", [](Visitor* v, u32 instruction) {
+    { "change processor state", MakeMatcher("10110110011x0xxx", [](Visitor* v, u16 instruction) {
         bool imod = bits<4, 4>(instruction);
         bool A = bits<2, 2>(instruction);
         bool I = bits<1, 1>(instruction);
         bool F = bits<0, 0>(instruction);
         v->CPS();
     })},
-    { "reverse bytes", MakeMatcher("10111010ooxxxxxx", [](Visitor* v, u32 instruction) {
+    { "reverse bytes", MakeMatcher("10111010ooxxxxxx", [](Visitor* v, u16 instruction) {
         u32 opc = bits<6, 7>(instruction);
-        Register Rn = bits<3, 5>(instruction);
-        Register Rd = bits<0, 2>(instruction);
+        Register Rn = static_cast<Register>(bits<3, 5>(instruction));
+        Register Rd = static_cast<Register>(bits<0, 2>(instruction));
         switch (opc) {
         case 0: // REV Rd, Rn
-            v->REV(0xE, Rd, Rn);
+            v->REV(Cond::AL, Rd, Rn);
             break;
         case 1: // REV16 Rd, Rn
-            v->REV16(0xE, Rd, Rn);
+            v->REV16(Cond::AL, Rd, Rn);
             break;
         case 2: // undefined
             v->UDF();
             break;
         case 3: // REVSH Rd, Rn
-            v->REVSH(0xE, Rd, Rn);
+            v->REVSH(Cond::AL, Rd, Rn);
             break;
         default:
             UNREACHABLE();
         }
     })},
-    { "BKPT", MakeMatcher("10111110xxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "BKPT", MakeMatcher("10111110xxxxxxxx", [](Visitor* v, u16 instruction) {
         // BKPT #imm8
         Imm8 imm8 = bits<0, 7>(instruction);
-        v->BKPT(0xE, imm8 >> 4, imm8 & 0xF);
+        v->BKPT(Cond::AL, imm8 >> 4, imm8 & 0xF);
     })},
-    { "STMIA/LDMIA", MakeMatcher("1100xxxxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "STMIA/LDMIA", MakeMatcher("1100xxxxxxxxxxxx", [](Visitor* v, u16 instruction) {
         bool L = bits<11, 11>(instruction);
-        Register Rn = bits<8, 10>(instruction);
+        Register Rn = static_cast<Register>(bits<8, 10>(instruction));
         u32 reglist = bits<0, 7>(instruction);
         if (!L) { // STMIA Rn!, { reglist }
-            v->STM(0xE, /*P=*/0, /*U=*/1, /*W=*/1, Rn, reglist);
+            v->STM(Cond::AL, /*P=*/0, /*U=*/1, /*W=*/1, Rn, reglist);
         } else { // LDMIA Rn!, { reglist }
-            bool w = (reglist & (1 << Rn)) == 0;
-            v->LDM(0xE, /*P=*/0, /*U=*/1, /*W=*/w, Rn, reglist);
+            RegisterList Rn_bit = 1 << static_cast<unsigned>(Rn);
+            bool w = (reglist & Rn_bit) == 0;
+            v->LDM(Cond::AL, /*P=*/0, /*U=*/1, /*W=*/w, Rn, reglist);
         }
     })},
-    { "B<cond>", MakeMatcher("1101xxxxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "B<cond>", MakeMatcher("1101xxxxxxxxxxxx", [](Visitor* v, u16 instruction) {
         // B<cond> <PC + #offset*2>
-        Cond cond = bits<8, 11>(instruction);
+        Cond cond = static_cast<Cond>(bits<8, 11>(instruction));
         s32 offset = bits<0, 7>(instruction);
-        ASSERT_MSG(cond != 0b1110, "UNDEFINED");
+        ASSERT_MSG(cond != Cond::AL, "UNDEFINED");
         v->thumb_B(cond, offset);
     })},
-    { "SWI", MakeMatcher("11011111xxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "SWI", MakeMatcher("11011111xxxxxxxx", [](Visitor* v, u16 instruction) {
         // SWI #imm8
         Imm8 imm8 = bits<0, 7>(instruction);
-        v->SVC(0xE, imm8);
+        v->SVC(Cond::AL, imm8);
     })},
-    { "B", MakeMatcher("11100xxxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "B", MakeMatcher("11100xxxxxxxxxxx", [](Visitor* v, u16 instruction) {
         // B <PC + #offset*2>
         Imm11 imm11 = bits<0, 10>(instruction);
         v->thumb_B(imm11);
     })},
-    { "BLX (suffix)", MakeMatcher("11101xxxxxxxxxx0", [](Visitor* v, u32 instruction) {
+    { "BLX (suffix)", MakeMatcher("11101xxxxxxxxxx0", [](Visitor* v, u16 instruction) {
         Imm11 imm11 = bits<0, 10>(instruction);
         v->thumb_BLX_suffix(/*X=*/true, imm11);
     })},
-    { "BL/BLX (prefix)", MakeMatcher("11110xxxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "BL/BLX (prefix)", MakeMatcher("11110xxxxxxxxxxx", [](Visitor* v, u16 instruction) {
         Imm11 imm11 = bits<0, 10>(instruction);
         v->thumb_BLX_prefix(imm11);
     })},
-    { "BL (suffix)", MakeMatcher("11111xxxxxxxxxxx", [](Visitor* v, u32 instruction) {
+    { "BL (suffix)", MakeMatcher("11111xxxxxxxxxxx", [](Visitor* v, u16 instruction) {
         Imm11 imm11 = bits<0, 10>(instruction);
         v->thumb_BLX_suffix(/*X=*/false, imm11);
     })}
 }};
 
-boost::optional<const Instruction&> DecodeThumb(u16 i) {
+boost::optional<const ThumbInstruction&> DecodeThumb(u16 i) {
     // NOTE: The reverse search direction is important. Searching forwards would result in incorrect behavior.
     // This is because the entries in thumb_instruction_table have more specific matches coming after less specific ones.
     // Example:
     //     000ooxxxxxxxxxxx comes before 000110oxxxxxxxxx
     // with a forward search direction notice how the first one will always be matched and the latter never will be.
-    auto iterator = std::find_if(thumb_instruction_table.crbegin(), thumb_instruction_table.crend(), [i](const Instruction& instruction) {
-        return instruction.Match(i);
-    });
+    auto iterator = std::find_if(thumb_instruction_table.crbegin(), thumb_instruction_table.crend(),
                                 [i](const auto& instruction) { return instruction.Match(i); });
 
-    return (iterator != thumb_instruction_table.crend()) ? boost::make_optional<const Instruction&>(*iterator) : boost::none;
+    return (iterator != thumb_instruction_table.crend()) ? boost::make_optional<const ThumbInstruction&>(*iterator) : boost::none;
 }
 
 };
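Editor's note: the reverse-search NOTE in DecodeThumb can be reproduced in isolation. With masks hand-derived from the two patterns in its example (0xE000/0x0000 for `000ooxxxxxxxxxxx`, 0xFC00/0x1800 for `000110oxxxxxxxxx`), a forward scan stops at the generic shift entry while a reverse scan finds the more specific ADD/SUB entry:

```cpp
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>

struct Matcher { std::uint16_t bit_mask, expected; };

int main() {
    // Same ordering hazard as thumb_instruction_table: the generic pattern
    // also covers every encoding of the more specific one listed after it.
    std::array<Matcher, 2> table{{
        {0xE000, 0x0000},   // "LSL/LSR/ASR": 000ooxxxxxxxxxxx
        {0xFC00, 0x1800},   // "ADD/SUB_reg": 000110oxxxxxxxxx
    }};
    std::uint16_t add_r0_r1_r2 = 0x1888;
    auto match = [&](const Matcher& m) { return (add_r0_r1_r2 & m.bit_mask) == m.expected; };

    auto forward = std::find_if(table.cbegin(), table.cend(), match);
    auto reverse = std::find_if(table.crbegin(), table.crend(), match);
    assert(forward == table.cbegin());   // forward search matches the generic entry
    assert(reverse == table.crbegin());  // reverse search matches the specific entry
}
```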

@@ -4,6 +4,8 @@
 #pragma once
 
+#include <type_traits>
+
 #include "common/common_types.h"
 #include "core/arm/decoder/decoder.h"
@@ -37,4 +39,18 @@ struct JitState {
     s32 cycles_remaining;
 };
 
+constexpr bool IsValidArmReg(ArmReg arm_reg) {
+    return static_cast<unsigned>(arm_reg) <= 15;
+}
+
+static bool IsEvenArmReg(ArmReg arm_reg) {
+    ASSERT(IsValidArmReg(arm_reg));
+    return static_cast<unsigned>(arm_reg) % 2 == 0;
+}
+
+/// Turns an ArmReg into an ArmRegList bitmap.
+constexpr ArmRegList MakeRegList(ArmReg arm_reg) {
+    return 1 << static_cast<unsigned>(arm_reg);
+}
+
 }
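Editor's note: the new helpers are small enough to exercise standalone. A sketch with local stand-ins for `ArmReg`/`ArmRegList` (the real typedefs come from decoder.h, not this snippet):

```cpp
#include <cassert>

enum class ArmReg { R0, SP = 13, LR = 14, PC = 15 };
using ArmRegList = unsigned short;  // one bit per register, as in the decoder

constexpr bool IsValidArmReg(ArmReg arm_reg) {
    return static_cast<unsigned>(arm_reg) <= 15;
}

// Turns an ArmReg into a one-bit ArmRegList bitmap, like the new MakeRegList.
constexpr ArmRegList MakeRegList(ArmReg arm_reg) {
    return 1 << static_cast<unsigned>(arm_reg);
}

int main() {
    static_assert(MakeRegList(ArmReg::SP) == 0x2000, "SP is bit 13");
    ArmRegList push_sp_lr = MakeRegList(ArmReg::SP) | MakeRegList(ArmReg::LR);
    assert(push_sp_lr == 0x6000);    // the reglist for STMDB SP!, {SP, LR}
    assert(IsValidArmReg(ArmReg::PC));
}
```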

@@ -13,16 +13,16 @@ using namespace Gen;
 
 void JitX64::CondManager::Init(JitX64* jit_) {
     jit = jit_;
-    current_cond = ConditionCode::AL;
+    current_cond = Cond::AL;
     flags_dirty = false;
     current_cond_fixup = {};
 }
 
-void JitX64::CondManager::CompileCond(const ConditionCode new_cond) {
+void JitX64::CondManager::CompileCond(const Cond new_cond) {
     if (current_cond == new_cond && !flags_dirty)
         return;
 
-    if (current_cond != ConditionCode::AL && current_cond != ConditionCode::NV) {
+    if (current_cond != Cond::AL && current_cond != Cond::NV) {
         jit->reg_alloc.FlushEverything();
         jit->reg_alloc.AssertNoLocked();
         ASSERT(current_cond_fixup.ptr);
@@ -30,43 +30,43 @@ void JitX64::CondManager::CompileCond(const ConditionCode new_cond) {
         current_cond_fixup.ptr = nullptr;
     }
 
-    if (new_cond != ConditionCode::AL && new_cond != ConditionCode::NV) {
+    if (new_cond != Cond::AL && new_cond != Cond::NV) {
         CCFlags cc;
 
         switch (new_cond) {
-        case ConditionCode::EQ: //z
+        case Cond::EQ: //z
            jit->code->CMP(8, jit->MJitStateZFlag(), Imm8(0));
            cc = CC_E;
            break;
-        case ConditionCode::NE: //!z
+        case Cond::NE: //!z
            jit->code->CMP(8, jit->MJitStateZFlag(), Imm8(0));
            cc = CC_NE;
            break;
-        case ConditionCode::CS: //c
+        case Cond::CS: //c
            jit->code->CMP(8, jit->MJitStateCFlag(), Imm8(0));
            cc = CC_E;
            break;
-        case ConditionCode::CC: //!c
+        case Cond::CC: //!c
            jit->code->CMP(8, jit->MJitStateCFlag(), Imm8(0));
            cc = CC_NE;
            break;
-        case ConditionCode::MI: //n
+        case Cond::MI: //n
            jit->code->CMP(8, jit->MJitStateNFlag(), Imm8(0));
            cc = CC_E;
            break;
-        case ConditionCode::PL: //!n
+        case Cond::PL: //!n
            jit->code->CMP(8, jit->MJitStateNFlag(), Imm8(0));
            cc = CC_NE;
            break;
-        case ConditionCode::VS: //v
+        case Cond::VS: //v
            jit->code->CMP(8, jit->MJitStateVFlag(), Imm8(0));
            cc = CC_E;
            break;
-        case ConditionCode::VC: //!v
+        case Cond::VC: //!v
            jit->code->CMP(8, jit->MJitStateVFlag(), Imm8(0));
            cc = CC_NE;
            break;
-        case ConditionCode::HI: { //c & !z
+        case Cond::HI: { //c & !z
            const X64Reg tmp = jit->reg_alloc.AllocTemp();
            jit->code->MOVZX(64, 8, tmp, jit->MJitStateZFlag());
            jit->code->CMP(8, jit->MJitStateCFlag(), R(tmp));
@@ -74,7 +74,7 @@ void JitX64::CondManager::CompileCond(const ConditionCode new_cond) {
            jit->reg_alloc.UnlockTemp(tmp);
            break;
        }
-        case ConditionCode::LS: { //!c | z
+        case Cond::LS: { //!c | z
            const X64Reg tmp = jit->reg_alloc.AllocTemp();
            jit->code->MOVZX(64, 8, tmp, jit->MJitStateZFlag());
            jit->code->CMP(8, jit->MJitStateCFlag(), R(tmp));
@@ -82,7 +82,7 @@ void JitX64::CondManager::CompileCond(const ConditionCode new_cond) {
            jit->reg_alloc.UnlockTemp(tmp);
            break;
        }
-        case ConditionCode::GE: { // n == v
+        case Cond::GE: { // n == v
            const X64Reg tmp = jit->reg_alloc.AllocTemp();
            jit->code->MOVZX(64, 8, tmp, jit->MJitStateVFlag());
            jit->code->CMP(8, jit->MJitStateNFlag(), R(tmp));
@@ -90,7 +90,7 @@ void JitX64::CondManager::CompileCond(const ConditionCode new_cond) {
            jit->reg_alloc.UnlockTemp(tmp);
            break;
        }
-        case ConditionCode::LT: { // n != v
+        case Cond::LT: { // n != v
            const X64Reg tmp = jit->reg_alloc.AllocTemp();
            jit->code->MOVZX(64, 8, tmp, jit->MJitStateVFlag());
            jit->code->CMP(8, jit->MJitStateNFlag(), R(tmp));
@@ -98,7 +98,7 @@ void JitX64::CondManager::CompileCond(const ConditionCode new_cond) {
            jit->reg_alloc.UnlockTemp(tmp);
            break;
        }
-        case ConditionCode::GT: { // !z & (n == v)
+        case Cond::GT: { // !z & (n == v)
            const X64Reg tmp = jit->reg_alloc.AllocTemp();
            jit->code->MOVZX(64, 8, tmp, jit->MJitStateNFlag());
            jit->code->XOR(8, R(tmp), jit->MJitStateVFlag());
@@ -108,7 +108,7 @@ void JitX64::CondManager::CompileCond(const ConditionCode new_cond) {
            jit->reg_alloc.UnlockTemp(tmp);
            break;
        }
-        case ConditionCode::LE: { // z | (n != v)
+        case Cond::LE: { // z | (n != v)
            X64Reg tmp = jit->reg_alloc.AllocTemp();
            jit->code->MOVZX(64, 8, tmp, jit->MJitStateNFlag());
            jit->code->XOR(8, R(tmp), jit->MJitStateVFlag());
@@ -133,14 +133,14 @@ void JitX64::CondManager::CompileCond(const ConditionCode new_cond) {
 }
 
 void JitX64::CondManager::Always() {
-    CompileCond(ConditionCode::AL);
+    CompileCond(Cond::AL);
 }
 
 void JitX64::CondManager::FlagsDirty() {
     flags_dirty = true;
 }
 
-ConditionCode JitX64::CondManager::CurrentCond() {
+Cond JitX64::CondManager::CurrentCond() {
     return current_cond;
 }
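Editor's note: for reference, the flag predicates that CompileCond emits, written as plain C++ over an NZCV struct. This is only a behavioural model of the ARM conditions seen in the cases above, not the emitted x64:

```cpp
#include <cassert>

enum class Cond { EQ, NE, CS, CC, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV };

struct Flags { bool n, z, c, v; };

// Each case mirrors the flag test compiled above (HI = c & !z, GE = n == v, ...).
static bool Passed(Cond cond, Flags f) {
    switch (cond) {
    case Cond::EQ: return f.z;
    case Cond::NE: return !f.z;
    case Cond::CS: return f.c;
    case Cond::CC: return !f.c;
    case Cond::MI: return f.n;
    case Cond::PL: return !f.n;
    case Cond::VS: return f.v;
    case Cond::VC: return !f.v;
    case Cond::HI: return f.c && !f.z;
    case Cond::LS: return !f.c || f.z;
    case Cond::GE: return f.n == f.v;
    case Cond::LT: return f.n != f.v;
    case Cond::GT: return !f.z && f.n == f.v;
    case Cond::LE: return f.z || f.n != f.v;
    default:       return true; // AL and NV: CompileCond above emits no check for either
    }
}

int main() {
    assert(Passed(Cond::GT, {false, false, false, false}));
    assert(!Passed(Cond::GT, {true, false, false, false}));
}
```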

@@ -11,7 +11,7 @@ namespace JitX64 {
 using namespace Gen;
 
 void JitX64::B(Cond cond, ArmImm24 imm24) {
-    cond_manager.CompileCond((ConditionCode)cond);
+    cond_manager.CompileCond(cond);
 
     const u32 new_pc = GetReg15Value() + MathUtil::SignExtend<26, s32>(imm24 << 2);
@@ -20,28 +20,28 @@ void JitX64::B(Cond cond, ArmImm24 imm24) {
     CompileUpdateCycles(false);
     CompileJumpToBB(new_pc);
 
-    if (cond == ConditionCode::AL) {
+    if (cond == Cond::AL) {
         stop_compilation = true;
     }
 }
 
 void JitX64::BL(Cond cond, ArmImm24 imm24) {
-    cond_manager.CompileCond((ConditionCode)cond);
+    cond_manager.CompileCond(cond);
 
     const u32 new_pc = GetReg15Value() + MathUtil::SignExtend<26, s32>(imm24 << 2);
 
     ASSERT(!current.TFlag);
     const u32 link_pc = current.arm_pc + GetInstSize();
-    Gen::OpArg LR = reg_alloc.LockArmForWrite(14);
+    Gen::OpArg LR = reg_alloc.LockArmForWrite(ArmReg::LR);
     code->MOV(32, LR, Imm32(link_pc));
-    reg_alloc.UnlockArm(14);
+    reg_alloc.UnlockArm(ArmReg::LR);
 
     reg_alloc.FlushEverything();
     current.arm_pc += GetInstSize();
     CompileUpdateCycles(false);
     CompileJumpToBB(new_pc);
 
-    if (cond == ConditionCode::AL) {
+    if (cond == Cond::AL) {
         stop_compilation = true;
     }
 }
@@ -53,9 +53,9 @@ void JitX64::BLX_imm(bool H, ArmImm24 imm24) {
     ASSERT(!current.TFlag);
     const u32 link_pc = current.arm_pc + GetInstSize();
-    Gen::OpArg LR = reg_alloc.LockArmForWrite(14);
+    Gen::OpArg LR = reg_alloc.LockArmForWrite(ArmReg::LR);
     code->MOV(32, LR, Imm32(link_pc));
-    reg_alloc.UnlockArm(14);
+    reg_alloc.UnlockArm(ArmReg::LR);
 
     current.TFlag = true;
     code->MOV(32, MJitStateTFlag(), Imm32(1));
@@ -69,26 +69,21 @@ void JitX64::BLX_imm(bool H, ArmImm24 imm24) {
 }
 
 void JitX64::BLX_reg(Cond cond, ArmReg Rm_index) {
-    cond_manager.CompileCond((ConditionCode)cond);
+    cond_manager.CompileCond(cond);
+
+    ASSERT_MSG(Rm_index != ArmReg::PC, "UNPREDICTABLE");
 
     const u32 link_pc = current.arm_pc + GetInstSize() + (current.TFlag ? 1 : 0);
-    Gen::OpArg LR = reg_alloc.LockArmForWrite(14);
+    Gen::OpArg LR = reg_alloc.LockArmForWrite(ArmReg::LR);
     code->MOV(32, LR, Imm32(link_pc));
-    reg_alloc.UnlockArm(14);
+    reg_alloc.UnlockArm(ArmReg::LR);
 
-    if (Rm_index == 15) {
-        // This is what the interpreter does. This effectively hangs the cpu.
-        // blx r15 is marked as UNPREDICTABLE in ARM ARM.
-        code->MOV(32, MJitStateArmPC(), Imm32(current.arm_pc));
-        code->MOV(32, MJitStateTFlag(), Imm32(0));
-    } else {
-        Gen::X64Reg Rm = reg_alloc.BindArmForRead(Rm_index);
-        code->MOV(32, MJitStateArmPC(), R(Rm));
-        code->AND(32, MJitStateArmPC(), Imm32(0xFFFFFFFE));
-        code->BT(32, R(Rm), Imm8(0));
-        code->SETcc(CC_C, MJitStateTFlag()); // NOTE: current.TFlag is now inaccurate
-        reg_alloc.UnlockArm(Rm_index);
-    }
+    Gen::X64Reg Rm = reg_alloc.BindArmForRead(Rm_index);
+    code->MOV(32, MJitStateArmPC(), R(Rm));
+    code->AND(32, MJitStateArmPC(), Imm32(0xFFFFFFFE));
+    code->BT(32, R(Rm), Imm8(0));
+    code->SETcc(CC_C, MJitStateTFlag()); // NOTE: current.TFlag is now inaccurate
+    reg_alloc.UnlockArm(Rm_index);
 
     current.arm_pc += GetInstSize();
     CompileReturnToDispatch();
@@ -97,9 +92,9 @@ void JitX64::BLX_reg(Cond cond, ArmReg Rm_index) {
 }
 
 void JitX64::BX(Cond cond, ArmReg Rm_index) {
-    cond_manager.CompileCond((ConditionCode)cond);
+    cond_manager.CompileCond(cond);
 
-    if (Rm_index == 15) {
+    if (Rm_index == ArmReg::PC) {
         code->MOV(32, MJitStateArmPC(), Imm32(GetReg15Value()));
         code->MOV(32, MJitStateTFlag(), Imm32(0));
     } else {
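Editor's note: the branch handlers above form targets as `GetReg15Value() + MathUtil::SignExtend<26, s32>(imm24 << 2)`. A standalone sketch of that arithmetic, assuming `MathUtil::SignExtend<bit_count>` has the usual xor-and-subtract semantics (the real helper's signature may differ):

```cpp
#include <cassert>
#include <cstdint>

// Sign-extend the low bit_count bits of x; assumed behaviour of MathUtil::SignExtend.
template<std::size_t bit_count>
static std::int32_t SignExtend(std::uint32_t x) {
    std::uint32_t mask = 1u << (bit_count - 1);
    return static_cast<std::int32_t>((x ^ mask) - mask);
}

int main() {
    std::uint32_t imm24 = 0xFFFFFE;                  // encodes a short backwards branch
    std::int32_t offset = SignExtend<26>(imm24 << 2);
    assert(offset == -8);
    // new_pc = GetReg15Value() + offset; R15 reads as the branch address + 8 in ARM mode,
    // so this example branches back to the instruction before the branch itself.
}
```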

@@ -11,7 +11,7 @@ namespace JitX64 {
 using namespace Gen;
 
 void JitX64::CompileDataProcessingHelper(ArmReg Rn_index, ArmReg Rd_index, std::function<void(X64Reg)> body) {
-    if (Rn_index == 15) {
+    if (Rn_index == ArmReg::PC) {
         X64Reg Rd = reg_alloc.BindArmForWrite(Rd_index);
         code->MOV(32, R(Rd), Imm32(GetReg15Value()));
@@ -48,7 +48,7 @@ void JitX64::CompileDataProcessingHelper_Reverse(ArmReg Rn_index, ArmReg Rd_inde
     body(tmp);
 
-    if (Rd_index != 15) {
+    if (Rd_index != ArmReg::PC) {
         // TODO: Efficiency: Could implement this as a register rebind instead of needing to MOV.
         OpArg Rd = reg_alloc.LockArmForReadWrite(Rd_index);
         code->MOV(32, Rd, R(tmp));
@@ -67,7 +67,7 @@ void JitX64::CompileShifter_imm(X64Reg dest, ArmImm5 imm5, ShiftType shift, bool
     // we output code that calculates and puts shifter_carry_out into MJitStateCFlag().
 
     switch (shift) {
-    case 0b00: // Logical shift left by immediate
+    case ShiftType::LSL: // Logical shift left by immediate
         if (imm5 != 0) {
             code->SHL(32, R(dest), Imm8(imm5));
             if (do_shifter_carry_out) {
@@ -75,7 +75,7 @@ void JitX64::CompileShifter_imm(X64Reg dest, ArmImm5 imm5, ShiftType shift, bool
             }
         }
         return;
-    case 0b01: // Logical shift right by immediate
+    case ShiftType::LSR: // Logical shift right by immediate
         if (imm5 == 0) {
             if (do_shifter_carry_out) {
                 code->BT(32, R(dest), Imm8(31));
@@ -89,7 +89,7 @@ void JitX64::CompileShifter_imm(X64Reg dest, ArmImm5 imm5, ShiftType shift, bool
             }
         }
         return;
-    case 0b10: // Arithmetic shift right by immediate
+    case ShiftType::ASR: // Arithmetic shift right by immediate
         if (imm5 == 0) {
             if (do_shifter_carry_out) {
                 code->BT(32, R(dest), Imm8(31));
@@ -103,7 +103,7 @@ void JitX64::CompileShifter_imm(X64Reg dest, ArmImm5 imm5, ShiftType shift, bool
             }
         }
         return;
-    case 0b11: // Rotate right by immediate
+    case ShiftType::ROR: // Rotate right by immediate
         if (imm5 == 0) { //RRX
             code->BT(8, MJitStateCFlag(), Imm8(0));
             code->RCR(32, R(dest), Imm8(1));
@@ -125,7 +125,7 @@ void JitX64::CompileShifter_imm(X64Reg dest, ArmImm5 imm5, ShiftType shift, bool
 
 X64Reg JitX64::CompileDataProcessingHelper_reg(ArmImm5 imm5, ShiftType shift, ArmReg Rm_index, bool do_shifter_carry_out) {
     X64Reg tmp = reg_alloc.AllocTemp();
 
-    if (Rm_index != 15) {
+    if (Rm_index != ArmReg::PC) {
         OpArg Rm = reg_alloc.LockArmForRead(Rm_index);
         code->MOV(32, R(tmp), Rm);
         reg_alloc.UnlockArm(Rm_index);
@@ -152,7 +152,7 @@ X64Reg JitX64::CompileDataProcessingHelper_rsr(ArmReg Rs_index, ShiftType shift,
     X64Reg tmp = reg_alloc.AllocTemp();
 
-    if (Rs_index != 15) {
+    if (Rs_index != ArmReg::PC) {
         OpArg Rs = reg_alloc.LockArmForRead(Rs_index);
         code->MOV(32, R(RCX), Rs);
         code->AND(32, R(RCX), Imm32(0xFF));
@@ -161,7 +161,7 @@ X64Reg JitX64::CompileDataProcessingHelper_rsr(ArmReg Rs_index, ShiftType shift,
         code->MOV(32, R(RCX), Imm32(GetReg15Value() & 0xFF));
     }
 
-    if (Rm_index != 15) {
+    if (Rm_index != ArmReg::PC) {
         OpArg Rm = reg_alloc.LockArmForRead(Rm_index);
         code->MOV(32, R(tmp), Rm);
         reg_alloc.UnlockArm(Rm_index);
@@ -170,7 +170,7 @@ X64Reg JitX64::CompileDataProcessingHelper_rsr(ArmReg Rs_index, ShiftType shift,
     }
 
     switch (shift) {
-    case 0b00: { // Logical shift left by register
+    case ShiftType::LSL: { // Logical shift left by register
         if (!do_shifter_carry_out) {
             code->SHL(32, R(tmp), R(CL));
@@ -207,7 +207,7 @@ X64Reg JitX64::CompileDataProcessingHelper_rsr(ArmReg Rs_index, ShiftType shift,
         }
         break;
     }
-    case 0b01: { // Logical shift right by register
+    case ShiftType::LSR: { // Logical shift right by register
         if (!do_shifter_carry_out) {
             code->SHR(32, R(tmp), R(RCX));
@@ -244,7 +244,7 @@ X64Reg JitX64::CompileDataProcessingHelper_rsr(ArmReg Rs_index, ShiftType shift,
         }
         break;
     }
-    case 0b10: { // Arithmetic shift right by register
+    case ShiftType::ASR: { // Arithmetic shift right by register
         if (!do_shifter_carry_out) {
             code->CMP(32, R(RCX), Imm8(31));
             auto Rs_gt31 = code->J_CC(CC_A);
@@ -277,7 +277,7 @@ X64Reg JitX64::CompileDataProcessingHelper_rsr(ArmReg Rs_index, ShiftType shift,
         }
         break;
     }
-    case 0b11: { // Rotate right by register
+    case ShiftType::ROR: { // Rotate right by register
         if (!do_shifter_carry_out) {
             code->AND(32, R(RCX), Imm32(0x1F));
             code->ROR(32, R(tmp), R(CL));
@@ -313,7 +313,7 @@ X64Reg JitX64::CompileDataProcessingHelper_rsr(ArmReg Rs_index, ShiftType shift,
 }
 
 void JitX64::ADC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) {
-    cond_manager.CompileCond((ConditionCode)cond);
+    cond_manager.CompileCond(cond);
 
     u32 immediate = rotr(imm8, rotate * 2);
@@ -327,13 +327,13 @@ void JitX64::ADC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int ro
     }
 
     current.arm_pc += GetInstSize();
-    if (Rd_index == 15) {
+    if (Rd_index == ArmReg::PC) {
         CompileReturnToDispatch();
     }
 }
 
 void JitX64::ADC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
-    cond_manager.CompileCond((ConditionCode)cond);
+    cond_manager.CompileCond(cond);
 
     Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false);
@@ -349,13 +349,13 @@ void JitX64::ADC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm
     }
 
     current.arm_pc += GetInstSize();
-    if (Rd_index == 15) {
+    if (Rd_index == ArmReg::PC) {
         CompileReturnToDispatch();
     }
 }
 
 void JitX64::ADC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
-    cond_manager.CompileCond((ConditionCode)cond);
+    cond_manager.CompileCond(cond);
 
     Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false);
@@ -371,13 +371,13 @@ void JitX64::ADC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg
     }
 
     current.arm_pc += GetInstSize();
-    if (Rd_index == 15) {
+    if (Rd_index == ArmReg::PC) {
         CompileReturnToDispatch();
     }
 }
 
 void JitX64::ADD_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) {
-    cond_manager.CompileCond((ConditionCode)cond);
+    cond_manager.CompileCond(cond);
 
     u32 immediate = rotr(imm8, rotate * 2);
@@ -390,13 +390,13 @@ void JitX64::ADD_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int ro
     }
 
     current.arm_pc += GetInstSize();
-    if (Rd_index == 15) {
+    if (Rd_index == ArmReg::PC) {
         CompileReturnToDispatch();
     }
 }
 
 void JitX64::ADD_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
-    cond_manager.CompileCond((ConditionCode)cond);
+    cond_manager.CompileCond(cond);
 
     Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false);
@@ -411,13 +411,13 @@ void JitX64::ADD_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm
     }
 
     current.arm_pc += GetInstSize();
-    if (Rd_index == 15) {
+    if (Rd_index == ArmReg::PC) {
         CompileReturnToDispatch();
     }
 }
 
 void JitX64::ADD_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
-    cond_manager.CompileCond((ConditionCode)cond);
+    cond_manager.CompileCond(cond);
 
     Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false);
@@ -432,13 +432,13 @@ void JitX64::ADD_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg
     }
 
     current.arm_pc += GetInstSize();
-    if (Rd_index == 15) {
+    if (Rd_index == ArmReg::PC) {
         CompileReturnToDispatch();
     }
 }
 
 void JitX64::AND_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) {
-    cond_manager.CompileCond((ConditionCode)cond);
+    cond_manager.CompileCond(cond);
 
     u32 immediate = rotr(imm8, rotate * 2);
@@ -454,13 +454,13 @@ void JitX64::AND_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int ro
     }
 
     current.arm_pc += GetInstSize();
-    if (Rd_index == 15) {
+    if (Rd_index == ArmReg::PC) {
         CompileReturnToDispatch();
     }
 }
 
 void JitX64::AND_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
-    cond_manager.CompileCond((ConditionCode)cond);
+    cond_manager.CompileCond(cond);
 
     Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, S);
@@ -476,13 +476,13 @@ void JitX64::AND_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm
     }
 
     current.arm_pc += GetInstSize();
-    if (Rd_index == 15) {
+    if (Rd_index == ArmReg::PC) {
         CompileReturnToDispatch();
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::AND_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::AND_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S);
@ -498,13 +498,13 @@ void JitX64::AND_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::BIC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { void JitX64::BIC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
@ -520,13 +520,13 @@ void JitX64::BIC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int ro
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::BIC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::BIC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, S); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, S);
@ -543,13 +543,13 @@ void JitX64::BIC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::BIC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::BIC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S);
@ -566,18 +566,18 @@ void JitX64::BIC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::CMN_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) { void JitX64::CMN_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
X64Reg tmp = reg_alloc.AllocTemp(); X64Reg tmp = reg_alloc.AllocTemp();
if (Rn_index != 15) { if (Rn_index != ArmReg::PC) {
OpArg Rn = reg_alloc.LockArmForRead(Rn_index); OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
code->MOV(32, R(tmp), Rn); code->MOV(32, R(tmp), Rn);
reg_alloc.UnlockArm(Rn_index); reg_alloc.UnlockArm(Rn_index);
@ -595,11 +595,11 @@ void JitX64::CMN_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) {
} }
void JitX64::CMN_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::CMN_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false);
if (Rn_index != 15) { if (Rn_index != ArmReg::PC) {
OpArg Rn = reg_alloc.LockArmForRead(Rn_index); OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
code->ADD(32, R(tmp), Rn); code->ADD(32, R(tmp), Rn);
reg_alloc.UnlockArm(Rn_index); reg_alloc.UnlockArm(Rn_index);
@ -615,11 +615,11 @@ void JitX64::CMN_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift,
} }
void JitX64::CMN_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::CMN_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false);
if (Rn_index != 15) { if (Rn_index != ArmReg::PC) {
OpArg Rn = reg_alloc.LockArmForRead(Rn_index); OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
code->ADD(32, R(tmp), Rn); code->ADD(32, R(tmp), Rn);
reg_alloc.UnlockArm(Rn_index); reg_alloc.UnlockArm(Rn_index);
@ -635,11 +635,11 @@ void JitX64::CMN_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shif
} }
void JitX64::CMP_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) { void JitX64::CMP_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
if (Rn_index != 15) { if (Rn_index != ArmReg::PC) {
OpArg Rn = reg_alloc.LockArmForRead(Rn_index); OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
code->CMP(32, Rn, Imm32(immediate)); code->CMP(32, Rn, Imm32(immediate));
reg_alloc.UnlockArm(Rn_index); reg_alloc.UnlockArm(Rn_index);
@ -658,11 +658,11 @@ void JitX64::CMP_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) {
} }
void JitX64::CMP_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::CMP_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false);
if (Rn_index != 15) { if (Rn_index != ArmReg::PC) {
OpArg Rn = reg_alloc.LockArmForRead(Rn_index); OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
code->CMP(32, Rn, R(tmp)); code->CMP(32, Rn, R(tmp));
reg_alloc.UnlockArm(Rn_index); reg_alloc.UnlockArm(Rn_index);
@ -683,11 +683,11 @@ void JitX64::CMP_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift,
} }
void JitX64::CMP_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::CMP_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false);
if (Rn_index != 15) { if (Rn_index != ArmReg::PC) {
OpArg Rn = reg_alloc.LockArmForRead(Rn_index); OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
code->CMP(32, Rn, R(tmp)); code->CMP(32, Rn, R(tmp));
reg_alloc.UnlockArm(Rn_index); reg_alloc.UnlockArm(Rn_index);
@ -708,7 +708,7 @@ void JitX64::CMP_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shif
} }
void JitX64::EOR_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { void JitX64::EOR_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
@ -724,13 +724,13 @@ void JitX64::EOR_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int ro
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::EOR_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::EOR_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, S); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, S);
@ -745,13 +745,13 @@ void JitX64::EOR_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::EOR_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::EOR_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S);
@ -766,13 +766,13 @@ void JitX64::EOR_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::MOV_imm(Cond cond, bool S, ArmReg Rd_index, int rotate, ArmImm8 imm8) { void JitX64::MOV_imm(Cond cond, bool S, ArmReg Rd_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
@ -791,13 +791,13 @@ void JitX64::MOV_imm(Cond cond, bool S, ArmReg Rd_index, int rotate, ArmImm8 imm
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::MOV_reg(Cond cond, bool S, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::MOV_reg(Cond cond, bool S, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, S); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, S);
@ -813,13 +813,13 @@ void JitX64::MOV_reg(Cond cond, bool S, ArmReg Rd_index, ArmImm5 imm5, ShiftType
reg_alloc.UnlockTemp(tmp); reg_alloc.UnlockTemp(tmp);
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::MOV_rsr(Cond cond, bool S, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::MOV_rsr(Cond cond, bool S, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S);
@ -835,13 +835,13 @@ void JitX64::MOV_rsr(Cond cond, bool S, ArmReg Rd_index, ArmReg Rs_index, ShiftT
reg_alloc.UnlockTemp(tmp); reg_alloc.UnlockTemp(tmp);
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::MVN_imm(Cond cond, bool S, ArmReg Rd_index, int rotate, ArmImm8 imm8) { void JitX64::MVN_imm(Cond cond, bool S, ArmReg Rd_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
@ -860,13 +860,13 @@ void JitX64::MVN_imm(Cond cond, bool S, ArmReg Rd_index, int rotate, ArmImm8 imm
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::MVN_reg(Cond cond, bool S, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::MVN_reg(Cond cond, bool S, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, S); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, S);
code->NOT(32, R(tmp)); code->NOT(32, R(tmp));
@ -883,13 +883,13 @@ void JitX64::MVN_reg(Cond cond, bool S, ArmReg Rd_index, ArmImm5 imm5, ShiftType
reg_alloc.UnlockTemp(tmp); reg_alloc.UnlockTemp(tmp);
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::MVN_rsr(Cond cond, bool S, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::MVN_rsr(Cond cond, bool S, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S);
code->NOT(32, R(tmp)); code->NOT(32, R(tmp));
@ -906,13 +906,13 @@ void JitX64::MVN_rsr(Cond cond, bool S, ArmReg Rd_index, ArmReg Rs_index, ShiftT
reg_alloc.UnlockTemp(tmp); reg_alloc.UnlockTemp(tmp);
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::ORR_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { void JitX64::ORR_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
@ -928,13 +928,13 @@ void JitX64::ORR_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int ro
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::ORR_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::ORR_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, S); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, S);
@ -949,13 +949,13 @@ void JitX64::ORR_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::ORR_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::ORR_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S);
@ -970,20 +970,20 @@ void JitX64::ORR_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::RSB_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { void JitX64::RSB_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
CompileDataProcessingHelper_Reverse(Rn_index, Rd_index, [&](X64Reg Rd) { CompileDataProcessingHelper_Reverse(Rn_index, Rd_index, [&](X64Reg Rd) {
code->MOV(32, R(Rd), Imm32(immediate)); code->MOV(32, R(Rd), Imm32(immediate));
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
code->SUB(32, R(Rd), Imm32(GetReg15Value())); code->SUB(32, R(Rd), Imm32(GetReg15Value()));
} else { } else {
Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index); Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
@ -998,20 +998,20 @@ void JitX64::RSB_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int ro
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::RSB_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::RSB_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false);
CompileDataProcessingHelper_Reverse(Rn_index, Rd_index, [&](X64Reg Rd) { CompileDataProcessingHelper_Reverse(Rn_index, Rd_index, [&](X64Reg Rd) {
code->MOV(32, R(Rd), R(tmp)); code->MOV(32, R(Rd), R(tmp));
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
code->SUB(32, R(Rd), Imm32(GetReg15Value())); code->SUB(32, R(Rd), Imm32(GetReg15Value()));
} else { } else {
Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index); Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
@ -1028,20 +1028,20 @@ void JitX64::RSB_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::RSB_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::RSB_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false);
CompileDataProcessingHelper_Reverse(Rn_index, Rd_index, [&](X64Reg Rd) { CompileDataProcessingHelper_Reverse(Rn_index, Rd_index, [&](X64Reg Rd) {
code->MOV(32, R(Rd), R(tmp)); code->MOV(32, R(Rd), R(tmp));
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
code->SUB(32, R(Rd), Imm32(GetReg15Value())); code->SUB(32, R(Rd), Imm32(GetReg15Value()));
} else { } else {
Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index); Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
@ -1058,13 +1058,13 @@ void JitX64::RSB_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::RSC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { void JitX64::RSC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
@ -1074,7 +1074,7 @@ void JitX64::RSC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int ro
code->BT(32, MJitStateCFlag(), Imm8(0)); code->BT(32, MJitStateCFlag(), Imm8(0));
code->CMC(); code->CMC();
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
code->SBB(32, R(Rd), Imm32(GetReg15Value())); code->SBB(32, R(Rd), Imm32(GetReg15Value()));
} else { } else {
Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index); Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
@ -1089,13 +1089,13 @@ void JitX64::RSC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int ro
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::RSC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::RSC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false);
@ -1105,7 +1105,7 @@ void JitX64::RSC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm
code->BT(32, MJitStateCFlag(), Imm8(0)); code->BT(32, MJitStateCFlag(), Imm8(0));
code->CMC(); code->CMC();
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
code->SBB(32, R(Rd), Imm32(GetReg15Value())); code->SBB(32, R(Rd), Imm32(GetReg15Value()));
} else { } else {
Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index); Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
@ -1122,13 +1122,13 @@ void JitX64::RSC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::RSC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::RSC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false);
@ -1138,7 +1138,7 @@ void JitX64::RSC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg
code->BT(32, MJitStateCFlag(), Imm8(0)); code->BT(32, MJitStateCFlag(), Imm8(0));
code->CMC(); code->CMC();
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
code->SBB(32, R(Rd), Imm32(GetReg15Value())); code->SBB(32, R(Rd), Imm32(GetReg15Value()));
} else { } else {
Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index); Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
@ -1155,13 +1155,13 @@ void JitX64::RSC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::SBC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { void JitX64::SBC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
@ -1177,13 +1177,13 @@ void JitX64::SBC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int ro
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::SBC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::SBC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false);
@ -1201,13 +1201,13 @@ void JitX64::SBC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::SBC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::SBC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false);
@ -1225,13 +1225,13 @@ void JitX64::SBC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::SUB_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { void JitX64::SUB_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
@ -1245,13 +1245,13 @@ void JitX64::SUB_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int ro
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::SUB_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::SUB_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, false);
@ -1267,13 +1267,13 @@ void JitX64::SUB_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::SUB_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::SUB_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false);
@ -1289,20 +1289,20 @@ void JitX64::SUB_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg
} }
current.arm_pc += GetInstSize(); current.arm_pc += GetInstSize();
if (Rd_index == 15) { if (Rd_index == ArmReg::PC) {
CompileReturnToDispatch(); CompileReturnToDispatch();
} }
} }
void JitX64::TEQ_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) { void JitX64::TEQ_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
X64Reg Rn_tmp = reg_alloc.AllocTemp(); X64Reg Rn_tmp = reg_alloc.AllocTemp();
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
code->MOV(32, R(Rn_tmp), Imm32(GetReg15Value())); code->MOV(32, R(Rn_tmp), Imm32(GetReg15Value()));
} else { } else {
Gen::OpArg Rn_real = reg_alloc.LockArmForRead(Rn_index); Gen::OpArg Rn_real = reg_alloc.LockArmForRead(Rn_index);
@ -1323,11 +1323,11 @@ void JitX64::TEQ_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) {
} }
void JitX64::TEQ_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::TEQ_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, true); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, true);
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
code->XOR(32, R(tmp), Imm32(GetReg15Value())); code->XOR(32, R(tmp), Imm32(GetReg15Value()));
} else { } else {
Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index); Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
@ -1343,11 +1343,11 @@ void JitX64::TEQ_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift,
} }
void JitX64::TEQ_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::TEQ_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, true); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, true);
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
code->XOR(32, R(tmp), Imm32(GetReg15Value())); code->XOR(32, R(tmp), Imm32(GetReg15Value()));
} else { } else {
Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index); Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
@ -1363,13 +1363,13 @@ void JitX64::TEQ_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shif
} }
void JitX64::TST_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) { void JitX64::TST_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
u32 immediate = rotr(imm8, rotate * 2); u32 immediate = rotr(imm8, rotate * 2);
X64Reg Rn; X64Reg Rn;
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
Rn = reg_alloc.AllocTemp(); Rn = reg_alloc.AllocTemp();
code->MOV(32, R(Rn), Imm32(GetReg15Value())); code->MOV(32, R(Rn), Imm32(GetReg15Value()));
} else { } else {
@ -1378,7 +1378,7 @@ void JitX64::TST_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) {
code->TEST(32, R(Rn), Imm32(immediate)); code->TEST(32, R(Rn), Imm32(immediate));
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
reg_alloc.UnlockTemp(Rn); reg_alloc.UnlockTemp(Rn);
} else { } else {
reg_alloc.UnlockArm(Rn_index); reg_alloc.UnlockArm(Rn_index);
@ -1393,11 +1393,11 @@ void JitX64::TST_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) {
} }
void JitX64::TST_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) { void JitX64::TST_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, true); Gen::X64Reg tmp = CompileDataProcessingHelper_reg(imm5, shift, Rm_index, true);
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
code->TEST(32, R(tmp), Imm32(GetReg15Value())); code->TEST(32, R(tmp), Imm32(GetReg15Value()));
} else { } else {
OpArg Rn = reg_alloc.LockArmForRead(Rn_index); OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
@ -1413,11 +1413,11 @@ void JitX64::TST_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift,
} }
void JitX64::TST_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { void JitX64::TST_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, true); Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, true);
if (Rn_index == 15) { if (Rn_index == ArmReg::PC) {
code->TEST(32, R(tmp), Imm32(GetReg15Value())); code->TEST(32, R(tmp), Imm32(GetReg15Value()));
} else { } else {
OpArg Rn = reg_alloc.LockArmForRead(Rn_index); OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
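
Note on the hunks above: the register-shift switch now dispatches on ShiftType enumerators instead of the raw 2-bit field values (0b00 through 0b11), and PC checks compare against ArmReg::PC rather than the literal 15. A minimal sketch of declarations consistent with that usage follows; the actual definitions live in the decoder headers, and any enumerator not visible in the diff is an assumption.

#include <cstdint>

using u32 = std::uint32_t;

// Assumed layout: enumerator values mirror the ARM instruction encoding, so
// ShiftType::LSR corresponds to the old literal 0b01, and so on.
enum class ShiftType : u32 {
    LSL = 0b00, // logical shift left
    LSR = 0b01, // logical shift right
    ASR = 0b10, // arithmetic shift right
    ROR = 0b11, // rotate right
};

// Assumed layout: R0..R15 numbered as in the ARM register file, with PC
// aliasing R15 so that `Rd_index == ArmReg::PC` replaces `Rd_index == 15`.
enum class ArmReg : u32 {
    R0 = 0, R1, R2, R3, R4, R5, R6, R7,
    R8, R9, R10, R11, R12, R13, R14, R15,
    SP = R13, LR = R14, PC = R15,
};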

View File

@@ -18,7 +18,7 @@ static void Breakpoint(u32 imm) {
 }
 void JitX64::BKPT(Cond cond, ArmImm12 imm12, ArmImm4 imm4) {
-cond_manager.CompileCond((ConditionCode) cond);
+cond_manager.CompileCond(cond);
 ASSERT_MSG(false, "BKPT instruction @ pc=0x%08X", current.arm_pc);
@@ -38,7 +38,7 @@ static void ServiceCall(u64 imm) {
 }
 void JitX64::SVC(Cond cond, ArmImm24 imm24) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
 // Flush and write out absolutely everything.
 code->MOV(32, MJitStateArmPC(), Imm32(current.arm_pc));
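
The call sites above drop the (ConditionCode) cast: CompileCond is taken to accept the decoder's Cond type directly. A sketch of a Cond definition consistent with those call sites, using the standard ARM condition-code encoding; the real declaration lives in the decoder headers, and the CompileCond signature shown is an assumption.

#include <cstdint>

// Standard ARM condition encoding: 0b0000 = EQ through 0b1110 = AL, 0b1111 = NV.
enum class Cond : std::uint32_t {
    EQ = 0, NE, CS, CC, MI, PL, VS, VC,
    HI, LS, GE, LT, GT, LE, AL, NV,
};

// Assumed signature after this change; previously every call site had to cast
// to SkyEye's ConditionCode first:
//     void CondManager::CompileCond(Cond cond);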

View File

@@ -19,7 +19,7 @@ using namespace Gen;
 void JitX64::LoadAndStoreWordOrUnsignedByte_Immediate_Helper(X64Reg dest, bool U, ArmReg Rn_index, ArmImm12 imm12) {
 // address = Rn +/- imm12
-if (Rn_index == 15) {
+if (Rn_index == ArmReg::PC) {
 u32 address;
 if (U) {
 address = GetReg15Value_WordAligned() + imm12;
@@ -37,7 +37,7 @@ void JitX64::LoadAndStoreWordOrUnsignedByte_Immediate_Helper(X64Reg dest, bool U
 }
 void JitX64::LoadAndStoreWordOrUnsignedByte_ImmediateOffset(X64Reg dest, bool U, ArmReg Rn_index, ArmImm12 imm12) {
-if (Rn_index != 15) {
+if (Rn_index != ArmReg::PC) {
 OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
 code->MOV(32, R(dest), Rn);
 reg_alloc.UnlockArm(Rn_index);
@@ -47,7 +47,7 @@ void JitX64::LoadAndStoreWordOrUnsignedByte_ImmediateOffset(X64Reg dest, bool U,
 }
 void JitX64::LoadAndStoreWordOrUnsignedByte_ImmediatePreIndexed(X64Reg dest, bool U, ArmReg Rn_index, ArmImm12 imm12) {
-ASSERT_MSG(Rn_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rn_index != ArmReg::PC, "UNPREDICTABLE");
 X64Reg Rn = reg_alloc.BindArmForReadWrite(Rn_index);
@@ -58,7 +58,7 @@ void JitX64::LoadAndStoreWordOrUnsignedByte_ImmediatePreIndexed(X64Reg dest, boo
 }
 void JitX64::LoadAndStoreWordOrUnsignedByte_ImmediatePostIndexed(X64Reg dest, bool U, ArmReg Rn_index, ArmImm12 imm12) {
-ASSERT_MSG(Rn_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rn_index != ArmReg::PC, "UNPREDICTABLE");
 X64Reg Rn = reg_alloc.BindArmForReadWrite(Rn_index);
 code->MOV(32, R(dest), R(Rn));
@@ -72,7 +72,7 @@ void JitX64::LoadAndStoreWordOrUnsignedByte_ImmediatePostIndexed(X64Reg dest, bo
 void JitX64::LoadAndStoreWordOrUnsignedByte_Register_Helper(X64Reg dest, bool U, ArmReg Rn_index, ArmReg Rm_index) {
 // address = Rn +/- Rm
-ASSERT_MSG(Rm_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rm_index != ArmReg::PC, "UNPREDICTABLE");
 if (Rm_index == Rn_index) {
 if (U) {
@@ -100,7 +100,7 @@ void JitX64::LoadAndStoreWordOrUnsignedByte_Register_Helper(X64Reg dest, bool U,
 /// This function assumes that the value of Rn is already in dest.
 void JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegister_Helper(X64Reg dest, bool U, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
-if (imm5 == 0 && shift == 0) {
+if (imm5 == 0 && shift == ShiftType::LSL) {
 LoadAndStoreWordOrUnsignedByte_Register_Helper(dest, U, Rn_index, Rm_index);
 return;
 }
@@ -108,7 +108,7 @@ void JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegister_Helper(X64Reg dest, b
 // index = Rm LSL imm5 / Rm LSR imm5 / Rm ASR imm5 / Rm ROR imm5 / Rm RRX
 // address = Rn +/- index
-ASSERT_MSG(Rm_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rm_index != ArmReg::PC, "UNPREDICTABLE");
 // TODO: Optimizations when Rn_index == Rm_index maybe.
@@ -135,7 +135,7 @@ void JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegister_Helper(X64Reg dest, b
 }
 void JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterOffset(X64Reg dest, bool U, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
-if (Rn_index != 15) {
+if (Rn_index != ArmReg::PC) {
 OpArg Rn = reg_alloc.LockArmForRead(Rn_index);
 code->MOV(32, R(dest), Rn);
 reg_alloc.UnlockArm(Rn_index);
@@ -147,7 +147,7 @@ void JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterOffset(X64Reg dest, bo
 }
 void JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPreIndexed(X64Reg dest, bool U, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
-ASSERT_MSG(Rn_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rn_index != ArmReg::PC, "UNPREDICTABLE");
 X64Reg Rn = reg_alloc.BindArmForReadWrite(Rn_index);
@@ -158,7 +158,7 @@ void JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPreIndexed(X64Reg dest
 }
 void JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPostIndexed(X64Reg dest, bool U, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
-ASSERT_MSG(Rn_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rn_index != ArmReg::PC, "UNPREDICTABLE");
 X64Reg Rn = reg_alloc.BindArmForReadWrite(Rn_index);
 code->MOV(32, R(dest), R(Rn));
@@ -168,14 +168,14 @@ void JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPostIndexed(X64Reg des
 reg_alloc.UnlockArm(Rn_index);
 }
-static void GetValueOfRegister(XEmitter* code, RegAlloc& reg_alloc, u32 r15_value, X64Reg x64_reg, ArmReg arm_reg) {
+static void GetValueOfRegister(XEmitter* code, RegAlloc& reg_alloc, u32 r15_value, X64Reg dest_x64_reg, ArmReg src_arm_reg) {
-if (arm_reg != 15) {
+if (src_arm_reg != ArmReg::PC) {
-OpArg Rd = reg_alloc.LockArmForRead(arm_reg);
+OpArg Rd = reg_alloc.LockArmForRead(src_arm_reg);
-code->MOV(32, R(x64_reg), Rd);
+code->MOV(32, R(dest_x64_reg), Rd);
-reg_alloc.UnlockArm(arm_reg);
+reg_alloc.UnlockArm(src_arm_reg);
 } else {
 // The following is IMPLEMENTATION DEFINED
-code->MOV(32, R(x64_reg), Imm32(r15_value));
+code->MOV(32, R(dest_x64_reg), Imm32(r15_value));
 }
 }
@@ -207,7 +207,7 @@ static void LoadStoreCommon_AddrMode2(JitX64* jit, RegAlloc& reg_alloc, bool P,
 // Load/Store Instructions: Addressing Mode 2
 void JitX64::LDR_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm12 imm12) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
 // Rd == R15 is UNPREDICTABLE only if address[1:0] is not 0b00 or if value loaded into R15[1:0] is 0b10.
 if (W)
@@ -232,7 +232,7 @@ void JitX64::LDR_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 reg_alloc.UnlockX64(ABI_RETURN);
 current.arm_pc += GetInstSize();
-if (Rd_index == 15) {
+if (Rd_index == ArmReg::PC) {
 code->AND(32, MJitStateArmPC(), Imm32(0xFFFFFFFE));
 code->BT(32, R(ABI_RETURN), Imm8(0));
 code->SETcc(CC_C, MJitStateTFlag());
@@ -241,7 +241,7 @@ void JitX64::LDR_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 }
 void JitX64::LDR_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
 LoadStoreCommon_AddrMode2(this, reg_alloc, P, W,
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterOffset,
@@ -262,7 +262,7 @@ void JitX64::LDR_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 reg_alloc.UnlockX64(ABI_RETURN);
 current.arm_pc += GetInstSize();
-if (Rd_index == 15) {
+if (Rd_index == ArmReg::PC) {
 code->AND(32, MJitStateArmPC(), Imm32(0xFFFFFFFE));
 code->BT(32, R(ABI_RETURN), Imm8(0));
 code->SETcc(CC_C, MJitStateTFlag());
@@ -271,9 +271,9 @@ void JitX64::LDR_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 }
 void JitX64::LDRB_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm12 imm12) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index != ArmReg::PC, "UNPREDICTABLE");
 if (W)
 ASSERT_MSG(Rn_index != Rd_index, "UNPREDICTABLE");
@@ -299,9 +299,9 @@ void JitX64::LDRB_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 }
 void JitX64::LDRB_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index != ArmReg::PC, "UNPREDICTABLE");
 if (W)
 ASSERT_MSG(Rn_index != Rd_index, "UNPREDICTABLE");
@@ -327,7 +327,7 @@ void JitX64::LDRB_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 }
 void JitX64::STR_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm12 imm12) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
 // Rd_index == R15 is IMPLEMENTATION DEFINED
 if (W)
@@ -353,7 +353,7 @@ void JitX64::STR_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 }
 void JitX64::STR_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
 // Rd_index == R15 is IMPLEMENTATION DEFINED
 if (W)
@@ -379,9 +379,9 @@ void JitX64::STR_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 }
 void JitX64::STRB_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm12 imm12) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index != ArmReg::PC, "UNPREDICTABLE");
 if (W)
 ASSERT_MSG(Rn_index != Rd_index, "UNPREDICTABLE");
@@ -405,9 +405,9 @@ void JitX64::STRB_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 }
 void JitX64::STRB_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm5 imm5, ShiftType shift, ArmReg Rm_index) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index != ArmReg::PC, "UNPREDICTABLE");
 if (W)
 ASSERT_MSG(Rn_index != Rd_index, "UNPREDICTABLE");
@@ -462,10 +462,10 @@ static ArmImm8 CombineImm8ab(ArmImm4 imm8a, ArmImm4 imm8b) {
 // Load/Store Instructions: Addressing Mode 3
 void JitX64::LDRD_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm4 imm8a, ArmImm4 imm8b) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index < 14, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index < ArmReg::R14, "UNPREDICTABLE");
-ASSERT_MSG(Rd_index % 2 == 0, "UNDEFINED");
+ASSERT_MSG(IsEvenArmReg(Rd_index), "UNDEFINED");
 if (W)
 ASSERT_MSG(Rn_index != Rd_index && Rn_index != Rd_index + 1, "UNPREDICTABLE");
@@ -494,10 +494,10 @@ void JitX64::LDRD_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 }
 void JitX64::LDRD_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rm_index) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index < 14, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index < ArmReg::R14, "UNPREDICTABLE");
-ASSERT_MSG(Rd_index % 2 == 0, "UNDEFINED");
+ASSERT_MSG(IsEvenArmReg(Rd_index), "UNDEFINED");
 ASSERT_MSG(Rm_index != Rd_index && Rm_index != Rd_index + 1, "UNPREDICTABLE");
 if (W)
 ASSERT_MSG(Rn_index != Rd_index && Rn_index != Rd_index + 1, "UNPREDICTABLE");
@@ -506,7 +506,7 @@ void JitX64::LDRD_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterOffset,
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPreIndexed,
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPostIndexed,
-U, Rn_index, 0, 0, Rm_index);
+U, Rn_index, 0, ShiftType::LSL, Rm_index);
 CompileCallHost(reinterpret_cast<const void* const>(!current.EFlag ? &Load64LE : &Load64BE));
@@ -527,9 +527,9 @@ void JitX64::LDRD_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 }
 void JitX64::LDRH_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm4 imm8a, ArmImm4 imm8b) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index != ArmReg::PC, "UNPREDICTABLE");
 if (W)
 ASSERT_MSG(Rn_index != Rd_index, "UNPREDICTABLE");
@@ -554,9 +554,9 @@ void JitX64::LDRH_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 }
 void JitX64::LDRH_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rm_index) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index != ArmReg::PC, "UNPREDICTABLE");
 if (W)
 ASSERT_MSG(Rn_index != Rd_index, "UNPREDICTABLE");
@@ -564,7 +564,7 @@ void JitX64::LDRH_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterOffset,
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPreIndexed,
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPostIndexed,
-U, Rn_index, 0, 0, Rm_index);
+U, Rn_index, 0, ShiftType::LSL, Rm_index);
 CompileCallHost(reinterpret_cast<const void* const>(!current.EFlag ? &Load16LE : &Load16BE));
@@ -581,9 +581,9 @@ void JitX64::LDRH_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 }
 void JitX64::LDRSB_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm4 imm8a, ArmImm4 imm8b) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index != ArmReg::PC, "UNPREDICTABLE");
 if (W)
 ASSERT_MSG(Rn_index != Rd_index, "UNPREDICTABLE");
@@ -608,9 +608,9 @@ void JitX64::LDRSB_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmRe
 }
 void JitX64::LDRSB_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rm_index) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index != ArmReg::PC, "UNPREDICTABLE");
 if (W)
 ASSERT_MSG(Rn_index != Rd_index, "UNPREDICTABLE");
@@ -618,7 +618,7 @@ void JitX64::LDRSB_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmRe
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterOffset,
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPreIndexed,
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPostIndexed,
-U, Rn_index, 0, 0, Rm_index);
+U, Rn_index, 0, ShiftType::LSL, Rm_index);
 CompileCallHost(reinterpret_cast<const void* const>(&Load8));
@@ -635,9 +635,9 @@ void JitX64::LDRSB_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmRe
 }
 void JitX64::LDRSH_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm4 imm8a, ArmImm4 imm8b) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index != ArmReg::PC, "UNPREDICTABLE");
 if (W)
 ASSERT_MSG(Rn_index != Rd_index, "UNPREDICTABLE");
@@ -662,9 +662,9 @@ void JitX64::LDRSH_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmRe
 }
 void JitX64::LDRSH_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rm_index) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index != 15, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index != ArmReg::PC, "UNPREDICTABLE");
 if (W)
 ASSERT_MSG(Rn_index != Rd_index, "UNPREDICTABLE");
@@ -672,7 +672,7 @@ void JitX64::LDRSH_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmRe
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterOffset,
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPreIndexed,
 &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPostIndexed,
-U, Rn_index, 0, 0, Rm_index);
+U, Rn_index, 0, ShiftType::LSL, Rm_index);
 CompileCallHost(reinterpret_cast<const void* const>(!current.EFlag ? &Load16LE : &Load16BE));
@@ -689,10 +689,10 @@ void JitX64::LDRSH_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmRe
 }
 void JitX64::STRD_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm4 imm8a, ArmImm4 imm8b) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
-ASSERT_MSG(Rd_index < 14, "UNPREDICTABLE");
+ASSERT_MSG(Rd_index < ArmReg::R14, "UNPREDICTABLE");
-ASSERT_MSG(Rd_index % 2 == 0, "UNDEFINED");
+ASSERT_MSG(IsEvenArmReg(Rd_index), "UNDEFINED");
 LoadStoreCommon_AddrMode3(this, reg_alloc, P, W,
 &JitX64::LoadAndStoreWordOrUnsignedByte_ImmediateOffset,
@@ -718,10 +718,10 @@ void JitX64::STRD_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
 }
 void JitX64::STRD_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rm_index) {
-cond_manager.CompileCond((ConditionCode)cond);
+cond_manager.CompileCond(cond);
ASSERT_MSG(Rd_index < 14, "UNPREDICTABLE"); ASSERT_MSG(Rd_index < ArmReg::R14, "UNPREDICTABLE");
ASSERT_MSG(Rd_index % 2 == 0, "UNDEFINED"); ASSERT_MSG(IsEvenArmReg(Rd_index), "UNDEFINED");
if (W) if (W)
ASSERT_MSG(Rn_index != Rd_index && Rn_index != Rd_index + 1, "UNPREDICTABLE"); ASSERT_MSG(Rn_index != Rd_index && Rn_index != Rd_index + 1, "UNPREDICTABLE");
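
IsEvenArmReg replaces the bare modulo test, which no longer compiles now that ArmReg is an enum class with no implicit conversion to int. A one-line sketch of the assumed helper:

    // Sketch of the assumed helper, not the actual definition from the codebase.
    constexpr bool IsEvenArmReg(ArmReg reg) {
        // Doubleword transfers use the register pair Rd, Rd+1, so Rd must be even.
        return static_cast<unsigned>(reg) % 2 == 0;
    }
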
@ -729,7 +729,7 @@ void JitX64::STRD_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
&JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterOffset, &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterOffset,
&JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPreIndexed, &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPreIndexed,
&JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPostIndexed, &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPostIndexed,
U, Rn_index, 0, 0, Rm_index); U, Rn_index, 0, ShiftType::LSL, Rm_index);
reg_alloc.FlushX64(ABI_PARAM2); reg_alloc.FlushX64(ABI_PARAM2);
reg_alloc.LockX64(ABI_PARAM2); reg_alloc.LockX64(ABI_PARAM2);
@ -749,9 +749,9 @@ void JitX64::STRD_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
} }
void JitX64::STRH_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm4 imm8a, ArmImm4 imm8b) { void JitX64::STRH_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmImm4 imm8a, ArmImm4 imm8b) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(Rd_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rd_index != ArmReg::PC, "UNPREDICTABLE");
if (W) if (W)
ASSERT_MSG(Rd_index != Rn_index, "UNPREDICTABLE"); ASSERT_MSG(Rd_index != Rn_index, "UNPREDICTABLE");
@ -775,9 +775,9 @@ void JitX64::STRH_imm(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
} }
void JitX64::STRH_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rm_index) { void JitX64::STRH_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rm_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(Rd_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rd_index != ArmReg::PC, "UNPREDICTABLE");
if (W) if (W)
ASSERT_MSG(Rd_index != Rn_index, "UNPREDICTABLE"); ASSERT_MSG(Rd_index != Rn_index, "UNPREDICTABLE");
@ -785,7 +785,7 @@ void JitX64::STRH_reg(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmReg
&JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterOffset, &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterOffset,
&JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPreIndexed, &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPreIndexed,
&JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPostIndexed, &JitX64::LoadAndStoreWordOrUnsignedByte_ScaledRegisterPostIndexed,
U, Rn_index, 0, 0, Rm_index); U, Rn_index, 0, ShiftType::LSL, Rm_index);
reg_alloc.FlushX64(ABI_PARAM2); reg_alloc.FlushX64(ABI_PARAM2);
reg_alloc.LockX64(ABI_PARAM2); reg_alloc.LockX64(ABI_PARAM2);
@ -873,7 +873,7 @@ static void LoadAndStoreMultiple_DecrementAfter(XEmitter* code, RegAlloc& reg_al
} }
static void LoadAndStoreMultiple_DecrementBefore(XEmitter* code, RegAlloc& reg_alloc, bool W, ArmReg Rn_index, ArmRegList list, std::function<void()> call) { static void LoadAndStoreMultiple_DecrementBefore(XEmitter* code, RegAlloc& reg_alloc, bool W, ArmReg Rn_index, ArmRegList list, std::function<void()> call) {
if (W && !(list & (1 << Rn_index))) { if (W && !(list & MakeRegList(Rn_index))) {
X64Reg Rn = reg_alloc.BindArmForReadWrite(Rn_index); X64Reg Rn = reg_alloc.BindArmForReadWrite(Rn_index);
code->SUB(32, R(Rn), Imm32(4 * Common::CountSetBits(list))); code->SUB(32, R(Rn), Imm32(4 * Common::CountSetBits(list)));
code->MOV(32, R(ABI_PARAM1), R(Rn)); code->MOV(32, R(ABI_PARAM1), R(Rn));
@ -909,7 +909,7 @@ static void LoadAndStoreMultiple_Helper(XEmitter* code, RegAlloc& reg_alloc, boo
for (int i = 0; i < 15; i++) { for (int i = 0; i < 15; i++) {
if (list & (1 << i)) { if (list & (1 << i)) {
reg_alloc.FlushArm(i); reg_alloc.FlushArm(static_cast<ArmReg>(i));
} }
} }
@ -954,11 +954,11 @@ static void ExecuteLDMBE(u32 start_address, u16 reg_list, JitState* jit_state) {
} }
void JitX64::LDM(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmRegList list) { void JitX64::LDM(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmRegList list) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(Rn_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rn_index != ArmReg::PC, "UNPREDICTABLE");
ASSERT_MSG(list != 0, "UNPREDICTABLE"); ASSERT_MSG(list != 0, "UNPREDICTABLE");
if (W && (list & (1 << Rn_index))) if (W && (list & MakeRegList(Rn_index)))
ASSERT_MSG(false, "UNPREDICTABLE"); ASSERT_MSG(false, "UNPREDICTABLE");
// TODO: Optimize // TODO: Optimize
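
MakeRegList turns a register index into a one-hot bitmask, replacing the old "1 << Rn_index" expression that relied on implicit integer conversion. A plausible definition, assuming ArmRegList is an unsigned integer alias (the real definition is not part of this diff):

    // Plausible sketch; exact signature is an assumption.
    constexpr ArmRegList MakeRegList(ArmReg reg) {
        return static_cast<ArmRegList>(1u << static_cast<unsigned>(reg));
    }
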
@ -1012,12 +1012,12 @@ static void ExecuteSTMBE(u32 start_address, u16 reg_list, JitState* jit_state) {
} }
void JitX64::STM(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmRegList list) { void JitX64::STM(Cond cond, bool P, bool U, bool W, ArmReg Rn_index, ArmRegList list) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(Rn_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rn_index != ArmReg::PC, "UNPREDICTABLE");
ASSERT_MSG(list != 0, "UNPREDICTABLE"); ASSERT_MSG(list != 0, "UNPREDICTABLE");
if (W && (list & (1 << Rn_index))) if (W && (list & MakeRegList(Rn_index)))
ASSERT_MSG((list & ((1 << Rn_index) - 1)) == 0, "UNPREDICTABLE"); ASSERT_MSG((list & (MakeRegList(Rn_index) - 1)) == 0, "UNPREDICTABLE");
// TODO: Optimize // TODO: Optimize
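
The STM assertion encodes the architectural writeback rule: storing a list that contains Rn is only defined when Rn is the lowest-numbered register in it. MakeRegList(Rn_index) - 1 is a mask of every register numbered below Rn, so the list must not intersect it. As a hypothetical standalone predicate:

    // Hypothetical restatement of the assertion above.
    static bool IsLowestSetRegister(ArmRegList list, ArmReg Rn) {
        // True when no register numbered below Rn appears in the list.
        return (list & (MakeRegList(Rn) - 1)) == 0;
    }
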
View File
@ -24,7 +24,7 @@ void JitX64::CLREX() {
} }
void ExclusiveLoadCommon(XEmitter* code, RegAlloc& reg_alloc, OpArg exclusive_state, OpArg exclusive_tag, ArmReg Rn_index, ArmReg Rd_index) { void ExclusiveLoadCommon(XEmitter* code, RegAlloc& reg_alloc, OpArg exclusive_state, OpArg exclusive_tag, ArmReg Rn_index, ArmReg Rd_index) {
ASSERT_MSG(Rn_index != 15 && Rd_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rn_index != ArmReg::PC && Rd_index != ArmReg::PC, "UNPREDICTABLE");
code->MOV(8, exclusive_state, Imm8(1)); code->MOV(8, exclusive_state, Imm8(1));
@ -42,9 +42,9 @@ void ExclusiveLoadCommon(XEmitter* code, RegAlloc& reg_alloc, OpArg exclusive_st
} }
void JitX64::LDREX(Cond cond, ArmReg Rn_index, ArmReg Rd_index) { void JitX64::LDREX(Cond cond, ArmReg Rn_index, ArmReg Rd_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(Rn_index != 15 && Rd_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rn_index != ArmReg::PC && Rd_index != ArmReg::PC, "UNPREDICTABLE");
ExclusiveLoadCommon(code, reg_alloc, MJitStateExclusiveState(), MJitStateExclusiveTag(), Rn_index, Rd_index); ExclusiveLoadCommon(code, reg_alloc, MJitStateExclusiveState(), MJitStateExclusiveTag(), Rn_index, Rd_index);
CompileCallHost(reinterpret_cast<const void* const>(!current.EFlag ? &Load32LE : &Load32BE)); CompileCallHost(reinterpret_cast<const void* const>(!current.EFlag ? &Load32LE : &Load32BE));
@ -61,9 +61,9 @@ void JitX64::LDREX(Cond cond, ArmReg Rn_index, ArmReg Rd_index) {
} }
void JitX64::LDREXB(Cond cond, ArmReg Rn_index, ArmReg Rd_index) { void JitX64::LDREXB(Cond cond, ArmReg Rn_index, ArmReg Rd_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(Rn_index != 15 && Rd_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rn_index != ArmReg::PC && Rd_index != ArmReg::PC, "UNPREDICTABLE");
ExclusiveLoadCommon(code, reg_alloc, MJitStateExclusiveState(), MJitStateExclusiveTag(), Rn_index, Rd_index); ExclusiveLoadCommon(code, reg_alloc, MJitStateExclusiveState(), MJitStateExclusiveTag(), Rn_index, Rd_index);
CompileCallHost(reinterpret_cast<const void* const>(&Load8)); CompileCallHost(reinterpret_cast<const void* const>(&Load8));
@ -80,11 +80,11 @@ void JitX64::LDREXB(Cond cond, ArmReg Rn_index, ArmReg Rd_index) {
} }
void JitX64::LDREXD(Cond cond, ArmReg Rn_index, ArmReg Rd_index) { void JitX64::LDREXD(Cond cond, ArmReg Rn_index, ArmReg Rd_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(Rd_index % 2 == 0, "UNPREDICTABLE"); ASSERT_MSG(IsEvenArmReg(Rd_index), "UNPREDICTABLE");
ASSERT_MSG(Rd_index < 14, "UNPREDICTABLE"); ASSERT_MSG(Rd_index < ArmReg::R14, "UNPREDICTABLE");
ASSERT_MSG(Rn_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rn_index != ArmReg::PC, "UNPREDICTABLE");
ExclusiveLoadCommon(code, reg_alloc, MJitStateExclusiveState(), MJitStateExclusiveTag(), Rn_index, Rd_index); ExclusiveLoadCommon(code, reg_alloc, MJitStateExclusiveState(), MJitStateExclusiveTag(), Rn_index, Rd_index);
CompileCallHost(reinterpret_cast<const void* const>(!current.EFlag ? Load64LE : Load64BE)); CompileCallHost(reinterpret_cast<const void* const>(!current.EFlag ? Load64LE : Load64BE));
@ -105,9 +105,9 @@ void JitX64::LDREXD(Cond cond, ArmReg Rn_index, ArmReg Rd_index) {
} }
void JitX64::LDREXH(Cond cond, ArmReg Rn_index, ArmReg Rd_index) { void JitX64::LDREXH(Cond cond, ArmReg Rn_index, ArmReg Rd_index) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(Rn_index != 15 && Rd_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rn_index != ArmReg::PC && Rd_index != ArmReg::PC, "UNPREDICTABLE");
ExclusiveLoadCommon(code, reg_alloc, MJitStateExclusiveState(), MJitStateExclusiveTag(), Rn_index, Rd_index); ExclusiveLoadCommon(code, reg_alloc, MJitStateExclusiveState(), MJitStateExclusiveTag(), Rn_index, Rd_index);
CompileCallHost(reinterpret_cast<const void* const>(!current.EFlag ? Load16LE : Load16BE)); CompileCallHost(reinterpret_cast<const void* const>(!current.EFlag ? Load16LE : Load16BE));
@ -157,9 +157,9 @@ void JitX64::STREX(Cond cond, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rm_index)
CompileInterpretInstruction(); CompileInterpretInstruction();
return; return;
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(Rn_index != 15 && Rd_index != 15 && Rm_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rn_index != ArmReg::PC && Rd_index != ArmReg::PC && Rm_index != ArmReg::PC, "UNPREDICTABLE");
ASSERT_MSG(Rd_index != Rn_index && Rd_index != Rm_index, "UNPREDICTABLE"); ASSERT_MSG(Rd_index != Rn_index && Rd_index != Rm_index, "UNPREDICTABLE");
reg_alloc.FlushX64(ABI_PARAM1); reg_alloc.FlushX64(ABI_PARAM1);
@ -188,9 +188,9 @@ void JitX64::STREXB(Cond cond, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rm_index
CompileInterpretInstruction(); CompileInterpretInstruction();
return; return;
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(Rn_index != 15 && Rd_index != 15 && Rm_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rn_index != ArmReg::PC && Rd_index != ArmReg::PC && Rm_index != ArmReg::PC, "UNPREDICTABLE");
ASSERT_MSG(Rd_index != Rn_index && Rd_index != Rm_index, "UNPREDICTABLE"); ASSERT_MSG(Rd_index != Rn_index && Rd_index != Rm_index, "UNPREDICTABLE");
reg_alloc.FlushX64(ABI_PARAM1); reg_alloc.FlushX64(ABI_PARAM1);
@ -219,13 +219,13 @@ void JitX64::STREXD(Cond cond, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rm_index
CompileInterpretInstruction(); CompileInterpretInstruction();
return; return;
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(Rn_index != 15 && Rd_index != 15 && Rm_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rn_index != ArmReg::PC && Rd_index != ArmReg::PC && Rm_index != ArmReg::PC, "UNPREDICTABLE");
ASSERT_MSG(Rm_index != 14, "UNPREDICTABLE"); ASSERT_MSG(Rm_index != ArmReg::R14, "UNPREDICTABLE");
ASSERT_MSG(Rd_index != Rn_index && Rd_index != Rm_index, "UNPREDICTABLE"); ASSERT_MSG(Rd_index != Rn_index && Rd_index != Rm_index, "UNPREDICTABLE");
ASSERT_MSG(Rd_index != Rm_index + 1, "UNPREDICTABLE"); ASSERT_MSG(Rd_index != Rm_index + 1, "UNPREDICTABLE");
ASSERT_MSG(Rm_index % 2 == 0, "UNPREDICTABLE"); ASSERT_MSG(IsEvenArmReg(Rm_index), "UNPREDICTABLE");
reg_alloc.FlushX64(ABI_PARAM1); reg_alloc.FlushX64(ABI_PARAM1);
reg_alloc.LockX64(ABI_PARAM1); reg_alloc.LockX64(ABI_PARAM1);
@ -259,9 +259,9 @@ void JitX64::STREXH(Cond cond, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rm_index
CompileInterpretInstruction(); CompileInterpretInstruction();
return; return;
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(Rn_index != 15 && Rd_index != 15 && Rm_index != 15, "UNPREDICTABLE"); ASSERT_MSG(Rn_index != ArmReg::PC && Rd_index != ArmReg::PC && Rm_index != ArmReg::PC, "UNPREDICTABLE");
ASSERT_MSG(Rd_index != Rn_index && Rd_index != Rm_index, "UNPREDICTABLE"); ASSERT_MSG(Rd_index != Rn_index && Rd_index != Rm_index, "UNPREDICTABLE");
reg_alloc.FlushX64(ABI_PARAM1); reg_alloc.FlushX64(ABI_PARAM1);
View File
@ -12,7 +12,7 @@ namespace JitX64 {
using namespace Gen; using namespace Gen;
void JitX64::thumb_B(Cond cond, ArmImm8 imm8) { void JitX64::thumb_B(Cond cond, ArmImm8 imm8) {
cond_manager.CompileCond((ConditionCode)cond); cond_manager.CompileCond(cond);
ASSERT_MSG(current.TFlag, "thumb_B may only be called in thumb mode"); ASSERT_MSG(current.TFlag, "thumb_B may only be called in thumb mode");
@ -23,7 +23,7 @@ void JitX64::thumb_B(Cond cond, ArmImm8 imm8) {
CompileUpdateCycles(false); CompileUpdateCycles(false);
CompileJumpToBB(new_pc); CompileJumpToBB(new_pc);
if (cond == ConditionCode::AL) { if (cond == Cond::AL) {
stop_compilation = true; stop_compilation = true;
} }
} }
@ -77,9 +77,9 @@ void JitX64::thumb_BLX_suffix(bool X, ArmImm11 imm11) {
(imm11 << 1); (imm11 << 1);
u32 new_lr = (current.arm_pc + 2) | 1; u32 new_lr = (current.arm_pc + 2) | 1;
Gen::OpArg LR = reg_alloc.LockArmForWrite(14); Gen::OpArg LR = reg_alloc.LockArmForWrite(ArmReg::LR);
code->MOV(32, LR, Imm32(new_lr)); code->MOV(32, LR, Imm32(new_lr));
reg_alloc.UnlockArm(14); reg_alloc.UnlockArm(ArmReg::LR);
if (X) { if (X) {
current.TFlag = false; current.TFlag = false;
View File
@ -79,7 +79,7 @@ void JitX64::CompileUpdateCycles(bool reset_cycles) {
} }
void JitX64::CompileReturnToDispatch() { void JitX64::CompileReturnToDispatch() {
if (cond_manager.CurrentCond() == ConditionCode::AL) { if (cond_manager.CurrentCond() == Cond::AL) {
reg_alloc.FlushEverything(); reg_alloc.FlushEverything();
CompileUpdateCycles(); CompileUpdateCycles();
code->JMPptr(MJitStateHostReturnRIP()); code->JMPptr(MJitStateHostReturnRIP());
View File
@ -132,15 +132,15 @@ private:
struct CondManager { struct CondManager {
private: private:
JitX64* jit; JitX64* jit;
ConditionCode current_cond; Cond current_cond;
bool flags_dirty; bool flags_dirty;
Gen::FixupBranch current_cond_fixup; Gen::FixupBranch current_cond_fixup;
public: public:
void Init(JitX64* jit_); void Init(JitX64* jit_);
void CompileCond(ConditionCode cond); void CompileCond(Cond cond);
void Always(); void Always();
void FlagsDirty(); void FlagsDirty();
ConditionCode CurrentCond(); Cond CurrentCond();
} cond_manager; } cond_manager;
private: private:
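
CondManager now carries the decoder's Cond type end-to-end instead of casting to the interpreter's ConditionCode at every call site. Cond presumably mirrors the standard four-bit ARM condition field; a sketch under that assumption (the enumerator names are a guess):

    // Assumed to follow the ARM condition-code encoding 0x0 through 0xF.
    enum class Cond {
        EQ, NE, CS, CC, MI, PL, VS, VC,
        HI, LS, GE, LT, GT, LE, AL, NV, // NV (0b1111) is the unconditional space on ARMv5+
    };
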
View File
@ -41,9 +41,9 @@ static Gen::OpArg MJitStateCpuReg(ArmReg arm_reg) {
static_assert(std::is_same<decltype(JitState::cpu_state), ARMul_State>::value, "JitState::cpu_state must be ARMul_State"); static_assert(std::is_same<decltype(JitState::cpu_state), ARMul_State>::value, "JitState::cpu_state must be ARMul_State");
static_assert(std::is_same<decltype(ARMul_State::Reg), std::array<u32, 16>>::value, "ARMul_State::Reg must be std::array<u32, 16>"); static_assert(std::is_same<decltype(ARMul_State::Reg), std::array<u32, 16>>::value, "ARMul_State::Reg must be std::array<u32, 16>");
ASSERT(arm_reg >= 0 && arm_reg <= 15); ASSERT(IsValidArmReg(arm_reg));
return Gen::MDisp(jit_state_reg, offsetof(JitState, cpu_state) + offsetof(ARMul_State, Reg) + (arm_reg) * sizeof(u32)); return Gen::MDisp(jit_state_reg, offsetof(JitState, cpu_state) + offsetof(ARMul_State, Reg) + static_cast<unsigned>(arm_reg) * sizeof(u32));
} }
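
With ArmReg an enum class, the old "arm_reg >= 0 && arm_reg <= 15" comparison and the implicit use of arm_reg as an array offset both stop compiling, hence IsValidArmReg plus the explicit static_cast in the displacement calculation. A minimal sketch of the assumed validity check:

    // Sketch of the assumed helper.
    constexpr bool IsValidArmReg(ArmReg reg) {
        return static_cast<unsigned>(reg) <= 15; // R0-R15
    }
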
void RegAlloc::Init(Gen::XEmitter* emitter) { void RegAlloc::Init(Gen::XEmitter* emitter) {
@ -51,12 +51,13 @@ void RegAlloc::Init(Gen::XEmitter* emitter) {
for (size_t i = 0; i < arm_gpr.size(); i++) { for (size_t i = 0; i < arm_gpr.size(); i++) {
arm_gpr[i].locked = false; arm_gpr[i].locked = false;
arm_gpr[i].location = MJitStateCpuReg(i); arm_gpr[i].location = MJitStateCpuReg(static_cast<ArmReg>(i));
} }
for (size_t i = 0; i < x64_gpr.size(); i++) { for (size_t i = 0; i < x64_gpr.size(); i++) {
x64_gpr[i].locked = false; x64_gpr[i].locked = false;
x64_gpr[i].state = X64State::State::Free; x64_gpr[i].state = X64State::State::Free;
x64_gpr[i].arm_reg = ArmReg::INVALID_REG;
} }
} }
@ -73,12 +74,12 @@ void RegAlloc::FlushX64(Gen::X64Reg x64_reg) {
break; break;
case X64State::State::CleanArmReg: { case X64State::State::CleanArmReg: {
state.state = X64State::State::Free; state.state = X64State::State::Free;
ArmState& arm_state = arm_gpr[state.arm_reg]; ArmState& arm_state = arm_gpr[static_cast<unsigned>(state.arm_reg)];
arm_state.location = MJitStateCpuReg(state.arm_reg); arm_state.location = MJitStateCpuReg(state.arm_reg);
break; break;
} }
case X64State::State::DirtyArmReg: { case X64State::State::DirtyArmReg: {
ArmState& arm_state = arm_gpr[state.arm_reg]; ArmState& arm_state = arm_gpr[static_cast<unsigned>(state.arm_reg)];
ASSERT(arm_state.location.IsSimpleReg()); ASSERT(arm_state.location.IsSimpleReg());
ASSERT(arm_state.location.GetSimpleReg() == x64_reg); ASSERT(arm_state.location.GetSimpleReg() == x64_reg);
FlushArm(state.arm_reg); FlushArm(state.arm_reg);
@ -113,9 +114,9 @@ void RegAlloc::UnlockX64(Gen::X64Reg x64_reg) {
} }
void RegAlloc::FlushArm(ArmReg arm_reg) { void RegAlloc::FlushArm(ArmReg arm_reg) {
ASSERT(arm_reg >= 0 && arm_reg <= 15); ASSERT(IsValidArmReg(arm_reg));
ArmState& arm_state = arm_gpr[arm_reg]; ArmState& arm_state = arm_gpr[static_cast<unsigned>(arm_reg)];
ASSERT(!arm_state.locked); ASSERT(!arm_state.locked);
if (!arm_state.location.IsSimpleReg()) { if (!arm_state.location.IsSimpleReg()) {
return; return;
@ -136,9 +137,9 @@ void RegAlloc::FlushArm(ArmReg arm_reg) {
} }
Gen::OpArg RegAlloc::LockArmForRead(ArmReg arm_reg) { Gen::OpArg RegAlloc::LockArmForRead(ArmReg arm_reg) {
ASSERT(arm_reg >= 0 && arm_reg <= 14); // Not valid for R15 (cannot read from it) ASSERT(IsValidArmReg(arm_reg) && arm_reg != ArmReg::PC); // Not valid for R15 (cannot read from it)
ArmState& arm_state = arm_gpr[arm_reg]; ArmState& arm_state = arm_gpr[static_cast<unsigned>(arm_reg)];
ASSERT(!arm_state.locked); ASSERT(!arm_state.locked);
arm_state.locked = true; arm_state.locked = true;
@ -157,9 +158,9 @@ Gen::OpArg RegAlloc::LockArmForRead(ArmReg arm_reg) {
} }
Gen::OpArg RegAlloc::LockArmForWrite(ArmReg arm_reg) { Gen::OpArg RegAlloc::LockArmForWrite(ArmReg arm_reg) {
ASSERT(arm_reg >= 0 && arm_reg <= 15); // Valid for R15 (write-only) ASSERT(IsValidArmReg(arm_reg)); // Valid for R15 (write-only)
ArmState& arm_state = arm_gpr[arm_reg]; ArmState& arm_state = arm_gpr[static_cast<unsigned>(arm_reg)];
ASSERT(!arm_state.locked); ASSERT(!arm_state.locked);
arm_state.locked = true; arm_state.locked = true;
@ -179,7 +180,7 @@ Gen::OpArg RegAlloc::LockArmForWrite(ArmReg arm_reg) {
} }
Gen::X64Reg RegAlloc::BindArmToX64(ArmReg arm_reg, bool load) { Gen::X64Reg RegAlloc::BindArmToX64(ArmReg arm_reg, bool load) {
ArmState& arm_state = arm_gpr[arm_reg]; ArmState& arm_state = arm_gpr[static_cast<unsigned>(arm_reg)];
ASSERT(!arm_state.locked); ASSERT(!arm_state.locked);
arm_state.locked = true; arm_state.locked = true;
@ -210,7 +211,7 @@ Gen::X64Reg RegAlloc::BindArmToX64(ArmReg arm_reg, bool load) {
} }
Gen::X64Reg RegAlloc::BindArmForRead(ArmReg arm_reg) { Gen::X64Reg RegAlloc::BindArmForRead(ArmReg arm_reg) {
ASSERT(arm_reg >= 0 && arm_reg <= 14); // Not valid for R15 (cannot read from it) ASSERT(IsValidArmReg(arm_reg) && arm_reg != ArmReg::PC); // Not valid for R15 (cannot read from it)
const Gen::X64Reg x64_reg = BindArmToX64(arm_reg, true); const Gen::X64Reg x64_reg = BindArmToX64(arm_reg, true);
@ -218,7 +219,7 @@ Gen::X64Reg RegAlloc::BindArmForRead(ArmReg arm_reg) {
} }
Gen::X64Reg RegAlloc::BindArmForWrite(ArmReg arm_reg) { Gen::X64Reg RegAlloc::BindArmForWrite(ArmReg arm_reg) {
ASSERT(arm_reg >= 0 && arm_reg <= 15); // Valid for R15 (we're not reading from it) ASSERT(IsValidArmReg(arm_reg)); // Valid for R15 (we're not reading from it)
const Gen::X64Reg x64_reg = BindArmToX64(arm_reg, false); const Gen::X64Reg x64_reg = BindArmToX64(arm_reg, false);
@ -228,9 +229,9 @@ Gen::X64Reg RegAlloc::BindArmForWrite(ArmReg arm_reg) {
} }
void RegAlloc::UnlockArm(ArmReg arm_reg) { void RegAlloc::UnlockArm(ArmReg arm_reg) {
ASSERT(arm_reg >= 0 && arm_reg <= 15); ASSERT(IsValidArmReg(arm_reg));
ArmState& arm_state = arm_gpr[arm_reg]; ArmState& arm_state = arm_gpr[static_cast<unsigned>(arm_reg)];
ASSERT(arm_state.locked); ASSERT(arm_state.locked);
arm_state.locked = false; arm_state.locked = false;
@ -247,7 +248,7 @@ void RegAlloc::UnlockArm(ArmReg arm_reg) {
} }
void RegAlloc::MarkDirty(ArmReg arm_reg) { void RegAlloc::MarkDirty(ArmReg arm_reg) {
const ArmState& arm_state = arm_gpr[arm_reg]; const ArmState& arm_state = arm_gpr[static_cast<unsigned>(arm_reg)];
ASSERT(arm_state.locked); ASSERT(arm_state.locked);
ASSERT(arm_state.location.IsSimpleReg()); ASSERT(arm_state.location.IsSimpleReg());
@ -287,7 +288,7 @@ void RegAlloc::FlushABICallerSaved() {
} }
Gen::X64Reg RegAlloc::GetX64For(ArmReg arm_reg) { Gen::X64Reg RegAlloc::GetX64For(ArmReg arm_reg) {
const ArmState& arm_state = arm_gpr[arm_reg]; const ArmState& arm_state = arm_gpr[static_cast<unsigned>(arm_reg)];
ASSERT(arm_state.location.IsSimpleReg()); ASSERT(arm_state.location.IsSimpleReg());
@ -301,7 +302,7 @@ Gen::X64Reg RegAlloc::GetX64For(ArmReg arm_reg) {
} }
bool RegAlloc::IsBoundToX64(ArmReg arm_reg) { bool RegAlloc::IsBoundToX64(ArmReg arm_reg) {
const ArmState& arm_state = arm_gpr[arm_reg]; const ArmState& arm_state = arm_gpr[static_cast<unsigned>(arm_reg)];
return arm_state.location.IsSimpleReg(); return arm_state.location.IsSimpleReg();
} }
@ -326,13 +327,13 @@ void RegAlloc::UnlockTemp(Gen::X64Reg x64_reg) {
} }
void RegAlloc::AssertNoLocked() { void RegAlloc::AssertNoLocked() {
for (ArmReg arm_reg = 0; arm_reg < arm_gpr.size(); arm_reg++) { for (size_t i = 0; i < arm_gpr.size(); i++) {
ArmState& arm_state = arm_gpr[arm_reg]; ArmState& arm_state = arm_gpr[i];
ASSERT(!arm_state.locked); ASSERT(!arm_state.locked);
if (arm_state.location.IsSimpleReg()) { if (arm_state.location.IsSimpleReg()) {
X64State& x64_state = x64_gpr[x64_reg_to_index.at(arm_state.location.GetSimpleReg())]; X64State& x64_state = x64_gpr[x64_reg_to_index.at(arm_state.location.GetSimpleReg())];
ASSERT(x64_state.state == X64State::State::CleanArmReg || x64_state.state == X64State::State::DirtyArmReg); ASSERT(x64_state.state == X64State::State::CleanArmReg || x64_state.state == X64State::State::DirtyArmReg);
ASSERT(x64_state.arm_reg == arm_reg); ASSERT(x64_state.arm_reg == static_cast<ArmReg>(i));
} }
} }
View File
@ -55,7 +55,7 @@ private:
bool locked = false; bool locked = false;
State state = State::Free; State state = State::Free;
ArmReg arm_reg = -1; ///< Only holds a valid value when state == DirtyArmReg / CleanArmReg ArmReg arm_reg = ArmReg::INVALID_REG; ///< Only holds a valid value when state == DirtyArmReg / CleanArmReg
}; };
std::array<ArmState, 16> arm_gpr; std::array<ArmState, 16> arm_gpr;
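
INVALID_REG replaces the old -1 sentinel, which can no longer be assigned to an enum class without a cast. A plausible shape for the register enum, with the sentinel value an outright assumption:

    // Plausible sketch; the sentinel value and aliases are assumptions.
    enum class ArmReg {
        R0 = 0, R1, R2, R3, R4, R5, R6, R7,
        R8, R9, R10, R11, R12, R13, R14, R15,
        SP = R13, LR = R14, PC = R15,
        INVALID_REG = 99, // sentinel, never a decodable register
    };
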
View File
@ -24,9 +24,12 @@ TEST_CASE("Fuzz ARM branch instructions", "[JitX64]") {
auto instruction_select = [&]() -> u32 { auto instruction_select = [&]() -> u32 {
size_t inst_index = RandInt<size_t>(0, instructions.size() - 1); size_t inst_index = RandInt<size_t>(0, instructions.size() - 1);
u32 random = RandInt<u32>(0, 0xFFFFFFFF); u32 random = RandInt<u32>(0, 0xFFFFFFF);
u32 Rm = RandInt<u32>(0, 14);
return instructions[inst_index].first | (random & (~instructions[inst_index].second)); u32 assemble_randoms = (random << 4) | Rm;
return instructions[inst_index].first | (assemble_randoms & (~instructions[inst_index].second));
}; };
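
The branch fuzzer previously randomised all 32 bits, which could put R15 into the Rm field of BX/BLX and trip the UNPREDICTABLE assertions in the jit. The new version pins bits 3:0 to a register in R0-R14 and randomises only the rest. A hypothetical restatement of the assembly step:

    // Hypothetical helper restating the lambda body above.
    u32 AssembleBranchTestCase(u32 bits, u32 dont_care_mask, u32 random, u32 Rm) {
        u32 assembled = (random << 4) | Rm; // Rm occupies bits 3:0 of register-form BX/BLX
        return bits | (assembled & ~dont_care_mask);
    }
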
SECTION("R15") { SECTION("R15") {
View File
@ -133,6 +133,7 @@ TEST_CASE("Fuzz ARM load/store multiple instructions", "[JitX64]") {
u32 Rn = RandInt<u32>(0, 14); u32 Rn = RandInt<u32>(0, 14);
u32 flags = RandInt<u32>(0, 0xF); u32 flags = RandInt<u32>(0, 0xF);
while (true) {
if (inst_index == 1 && (flags & 2)) { if (inst_index == 1 && (flags & 2)) {
if (reg_list & (1 << Rn)) if (reg_list & (1 << Rn))
reg_list &= ~((1 << Rn) - 1); reg_list &= ~((1 << Rn) - 1);
@ -140,6 +141,12 @@ TEST_CASE("Fuzz ARM load/store multiple instructions", "[JitX64]") {
reg_list &= ~(1 << Rn); reg_list &= ~(1 << Rn);
} }
if (reg_list)
break;
reg_list = RandInt<u32>(1, 0xFFFF);
}
u32 assemble_randoms = (reg_list << 0) | (Rn << 16) | (flags << 24) | (cond << 28); u32 assemble_randoms = (reg_list << 0) | (Rn << 16) | (flags << 24) | (cond << 28);
return instructions[inst_index].first | (assemble_randoms & (~instructions[inst_index].second)); return instructions[inst_index].first | (assemble_randoms & (~instructions[inst_index].second));
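
The new while (true) loop guards against reg_list ending up empty after Rn is masked out, since the LDM/STM handlers assert list != 0; it re-rolls the list until at least one register survives. A hedged sketch of the same retry logic as a helper, where mask_for_form stands in for the form-specific masking above:

    // Hypothetical helper; assumes <functional> and the test file's RandInt.
    static u32 PickNonEmptyRegList(u32 reg_list, const std::function<u32(u32)>& mask_for_form) {
        while ((reg_list = mask_for_form(reg_list)) == 0)
            reg_list = RandInt<u32>(1, 0xFFFF); // re-roll until a register survives the masking
        return reg_list;
    }
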
View File
@ -297,9 +297,13 @@ TEST_CASE("Fuzz Thumb instructions set 2 (affects PC)", "[JitX64][Thumb]") {
auto instruction_select = [&](int) -> u16 { auto instruction_select = [&](int) -> u16 {
size_t inst_index = RandInt<size_t>(0, instructions.size() - 1); size_t inst_index = RandInt<size_t>(0, instructions.size() - 1);
if (inst_index == 0) {
u16 Rm = RandInt<u16>(0, 14) << 3;
return instructions[inst_index].first | (Rm &~instructions[inst_index].second);
} else {
u16 random = RandInt<u16>(0, 0xFFFF); u16 random = RandInt<u16>(0, 0xFFFF);
return instructions[inst_index].first | (random &~instructions[inst_index].second); return instructions[inst_index].first | (random &~instructions[inst_index].second);
}
}; };
FuzzJitThumb(1, 1, 10000, instruction_select); FuzzJitThumb(1, 1, 10000, instruction_select);
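
For instruction index 0 (presumably the hi-register BX/BLX encoding), the Thumb fuzzer now draws Rm from R0-R14 and shifts it into position rather than randomising the whole halfword, mirroring the ARM-side change above. The shift reflects the field's location:

    // Hypothetical helper: a non-PC register placed into the Thumb Rm field (bits 6:3).
    static u16 RandomThumbRmField() {
        return RandInt<u16>(0, 14) << 3;
    }
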