From ec7c34ec92dec879a53df137eb70170e96953719 Mon Sep 17 00:00:00 2001 From: MerryMage Date: Wed, 30 Mar 2016 18:10:42 +0100 Subject: [PATCH] JitX64: Data Processing: Implement shift-by-register (_rsr) instructions --- src/core/arm/decoder/thumb.cpp | 8 +- .../jit_x64/instructions/data_processing.cpp | 566 +++++++++++++++++- src/core/arm/jit_x64/jit_x64.h | 2 +- .../arm/jit_x64/fuzz_arm_data_processing.cpp | 2 +- 4 files changed, 553 insertions(+), 25 deletions(-) diff --git a/src/core/arm/decoder/thumb.cpp b/src/core/arm/decoder/thumb.cpp index 87f8bd763..d646a7de0 100644 --- a/src/core/arm/decoder/thumb.cpp +++ b/src/core/arm/decoder/thumb.cpp @@ -148,13 +148,13 @@ static const std::array thumb_instruction_table = { { v->EOR_reg(0xE, /*S=*/true, Rd_Rn, Rd_Rn, 0, 0, Rm_Rs); break; case 2: // LSL Rd, Rs - v->MOV_rsr(0xE, /*S=*/true, Rd_Rn, Rm_Rs, 0b0001, Rd_Rn); + v->MOV_rsr(0xE, /*S=*/true, Rd_Rn, Rm_Rs, 0b00, Rd_Rn); break; case 3: // LSR Rd, Rs - v->MOV_rsr(0xE, /*S=*/true, Rd_Rn, Rm_Rs, 0b0011, Rd_Rn); + v->MOV_rsr(0xE, /*S=*/true, Rd_Rn, Rm_Rs, 0b01, Rd_Rn); break; case 4: // ASR Rd, Rs - v->MOV_rsr(0xE, /*S=*/true, Rd_Rn, Rm_Rs, 0b0101, Rd_Rn); + v->MOV_rsr(0xE, /*S=*/true, Rd_Rn, Rm_Rs, 0b10, Rd_Rn); break; case 5: // ADC Rd, Rm v->ADC_reg(0xE, /*S=*/true, Rd_Rn, Rd_Rn, 0, 0, Rm_Rs); @@ -163,7 +163,7 @@ static const std::array thumb_instruction_table = { { v->SBC_reg(0xE, /*S=*/true, Rd_Rn, Rd_Rn, 0, 0, Rm_Rs); break; case 7: // ROR Rd, Rs - v->MOV_rsr(0xE, /*S=*/true, Rd_Rn, Rm_Rs, 0b0111, Rd_Rn); + v->MOV_rsr(0xE, /*S=*/true, Rd_Rn, Rm_Rs, 0b11, Rd_Rn); break; case 8: // TST Rm, Rn v->TST_reg(0xE, Rd_Rn, 0, 0, Rm_Rs); diff --git a/src/core/arm/jit_x64/instructions/data_processing.cpp b/src/core/arm/jit_x64/instructions/data_processing.cpp index b54570bcc..79a87fc00 100644 --- a/src/core/arm/jit_x64/instructions/data_processing.cpp +++ b/src/core/arm/jit_x64/instructions/data_processing.cpp @@ -62,7 +62,9 @@ void 
JitX64::CompileDataProcessingHelper_Reverse(ArmReg Rn_index, ArmReg Rd_inde } X64Reg JitX64::CompileDataProcessingHelper_reg(ArmImm5 imm5, ShiftType shift, ArmReg Rm_index, bool do_shifter_carry_out) { - // We directly put shifter_carry_out into MJitStateCFlag() + // Caller must call reg_alloc.UnlockTemp on return value. + // if do_shifter_carry_out, + // we output code that calculates and puts shifter_carry_out into MJitStateCFlag(). X64Reg tmp = reg_alloc.AllocTemp(); @@ -119,12 +121,14 @@ X64Reg JitX64::CompileDataProcessingHelper_reg(ArmImm5 imm5, ShiftType shift, Ar if (imm5 == 0) { //RRX code->BT(8, MJitStateCFlag(), Imm8(0)); code->RCR(32, R(tmp), Imm8(1)); - if (do_shifter_carry_out) + if (do_shifter_carry_out) { code->SETcc(CC_C, MJitStateCFlag()); + } } else { code->ROR(32, R(tmp), Imm8(imm5)); - if (do_shifter_carry_out) + if (do_shifter_carry_out) { code->SETcc(CC_C, MJitStateCFlag()); + } } return tmp; } @@ -132,6 +136,176 @@ X64Reg JitX64::CompileDataProcessingHelper_reg(ArmImm5 imm5, ShiftType shift, Ar UNREACHABLE(); } +X64Reg JitX64::CompileDataProcessingHelper_rsr(ArmReg Rs_index, ShiftType shift, ArmReg Rm_index, bool do_shifter_carry_out) { + // Caller must call reg_alloc.UnlockTemp on return value. + // if do_shifter_carry_out, + // we output code that calculates and puts shifter_carry_out into MJitStateCFlag(). 
+ + reg_alloc.FlushX64(RCX); + reg_alloc.LockX64(RCX); + + X64Reg tmp = reg_alloc.AllocTemp(); + + if (Rs_index != 15) { + OpArg Rs = reg_alloc.LockArmForRead(Rs_index); + code->MOV(32, R(RCX), Rs); + code->AND(32, R(RCX), Imm32(0xFF)); + reg_alloc.UnlockArm(Rs_index); + } else { + code->MOV(32, R(RCX), Imm32(GetReg15Value() & 0xFF)); + } + + if (Rm_index != 15) { + OpArg Rm = reg_alloc.LockArmForRead(Rm_index); + code->MOV(32, R(tmp), Rm); + reg_alloc.UnlockArm(Rm_index); + } else { + code->MOV(32, R(tmp), Imm32(GetReg15Value())); + } + + switch (shift) { + case 0b00: { // Logical shift left by register + if (!do_shifter_carry_out) { + code->SHL(32, R(tmp), R(CL)); + + code->CMP(32, R(RCX), Imm8(32)); + X64Reg zero = reg_alloc.AllocTemp(); + code->MOV(32, R(zero), Imm32(0)); + code->CMOVcc(32, tmp, R(zero), CC_NB); + reg_alloc.UnlockTemp(zero); + } else { + code->CMP(32, R(RCX), Imm8(32)); + auto Rs_gt32 = code->J_CC(CC_A); + auto Rs_eq32 = code->J_CC(CC_E); + // if (Rs & 0xFF == 0) goto end; + code->TEST(32, R(RCX), R(RCX)); + auto Rs_zero = code->J_CC(CC_Z); + // if (Rs & 0xFF < 32) { + code->SHL(32, R(tmp), R(CL)); + code->SETcc(CC_C, MJitStateCFlag()); + auto jmp_to_end_1 = code->J(); + // } else if (Rs & 0xFF > 32) { + code->SetJumpTarget(Rs_gt32); + code->MOV(32, R(tmp), Imm32(0)); + code->MOV(8, MJitStateCFlag(), Imm8(0)); + auto jmp_to_end_2 = code->J(); + // } else if (Rs & 0xFF == 32) { + code->SetJumpTarget(Rs_eq32); + code->BT(32, R(tmp), Imm8(0)); + code->SETcc(CC_C, MJitStateCFlag()); + code->MOV(32, R(tmp), Imm32(0)); + // } + code->SetJumpTarget(jmp_to_end_1); + code->SetJumpTarget(jmp_to_end_2); + code->SetJumpTarget(Rs_zero); + } + break; + } + case 0b01: { // Logical shift right by register + if (!do_shifter_carry_out) { + code->SHR(32, R(tmp), R(RCX)); + + code->CMP(32, R(RCX), Imm8(32)); + X64Reg zero = reg_alloc.AllocTemp(); + code->MOV(32, R(zero), Imm32(0)); + code->CMOVcc(32, tmp, R(zero), CC_NB); + reg_alloc.UnlockTemp(zero); + } else { + 
code->CMP(32, R(RCX), Imm8(32)); + auto Rs_gt32 = code->J_CC(CC_A); + auto Rs_eq32 = code->J_CC(CC_E); + // if (Rs & 0xFF == 0) goto end; + code->TEST(32, R(RCX), R(RCX)); + auto Rs_zero = code->J_CC(CC_Z); + // if (Rs & 0xFF < 32) { + code->SHR(32, R(tmp), R(RCX)); + code->SETcc(CC_C, MJitStateCFlag()); + auto jmp_to_end_1 = code->J(); + // } else if (Rs & 0xFF > 32) { + code->SetJumpTarget(Rs_gt32); + code->MOV(32, R(tmp), Imm32(0)); + code->MOV(8, MJitStateCFlag(), Imm8(0)); + auto jmp_to_end_2 = code->J(); + // } else if (Rs & 0xFF == 32) { + code->SetJumpTarget(Rs_eq32); + code->BT(32, R(tmp), Imm8(31)); + code->SETcc(CC_C, MJitStateCFlag()); + code->MOV(32, R(tmp), Imm32(0)); + // } + code->SetJumpTarget(jmp_to_end_1); + code->SetJumpTarget(jmp_to_end_2); + code->SetJumpTarget(Rs_zero); + } + break; + } + case 0b10: { // Arithmetic shift right by register + if (!do_shifter_carry_out) { + code->CMP(32, R(RCX), Imm8(31)); + auto Rs_gt31 = code->J_CC(CC_A); + // if (Rs & 0xFF <= 31) { + code->SAR(32, R(tmp), R(CL)); + auto jmp_to_end = code->J(); + // } else { + code->SetJumpTarget(Rs_gt31); + code->SAR(32, R(tmp), Imm8(31)); // Verified. + // } + code->SetJumpTarget(jmp_to_end); + } else { + code->CMP(32, R(RCX), Imm8(31)); + auto Rs_gt31 = code->J_CC(CC_A); + // if (Rs & 0xFF == 0) goto end; + code->TEST(32, R(RCX), R(RCX)); + auto Rs_zero = code->J_CC(CC_Z); + // if (Rs & 0xFF <= 31) { + code->SAR(32, R(tmp), R(CL)); + code->SETcc(CC_C, MJitStateCFlag()); + auto jmp_to_end = code->J(); + // } else if (Rs & 0xFF > 31) { + code->SetJumpTarget(Rs_gt31); + code->SAR(32, R(tmp), Imm8(31)); // Verified. 
+ code->BT(32, R(tmp), Imm8(31)); + code->SETcc(CC_C, MJitStateCFlag()); + // } + code->SetJumpTarget(jmp_to_end); + code->SetJumpTarget(Rs_zero); + } + break; + } + case 0b11: { // Rotate right by register + if (!do_shifter_carry_out) { + code->AND(32, R(RCX), Imm32(0x1F)); + code->ROR(32, R(tmp), R(CL)); + } else { + // if (Rs & 0xFF == 0) goto end; + code->TEST(32, R(RCX), R(RCX)); + auto Rs_zero = code->J_CC(CC_Z); + + code->AND(32, R(RCX), Imm32(0x1F)); + auto zero_1F = code->J_CC(CC_Z); + // if (Rs & 0x1F != 0) { + code->ROR(32, R(tmp), R(CL)); + code->SETcc(CC_C, MJitStateCFlag()); + auto jmp_to_end = code->J(); + // } else { + code->SetJumpTarget(zero_1F); + code->BT(32, R(tmp), Imm8(31)); + code->SETcc(CC_C, MJitStateCFlag()); + // } + code->SetJumpTarget(jmp_to_end); + code->SetJumpTarget(Rs_zero); + } + break; + } + default: + UNREACHABLE(); + break; + } // switch (shift) + + reg_alloc.UnlockX64(RCX); + + return tmp; +} + void JitX64::ADC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -174,6 +348,28 @@ void JitX64::ADC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm } } +void JitX64::ADC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); + + CompileDataProcessingHelper(Rn_index, Rd_index, [&](X64Reg Rd) { + code->BT(32, MJitStateCFlag(), Imm8(0)); + code->ADC(32, R(Rd), R(tmp)); + }); + + reg_alloc.UnlockTemp(tmp); + + if (S) { + UpdateFlagsZVCN(); + } + + current.arm_pc += GetInstSize(); + if (Rd_index == 15) { + CompileReturnToDispatch(); + } +} + void JitX64::ADD_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -214,6 +410,27 @@ void JitX64::ADD_reg(Cond cond, bool S, 
ArmReg Rn_index, ArmReg Rd_index, ArmImm } } +void JitX64::ADD_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); + + CompileDataProcessingHelper(Rn_index, Rd_index, [&](X64Reg Rd) { + code->ADD(32, R(Rd), R(tmp)); + }); + + reg_alloc.UnlockTemp(tmp); + + if (S) { + UpdateFlagsZVCN(); + } + + current.arm_pc += GetInstSize(); + if (Rd_index == 15) { + CompileReturnToDispatch(); + } +} + void JitX64::AND_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -249,6 +466,29 @@ void JitX64::AND_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm if (S) { UpdateFlagsZN(); + // C updated by CompileDataProcessingHelper_reg + } + + current.arm_pc += GetInstSize(); + if (Rd_index == 15) { + CompileReturnToDispatch(); + } +} + +void JitX64::AND_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S); + + CompileDataProcessingHelper(Rn_index, Rd_index, [&](X64Reg Rd) { + code->AND(32, R(Rd), R(tmp)); + }); + + reg_alloc.UnlockTemp(tmp); + + if (S) { + UpdateFlagsZN(); + // C updated by CompileDataProcessingHelper_reg } current.arm_pc += GetInstSize(); @@ -302,6 +542,29 @@ void JitX64::BIC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm } } +void JitX64::BIC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S); + + CompileDataProcessingHelper(Rn_index, Rd_index, [&](X64Reg Rd) { + // TODO: Use ANDN instead. 
+ code->NOT(32, R(tmp)); + code->AND(32, R(Rd), R(tmp)); + }); + + reg_alloc.UnlockTemp(tmp); + + if (S) { + UpdateFlagsZN(); + } + + current.arm_pc += GetInstSize(); + if (Rd_index == 15) { + CompileReturnToDispatch(); + } +} + void JitX64::CMN_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -345,6 +608,26 @@ void JitX64::CMN_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, current.arm_pc += GetInstSize(); } +void JitX64::CMN_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); + + if (Rn_index != 15) { + OpArg Rn = reg_alloc.LockArmForRead(Rn_index); + code->ADD(32, R(tmp), Rn); + reg_alloc.UnlockArm(Rn_index); + } else { + code->ADD(32, R(tmp), Imm32(GetReg15Value())); + } + + reg_alloc.UnlockTemp(tmp); + + UpdateFlagsZVCN(); + + current.arm_pc += GetInstSize(); +} + void JitX64::CMP_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -393,6 +676,31 @@ void JitX64::CMP_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, current.arm_pc += GetInstSize(); } +void JitX64::CMP_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); + + if (Rn_index != 15) { + OpArg Rn = reg_alloc.LockArmForRead(Rn_index); + code->CMP(32, Rn, R(tmp)); + reg_alloc.UnlockArm(Rn_index); + } else { + // TODO: Optimize this + X64Reg tmp2 = reg_alloc.AllocTemp(); + code->MOV(32, R(tmp2), Imm32(GetReg15Value())); + code->CMP(32, R(tmp2), R(tmp)); + reg_alloc.UnlockTemp(tmp2); + } + + reg_alloc.UnlockTemp(tmp); + + UpdateFlagsC_complement(); + UpdateFlagsZVN(); + + current.arm_pc += GetInstSize(); +} + void 
JitX64::EOR_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -436,6 +744,27 @@ void JitX64::EOR_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm } } +void JitX64::EOR_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S); + + CompileDataProcessingHelper(Rn_index, Rd_index, [&](X64Reg Rd) { + code->XOR(32, R(Rd), R(tmp)); + }); + + reg_alloc.UnlockTemp(tmp); + + if (S) { + UpdateFlagsZN(); + } + + current.arm_pc += GetInstSize(); + if (Rd_index == 15) { + CompileReturnToDispatch(); + } +} + void JitX64::MOV_imm(Cond cond, bool S, ArmReg Rd_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -483,6 +812,28 @@ void JitX64::MOV_reg(Cond cond, bool S, ArmReg Rd_index, ArmImm5 imm5, ShiftType } } +void JitX64::MOV_rsr(Cond cond, bool S, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S); + + Gen::OpArg Rd = reg_alloc.LockArmForWrite(Rd_index); + code->MOV(32, Rd, R(tmp)); + reg_alloc.UnlockArm(Rd_index); + + if (S) { + code->CMP(32, R(tmp), Imm32(0)); + UpdateFlagsZN(); + } + + reg_alloc.UnlockTemp(tmp); + + current.arm_pc += GetInstSize(); + if (Rd_index == 15) { + CompileReturnToDispatch(); + } +} + void JitX64::MVN_imm(Cond cond, bool S, ArmReg Rd_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -531,6 +882,29 @@ void JitX64::MVN_reg(Cond cond, bool S, ArmReg Rd_index, ArmImm5 imm5, ShiftType } } +void JitX64::MVN_rsr(Cond cond, bool S, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + 
Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S); + code->NOT(32, R(tmp)); + + Gen::OpArg Rd = reg_alloc.LockArmForWrite(Rd_index); + code->MOV(32, Rd, R(tmp)); + reg_alloc.UnlockArm(Rd_index); + + if (S) { + code->CMP(32, R(tmp), Imm32(0)); + UpdateFlagsZN(); + } + + reg_alloc.UnlockTemp(tmp); + + current.arm_pc += GetInstSize(); + if (Rd_index == 15) { + CompileReturnToDispatch(); + } +} + void JitX64::ORR_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -574,6 +948,27 @@ void JitX64::ORR_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm } } +void JitX64::ORR_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, S); + + CompileDataProcessingHelper(Rn_index, Rd_index, [&](X64Reg Rd) { + code->OR(32, R(Rd), R(tmp)); + }); + + reg_alloc.UnlockTemp(tmp); + + if (S) { + UpdateFlagsZN(); + } + + current.arm_pc += GetInstSize(); + if (Rd_index == 15) { + CompileReturnToDispatch(); + } +} + void JitX64::RSB_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -632,6 +1027,36 @@ void JitX64::RSB_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm } } +void JitX64::RSB_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); + + CompileDataProcessingHelper_Reverse(Rn_index, Rd_index, [&](X64Reg Rd) { + code->MOV(32, R(Rd), R(tmp)); + + if (Rn_index == 15) { + code->SUB(32, R(Rd), Imm32(GetReg15Value())); + } else { + Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index); + 
code->SUB(32, R(Rd), Rn); + reg_alloc.UnlockArm(Rn_index); + } + }); + + reg_alloc.UnlockTemp(tmp); + + if (S) { + UpdateFlagsZVN(); + UpdateFlagsC_complement(); + } + + current.arm_pc += GetInstSize(); + if (Rd_index == 15) { + CompileReturnToDispatch(); + } +} + void JitX64::RSC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -696,6 +1121,39 @@ void JitX64::RSC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm } } +void JitX64::RSC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); + + CompileDataProcessingHelper_Reverse(Rn_index, Rd_index, [&](X64Reg Rd) { + code->MOV(32, R(Rd), R(tmp)); + + code->BT(32, MJitStateCFlag(), Imm8(0)); + code->CMC(); + + if (Rn_index == 15) { + code->SBB(32, R(Rd), Imm32(GetReg15Value())); + } else { + Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index); + code->SBB(32, R(Rd), Rn); + reg_alloc.UnlockArm(Rn_index); + } + }); + + reg_alloc.UnlockTemp(tmp); + + if (S) { + UpdateFlagsZVN(); + UpdateFlagsC_complement(); + } + + current.arm_pc += GetInstSize(); + if (Rd_index == 15) { + CompileReturnToDispatch(); + } +} + void JitX64::SBC_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -742,6 +1200,30 @@ void JitX64::SBC_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm } } +void JitX64::SBC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); + + CompileDataProcessingHelper(Rn_index, Rd_index, [&](X64Reg Rd) { + code->BT(32, MJitStateCFlag(), Imm8(0)); + 
code->CMC(); + code->SBB(32, R(Rd), R(tmp)); + }); + + reg_alloc.UnlockTemp(tmp); + + if (S) { + UpdateFlagsZVN(); + UpdateFlagsC_complement(); + } + + current.arm_pc += GetInstSize(); + if (Rd_index == 15) { + CompileReturnToDispatch(); + } +} + void JitX64::SUB_imm(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -784,6 +1266,29 @@ void JitX64::SUB_reg(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmImm } } +void JitX64::SUB_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, false); + + CompileDataProcessingHelper(Rn_index, Rd_index, [&](X64Reg Rd) { + code->SUB(32, R(Rd), R(tmp)); + }); + + reg_alloc.UnlockTemp(tmp); + + if (S) { + UpdateFlagsZVN(); + UpdateFlagsC_complement(); + } + + current.arm_pc += GetInstSize(); + if (Rd_index == 15) { + CompileReturnToDispatch(); + } +} + + void JitX64::TEQ_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); @@ -831,6 +1336,26 @@ void JitX64::TEQ_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, current.arm_pc += GetInstSize(); } +void JitX64::TEQ_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, true); + + if (Rn_index == 15) { + code->XOR(32, R(tmp), Imm32(GetReg15Value())); + } else { + Gen::OpArg Rn = reg_alloc.LockArmForRead(Rn_index); + code->XOR(32, R(tmp), Rn); + reg_alloc.UnlockArm(Rn_index); + } + + reg_alloc.UnlockTemp(tmp); + + UpdateFlagsZN(); + + current.arm_pc += GetInstSize(); +} + void JitX64::TST_imm(Cond cond, ArmReg Rn_index, int rotate, ArmImm8 imm8) { cond_manager.CompileCond((ConditionCode)cond); 
@@ -881,21 +1406,24 @@ void JitX64::TST_reg(Cond cond, ArmReg Rn_index, ArmImm5 imm5, ShiftType shift, current.arm_pc += GetInstSize(); } -void JitX64::ADC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::ADD_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::AND_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::BIC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::CMN_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::CMP_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::EOR_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::MOV_rsr(Cond cond, bool S, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::MVN_rsr(Cond cond, bool S, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::ORR_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::RSB_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::RSC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void 
JitX64::SBC_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::SUB_rsr(Cond cond, bool S, ArmReg Rn_index, ArmReg Rd_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::TEQ_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } -void JitX64::TST_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { CompileInterpretInstruction(); } +void JitX64::TST_rsr(Cond cond, ArmReg Rn_index, ArmReg Rs_index, ShiftType shift, ArmReg Rm_index) { + cond_manager.CompileCond((ConditionCode)cond); + Gen::X64Reg tmp = CompileDataProcessingHelper_rsr(Rs_index, shift, Rm_index, true); + + if (Rn_index == 15) { + code->TEST(32, R(tmp), Imm32(GetReg15Value())); + } else { + OpArg Rn = reg_alloc.LockArmForRead(Rn_index); + code->TEST(32, R(tmp), Rn); + reg_alloc.UnlockArm(Rn_index); + } + + reg_alloc.UnlockTemp(tmp); + + UpdateFlagsZN(); + + current.arm_pc += GetInstSize(); } + +} // namespace JitX64 diff --git a/src/core/arm/jit_x64/jit_x64.h b/src/core/arm/jit_x64/jit_x64.h index 914fb5d63..086e55fb2 100644 --- a/src/core/arm/jit_x64/jit_x64.h +++ b/src/core/arm/jit_x64/jit_x64.h @@ -161,7 +161,7 @@ private: void CompileDataProcessingHelper(ArmReg Rn_index, ArmReg Rd_index, std::function body); void CompileDataProcessingHelper_Reverse(ArmReg Rn_index, ArmReg Rd_index, std::function body); Gen::X64Reg CompileDataProcessingHelper_reg(ArmImm5 imm5, ShiftType shift, ArmReg Rm, bool do_shifter_carry_out); - Gen::X64Reg CompileDataProcessingHelper_rsr(ArmReg Rs, ShiftType shift, ArmReg Rm); + Gen::X64Reg CompileDataProcessingHelper_rsr(ArmReg Rs, ShiftType shift, ArmReg Rm, bool do_shifter_carry_out); void ADC_imm(Cond cond, bool S, ArmReg Rn, ArmReg Rd, int rotate, ArmImm8 imm8) override; void ADC_reg(Cond cond, bool S, ArmReg Rn, ArmReg Rd, 
ArmImm5 imm5, ShiftType shift, ArmReg Rm) override; void ADC_rsr(Cond cond, bool S, ArmReg Rn, ArmReg Rd, ArmReg Rs, ShiftType shift, ArmReg Rm) override; diff --git a/src/tests/core/arm/jit_x64/fuzz_arm_data_processing.cpp b/src/tests/core/arm/jit_x64/fuzz_arm_data_processing.cpp index 6e993b11b..2e493ea73 100644 --- a/src/tests/core/arm/jit_x64/fuzz_arm_data_processing.cpp +++ b/src/tests/core/arm/jit_x64/fuzz_arm_data_processing.cpp @@ -87,7 +87,7 @@ TEST_CASE("Fuzz ARM data processing instructions", "[JitX64]") { } SECTION("long blocks") { - FuzzJit(1024, 1025, 50, instruction_select_without_R15); + FuzzJit(1024, 1025, 200, instruction_select_without_R15); } auto instruction_select_only_R15 = [&]() -> u32 {