mirror of https://github.com/yuzu-emu/yuzu.git

Merge branch 'master' into tlds

commit c8f9bbbf85
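The bulk of this merge is a mechanical rename of the unqualified size_t to the fully qualified std::size_t across the audio_core and common code, plus the line re-wraps the longer spelling forces, a "Citra" to "yuzu" string fix in the cubeb device enumerator, and a move of AudioRenderer::VoiceState out of the header into the .cpp file. Below is a minimal, self-contained sketch of the spelling the diff standardizes on; the helper is illustrative only and is not part of the commit.

// Illustrative sketch only (not part of this commit); CountFrames is a made-up name.
// It shows the spelling the change standardizes on: std::size_t from <cstddef>,
// qualified at every use.
#include <cstddef>
#include <cstdint>
#include <vector>

// Counts stereo frames in an interleaved sample buffer, mirroring the
// signal.size() / 2 pattern used in Filter::Process.
std::size_t CountFrames(const std::vector<std::int16_t>& signal) {
    // <cstddef> is only required to declare std::size_t; whether the unqualified
    // ::size_t is also visible afterwards is not guaranteed, so the qualified
    // form is the portable C++ spelling.
    return signal.size() / 2;
}

In practice the unqualified name usually compiles because most standard library implementations also place size_t in the global namespace, but the qualified form removes that reliance on implementation behaviour.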
@@ -35,12 +35,12 @@ Filter::Filter(double a0, double a1, double a2, double b0, double b1, double b2)
: a1(a1 / a0), a2(a2 / a0), b0(b0 / a0), b1(b1 / a0), b2(b2 / a0) {}

void Filter::Process(std::vector<s16>& signal) {
const size_t num_frames = signal.size() / 2;
for (size_t i = 0; i < num_frames; i++) {
const std::size_t num_frames = signal.size() / 2;
for (std::size_t i = 0; i < num_frames; i++) {
std::rotate(in.begin(), in.end() - 1, in.end());
std::rotate(out.begin(), out.end() - 1, out.end());

for (size_t ch = 0; ch < channel_count; ch++) {
for (std::size_t ch = 0; ch < channel_count; ch++) {
in[0][ch] = signal[i * channel_count + ch];

out[0][ch] = b0 * in[0][ch] + b1 * in[1][ch] + b2 * in[2][ch] - a1 * out[1][ch] -
@@ -54,14 +54,14 @@ void Filter::Process(std::vector<s16>& signal) {
/// Calculates the appropriate Q for each biquad in a cascading filter.
/// @param total_count The total number of biquads to be cascaded.
/// @param index 0-index of the biquad to calculate the Q value for.
static double CascadingBiquadQ(size_t total_count, size_t index) {
static double CascadingBiquadQ(std::size_t total_count, std::size_t index) {
const double pole = M_PI * (2 * index + 1) / (4.0 * total_count);
return 1.0 / (2.0 * std::cos(pole));
}

CascadingFilter CascadingFilter::LowPass(double cutoff, size_t cascade_size) {
CascadingFilter CascadingFilter::LowPass(double cutoff, std::size_t cascade_size) {
std::vector<Filter> cascade(cascade_size);
for (size_t i = 0; i < cascade_size; i++) {
for (std::size_t i = 0; i < cascade_size; i++) {
cascade[i] = Filter::LowPass(cutoff, CascadingBiquadQ(cascade_size, i));
}
return CascadingFilter{std::move(cascade)};
@@ -30,7 +30,7 @@ public:
void Process(std::vector<s16>& signal);

private:
static constexpr size_t channel_count = 2;
static constexpr std::size_t channel_count = 2;

/// Coefficients are in normalized form (a0 = 1.0).
double a1, a2, b0, b1, b2;
@@ -46,7 +46,7 @@ public:
/// Creates a cascading low-pass filter.
/// @param cutoff Determines the cutoff frequency. A value from 0.0 to 1.0.
/// @param cascade_size Number of biquads in cascade.
static CascadingFilter LowPass(double cutoff, size_t cascade_size);
static CascadingFilter LowPass(double cutoff, std::size_t cascade_size);

/// Passthrough.
CascadingFilter();
@@ -14,7 +14,7 @@
namespace AudioCore {

/// The Lanczos kernel
static double Lanczos(size_t a, double x) {
static double Lanczos(std::size_t a, double x) {
if (x == 0.0)
return 1.0;
const double px = M_PI * x;
@@ -37,15 +37,15 @@ std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input,
}
state.nyquist.Process(input);

constexpr size_t taps = InterpolationState::lanczos_taps;
const size_t num_frames = input.size() / 2;
constexpr std::size_t taps = InterpolationState::lanczos_taps;
const std::size_t num_frames = input.size() / 2;

std::vector<s16> output;
output.reserve(static_cast<size_t>(input.size() / ratio + 4));
output.reserve(static_cast<std::size_t>(input.size() / ratio + 4));

double& pos = state.position;
auto& h = state.history;
for (size_t i = 0; i < num_frames; ++i) {
for (std::size_t i = 0; i < num_frames; ++i) {
std::rotate(h.begin(), h.end() - 1, h.end());
h[0][0] = input[i * 2 + 0];
h[0][1] = input[i * 2 + 1];
@@ -53,7 +53,7 @@ std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input,
while (pos <= 1.0) {
double l = 0.0;
double r = 0.0;
for (size_t j = 0; j < h.size(); j++) {
for (std::size_t j = 0; j < h.size(); j++) {
l += Lanczos(taps, pos + j - taps + 1) * h[j][0];
r += Lanczos(taps, pos + j - taps + 1) * h[j][1];
}
@@ -12,8 +12,8 @@
namespace AudioCore {

struct InterpolationState {
static constexpr size_t lanczos_taps = 4;
static constexpr size_t history_size = lanczos_taps * 2 - 1;
static constexpr std::size_t lanczos_taps = 4;
static constexpr std::size_t history_size = lanczos_taps * 2 - 1;

double current_ratio = 0.0;
CascadingFilter nyquist;
@@ -39,7 +39,8 @@ StreamPtr AudioOut::OpenStream(u32 sample_rate, u32 num_channels, std::string&&
sink->AcquireSinkStream(sample_rate, num_channels, name), std::move(name));
}

std::vector<Buffer::Tag> AudioOut::GetTagsAndReleaseBuffers(StreamPtr stream, size_t max_count) {
std::vector<Buffer::Tag> AudioOut::GetTagsAndReleaseBuffers(StreamPtr stream,
std::size_t max_count) {
return stream->GetTagsAndReleaseBuffers(max_count);
}

@@ -25,7 +25,7 @@ public:
Stream::ReleaseCallback&& release_callback);

/// Returns a vector of recently released buffers specified by tag for the specified stream
std::vector<Buffer::Tag> GetTagsAndReleaseBuffers(StreamPtr stream, size_t max_count);
std::vector<Buffer::Tag> GetTagsAndReleaseBuffers(StreamPtr stream, std::size_t max_count);

/// Starts an audio stream for playback
void StartStream(StreamPtr stream);
@@ -3,9 +3,12 @@
// Refer to the license.txt file included.

#include "audio_core/algorithm/interpolate.h"
#include "audio_core/audio_out.h"
#include "audio_core/audio_renderer.h"
#include "audio_core/codec.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/kernel/event.h"
#include "core/memory.h"

namespace AudioCore {
@@ -13,6 +16,41 @@ namespace AudioCore {
constexpr u32 STREAM_SAMPLE_RATE{48000};
constexpr u32 STREAM_NUM_CHANNELS{2};

class AudioRenderer::VoiceState {
public:
bool IsPlaying() const {
return is_in_use && info.play_state == PlayState::Started;
}

const VoiceOutStatus& GetOutStatus() const {
return out_status;
}

const VoiceInfo& GetInfo() const {
return info;
}

VoiceInfo& Info() {
return info;
}

void SetWaveIndex(std::size_t index);
std::vector<s16> DequeueSamples(std::size_t sample_count);
void UpdateState();
void RefreshBuffer();

private:
bool is_in_use{};
bool is_refresh_pending{};
std::size_t wave_index{};
std::size_t offset{};
Codec::ADPCMState adpcm_state{};
InterpolationState interp_state{};
std::vector<s16> samples;
VoiceOutStatus out_status{};
VoiceInfo info{};
};

AudioRenderer::AudioRenderer(AudioRendererParameter params,
Kernel::SharedPtr<Kernel::Event> buffer_event)
: worker_params{params}, buffer_event{buffer_event}, voices(params.voice_count) {
@@ -27,6 +65,8 @@ AudioRenderer::AudioRenderer(AudioRendererParameter params,
QueueMixedBuffer(2);
}

AudioRenderer::~AudioRenderer() = default;

u32 AudioRenderer::GetSampleRate() const {
return worker_params.sample_rate;
}
@@ -52,8 +92,8 @@ std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_
memory_pool_count * sizeof(MemoryPoolInfo));

// Copy VoiceInfo structs
size_t offset{sizeof(UpdateDataHeader) + config.behavior_size + config.memory_pools_size +
config.voice_resource_size};
std::size_t offset{sizeof(UpdateDataHeader) + config.behavior_size + config.memory_pools_size +
config.voice_resource_size};
for (auto& voice : voices) {
std::memcpy(&voice.Info(), input_params.data() + offset, sizeof(VoiceInfo));
offset += sizeof(VoiceInfo);
@@ -72,7 +112,7 @@ std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_

// Update memory pool state
std::vector<MemoryPoolEntry> memory_pool(memory_pool_count);
for (size_t index = 0; index < memory_pool.size(); ++index) {
for (std::size_t index = 0; index < memory_pool.size(); ++index) {
if (mem_pool_info[index].pool_state == MemoryPoolStates::RequestAttach) {
memory_pool[index].state = MemoryPoolStates::Attached;
} else if (mem_pool_info[index].pool_state == MemoryPoolStates::RequestDetach) {
@@ -93,7 +133,7 @@ std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_
response_data.memory_pools_size);

// Copy output voice status
size_t voice_out_status_offset{sizeof(UpdateDataHeader) + response_data.memory_pools_size};
std::size_t voice_out_status_offset{sizeof(UpdateDataHeader) + response_data.memory_pools_size};
for (const auto& voice : voices) {
std::memcpy(output_params.data() + voice_out_status_offset, &voice.GetOutStatus(),
sizeof(VoiceOutStatus));
@@ -103,12 +143,12 @@ std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_
return output_params;
}

void AudioRenderer::VoiceState::SetWaveIndex(size_t index) {
void AudioRenderer::VoiceState::SetWaveIndex(std::size_t index) {
wave_index = index & 3;
is_refresh_pending = true;
}

std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(size_t sample_count) {
std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(std::size_t sample_count) {
if (!IsPlaying()) {
return {};
}
@@ -117,9 +157,9 @@ std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(size_t sample_count)
RefreshBuffer();
}

const size_t max_size{samples.size() - offset};
const size_t dequeue_offset{offset};
size_t size{sample_count * STREAM_NUM_CHANNELS};
const std::size_t max_size{samples.size() - offset};
const std::size_t dequeue_offset{offset};
std::size_t size{sample_count * STREAM_NUM_CHANNELS};
if (size > max_size) {
size = max_size;
}
@@ -184,7 +224,7 @@ void AudioRenderer::VoiceState::RefreshBuffer() {
case 1:
// 1 channel is upsampled to 2 channel
samples.resize(new_samples.size() * 2);
for (size_t index = 0; index < new_samples.size(); ++index) {
for (std::size_t index = 0; index < new_samples.size(); ++index) {
samples[index * 2] = new_samples[index];
samples[index * 2 + 1] = new_samples[index];
}
@@ -210,7 +250,7 @@ static constexpr s16 ClampToS16(s32 value) {
}

void AudioRenderer::QueueMixedBuffer(Buffer::Tag tag) {
constexpr size_t BUFFER_SIZE{512};
constexpr std::size_t BUFFER_SIZE{512};
std::vector<s16> buffer(BUFFER_SIZE * stream->GetNumChannels());

for (auto& voice : voices) {
@@ -218,7 +258,7 @@ void AudioRenderer::QueueMixedBuffer(Buffer::Tag tag) {
continue;
}

size_t offset{};
std::size_t offset{};
s64 samples_remaining{BUFFER_SIZE};
while (samples_remaining > 0) {
const std::vector<s16> samples{voice.DequeueSamples(samples_remaining)};
@@ -8,16 +8,20 @@
#include <memory>
#include <vector>

#include "audio_core/algorithm/interpolate.h"
#include "audio_core/audio_out.h"
#include "audio_core/codec.h"
#include "audio_core/stream.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/object.h"

namespace Kernel {
class Event;
}

namespace AudioCore {

class AudioOut;

enum class PlayState : u8 {
Started = 0,
Stopped = 1,
@@ -158,6 +162,8 @@ static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader has wrong size
class AudioRenderer {
public:
AudioRenderer(AudioRendererParameter params, Kernel::SharedPtr<Kernel::Event> buffer_event);
~AudioRenderer();

std::vector<u8> UpdateAudioRenderer(const std::vector<u8>& input_params);
void QueueMixedBuffer(Buffer::Tag tag);
void ReleaseAndQueueBuffers();
@@ -166,45 +172,12 @@ public:
u32 GetMixBufferCount() const;

private:
class VoiceState {
public:
bool IsPlaying() const {
return is_in_use && info.play_state == PlayState::Started;
}

const VoiceOutStatus& GetOutStatus() const {
return out_status;
}

const VoiceInfo& GetInfo() const {
return info;
}

VoiceInfo& Info() {
return info;
}

void SetWaveIndex(size_t index);
std::vector<s16> DequeueSamples(size_t sample_count);
void UpdateState();
void RefreshBuffer();

private:
bool is_in_use{};
bool is_refresh_pending{};
size_t wave_index{};
size_t offset{};
Codec::ADPCMState adpcm_state{};
InterpolationState interp_state{};
std::vector<s16> samples;
VoiceOutStatus out_status{};
VoiceInfo info{};
};
class VoiceState;

AudioRendererParameter worker_params;
Kernel::SharedPtr<Kernel::Event> buffer_event;
std::vector<VoiceState> voices;
std::unique_ptr<AudioCore::AudioOut> audio_out;
std::unique_ptr<AudioOut> audio_out;
AudioCore::StreamPtr stream;
};
@@ -8,27 +8,27 @@

namespace AudioCore::Codec {

std::vector<s16> DecodeADPCM(const u8* const data, size_t size, const ADPCM_Coeff& coeff,
std::vector<s16> DecodeADPCM(const u8* const data, std::size_t size, const ADPCM_Coeff& coeff,
ADPCMState& state) {
// GC-ADPCM with scale factor and variable coefficients.
// Frames are 8 bytes long containing 14 samples each.
// Samples are 4 bits (one nibble) long.

constexpr size_t FRAME_LEN = 8;
constexpr size_t SAMPLES_PER_FRAME = 14;
constexpr std::size_t FRAME_LEN = 8;
constexpr std::size_t SAMPLES_PER_FRAME = 14;
constexpr std::array<int, 16> SIGNED_NIBBLES = {
{0, 1, 2, 3, 4, 5, 6, 7, -8, -7, -6, -5, -4, -3, -2, -1}};

const size_t sample_count = (size / FRAME_LEN) * SAMPLES_PER_FRAME;
const size_t ret_size =
const std::size_t sample_count = (size / FRAME_LEN) * SAMPLES_PER_FRAME;
const std::size_t ret_size =
sample_count % 2 == 0 ? sample_count : sample_count + 1; // Ensure multiple of two.
std::vector<s16> ret(ret_size);

int yn1 = state.yn1, yn2 = state.yn2;

const size_t NUM_FRAMES =
const std::size_t NUM_FRAMES =
(sample_count + (SAMPLES_PER_FRAME - 1)) / SAMPLES_PER_FRAME; // Round up.
for (size_t framei = 0; framei < NUM_FRAMES; framei++) {
for (std::size_t framei = 0; framei < NUM_FRAMES; framei++) {
const int frame_header = data[framei * FRAME_LEN];
const int scale = 1 << (frame_header & 0xF);
const int idx = (frame_header >> 4) & 0x7;
@@ -53,9 +53,9 @@ std::vector<s16> DecodeADPCM(const u8* const data, size_t size, const ADPCM_Coef
return static_cast<s16>(val);
};

size_t outputi = framei * SAMPLES_PER_FRAME;
size_t datai = framei * FRAME_LEN + 1;
for (size_t i = 0; i < SAMPLES_PER_FRAME && outputi < sample_count; i += 2) {
std::size_t outputi = framei * SAMPLES_PER_FRAME;
std::size_t datai = framei * FRAME_LEN + 1;
for (std::size_t i = 0; i < SAMPLES_PER_FRAME && outputi < sample_count; i += 2) {
const s16 sample1 = decode_sample(SIGNED_NIBBLES[data[datai] >> 4]);
ret[outputi] = sample1;
outputi++;
@@ -38,7 +38,7 @@ using ADPCM_Coeff = std::array<s16, 16>;
* @param state ADPCM state, this is updated with new state
* @return Decoded stereo signed PCM16 data, sample_count in length
*/
std::vector<s16> DecodeADPCM(const u8* const data, size_t size, const ADPCM_Coeff& coeff,
std::vector<s16> DecodeADPCM(const u8* const data, std::size_t size, const ADPCM_Coeff& coeff,
ADPCMState& state);

}; // namespace AudioCore::Codec
@@ -63,8 +63,8 @@ public:
// Downsample 6 channels to 2
std::vector<s16> buf;
buf.reserve(samples.size() * num_channels / source_num_channels);
for (size_t i = 0; i < samples.size(); i += source_num_channels) {
for (size_t ch = 0; ch < num_channels; ch++) {
for (std::size_t i = 0; i < samples.size(); i += source_num_channels) {
for (std::size_t ch = 0; ch < num_channels; ch++) {
buf.push_back(samples[i + ch]);
}
}
@@ -75,7 +75,7 @@ public:
queue.Push(samples);
}

size_t SamplesInQueue(u32 num_channels) const override {
std::size_t SamplesInQueue(u32 num_channels) const override {
if (!ctx)
return 0;

@@ -119,10 +119,10 @@ CubebSink::CubebSink(std::string target_device_name) {
LOG_WARNING(Audio_Sink, "Audio output device enumeration not supported");
} else {
const auto collection_end{collection.device + collection.count};
const auto device{std::find_if(collection.device, collection_end,
[&](const cubeb_device_info& device) {
return target_device_name == device.friendly_name;
})};
const auto device{
std::find_if(collection.device, collection_end, [&](const cubeb_device_info& info) {
return target_device_name == info.friendly_name;
})};
if (device != collection_end) {
output_device = device->devid;
}
@@ -159,15 +159,16 @@ long CubebSinkStream::DataCallback(cubeb_stream* stream, void* user_data, const
return {};
}

const size_t num_channels = impl->GetNumChannels();
const size_t samples_to_write = num_channels * num_frames;
size_t samples_written;
const std::size_t num_channels = impl->GetNumChannels();
const std::size_t samples_to_write = num_channels * num_frames;
std::size_t samples_written;

if (Settings::values.enable_audio_stretching) {
const std::vector<s16> in{impl->queue.Pop()};
const size_t num_in{in.size() / num_channels};
const std::size_t num_in{in.size() / num_channels};
s16* const out{reinterpret_cast<s16*>(buffer)};
const size_t out_frames = impl->time_stretch.Process(in.data(), num_in, out, num_frames);
const std::size_t out_frames =
impl->time_stretch.Process(in.data(), num_in, out, num_frames);
samples_written = out_frames * num_channels;

if (impl->should_flush) {
@@ -184,7 +185,7 @@ long CubebSinkStream::DataCallback(cubeb_stream* stream, void* user_data, const
}

// Fill the rest of the frames with last_frame
for (size_t i = samples_written; i < samples_to_write; i += num_channels) {
for (std::size_t i = samples_written; i < samples_to_write; i += num_channels) {
std::memcpy(buffer + i * sizeof(s16), &impl->last_frame[0], num_channels * sizeof(s16));
}

@@ -197,7 +198,7 @@ std::vector<std::string> ListCubebSinkDevices() {
std::vector<std::string> device_list;
cubeb* ctx;

if (cubeb_init(&ctx, "Citra Device Enumerator", nullptr) != CUBEB_OK) {
if (cubeb_init(&ctx, "yuzu Device Enumerator", nullptr) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "cubeb_init failed");
return {};
}
@@ -206,7 +207,7 @@ std::vector<std::string> ListCubebSinkDevices() {
if (cubeb_enumerate_devices(ctx, CUBEB_DEVICE_TYPE_OUTPUT, &collection) != CUBEB_OK) {
LOG_WARNING(Audio_Sink, "Audio output device enumeration not supported");
} else {
for (size_t i = 0; i < collection.count; i++) {
for (std::size_t i = 0; i < collection.count; i++) {
const cubeb_device_info& device = collection.device[i];
if (device.friendly_name) {
device_list.emplace_back(device.friendly_name);
@@ -22,7 +22,7 @@ private:
struct NullSinkStreamImpl final : SinkStream {
void EnqueueSamples(u32 /*num_channels*/, const std::vector<s16>& /*samples*/) override {}

size_t SamplesInQueue(u32 /*num_channels*/) const override {
std::size_t SamplesInQueue(u32 /*num_channels*/) const override {
return 0;
}
@@ -7,6 +7,7 @@

#include "audio_core/sink.h"
#include "audio_core/sink_details.h"
#include "audio_core/sink_stream.h"
#include "audio_core/stream.h"
#include "common/assert.h"
#include "common/logging/log.h"
@@ -17,7 +18,7 @@

namespace AudioCore {

constexpr size_t MaxAudioBufferCount{32};
constexpr std::size_t MaxAudioBufferCount{32};

u32 Stream::GetNumChannels() const {
switch (format) {
@@ -52,7 +53,7 @@ void Stream::Stop() {
}

s64 Stream::GetBufferReleaseCycles(const Buffer& buffer) const {
const size_t num_samples{buffer.GetSamples().size() / GetNumChannels()};
const std::size_t num_samples{buffer.GetSamples().size() / GetNumChannels()};
return CoreTiming::usToCycles((static_cast<u64>(num_samples) * 1000000) / sample_rate);
}

@@ -122,9 +123,9 @@ bool Stream::ContainsBuffer(Buffer::Tag tag) const {
return {};
}

std::vector<Buffer::Tag> Stream::GetTagsAndReleaseBuffers(size_t max_count) {
std::vector<Buffer::Tag> Stream::GetTagsAndReleaseBuffers(std::size_t max_count) {
std::vector<Buffer::Tag> tags;
for (size_t count = 0; count < max_count && !released_buffers.empty(); ++count) {
for (std::size_t count = 0; count < max_count && !released_buffers.empty(); ++count) {
tags.push_back(released_buffers.front()->GetTag());
released_buffers.pop();
}
@@ -11,13 +11,16 @@
#include <queue>

#include "audio_core/buffer.h"
#include "audio_core/sink_stream.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/core_timing.h"

namespace CoreTiming {
struct EventType;
}

namespace AudioCore {

class SinkStream;

/**
* Represents an audio stream, which is a sequence of queued buffers, to be outputed by AudioOut
*/
@@ -49,7 +52,7 @@ public:
bool ContainsBuffer(Buffer::Tag tag) const;

/// Returns a vector of recently released buffers specified by tag
std::vector<Buffer::Tag> GetTagsAndReleaseBuffers(size_t max_count);
std::vector<Buffer::Tag> GetTagsAndReleaseBuffers(std::size_t max_count);

/// Returns true if the stream is currently playing
bool IsPlaying() const {
@@ -57,7 +60,7 @@ public:
}

/// Returns the number of queued buffers
size_t GetQueueSize() const {
std::size_t GetQueueSize() const {
return queued_buffers.size();
}
@@ -26,7 +26,8 @@ void TimeStretcher::Flush() {
m_sound_touch.flush();
}

size_t TimeStretcher::Process(const s16* in, size_t num_in, s16* out, size_t num_out) {
std::size_t TimeStretcher::Process(const s16* in, std::size_t num_in, s16* out,
std::size_t num_out) {
const double time_delta = static_cast<double>(num_out) / m_sample_rate; // seconds

// We were given actual_samples number of samples, and num_samples were requested from us.
@@ -61,8 +62,8 @@ size_t TimeStretcher::Process(const s16* in, size_t num_in, s16* out, size_t num
LOG_DEBUG(Audio, "{:5}/{:5} ratio:{:0.6f} backlog:{:0.6f}", num_in, num_out, m_stretch_ratio,
backlog_fullness);

m_sound_touch.putSamples(in, num_in);
return m_sound_touch.receiveSamples(out, num_out);
m_sound_touch.putSamples(in, static_cast<u32>(num_in));
return m_sound_touch.receiveSamples(out, static_cast<u32>(num_out));
}

} // namespace AudioCore
@@ -4,7 +4,6 @@

#pragma once

#include <array>
#include <cstddef>
#include <SoundTouch.h>
#include "common/common_types.h"
@@ -20,7 +19,7 @@ public:
/// @param out Output sample buffer
/// @param num_out Desired number of output frames in `out`
/// @returns Actual number of frames written to `out`
size_t Process(const s16* in, size_t num_in, s16* out, size_t num_out);
std::size_t Process(const s16* in, std::size_t num_in, s16* out, std::size_t num_out);

void Clear();
@@ -8,13 +8,13 @@
namespace Common {

template <typename T>
constexpr T AlignUp(T value, size_t size) {
constexpr T AlignUp(T value, std::size_t size) {
static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
return static_cast<T>(value + (size - value % size) % size);
}

template <typename T>
constexpr T AlignDown(T value, size_t size) {
constexpr T AlignDown(T value, std::size_t size) {
static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
return static_cast<T>(value - value % size);
}
@@ -129,8 +129,8 @@ private:

public:
/// Constants to allow limited introspection of fields if needed
static constexpr size_t position = Position;
static constexpr size_t bits = Bits;
static constexpr std::size_t position = Position;
static constexpr std::size_t bits = Bits;
static constexpr StorageType mask = (((StorageTypeU)~0) >> (8 * sizeof(T) - bits)) << position;

/**
@@ -170,14 +170,14 @@ public:
m_val |= (IntTy)1 << bit;
}

static BitSet AllTrue(size_t count) {
static BitSet AllTrue(std::size_t count) {
return BitSet(count == sizeof(IntTy) * 8 ? ~(IntTy)0 : (((IntTy)1 << count) - 1));
}

Ref operator[](size_t bit) {
Ref operator[](std::size_t bit) {
return Ref(this, (IntTy)1 << bit);
}
const Ref operator[](size_t bit) const {
const Ref operator[](std::size_t bit) const {
return (*const_cast<BitSet*>(this))[bit];
}
bool operator==(BitSet other) const {
@@ -114,7 +114,7 @@ static uint64 HashLen16(uint64 u, uint64 v, uint64 mul) {
return b;
}

static uint64 HashLen0to16(const char* s, size_t len) {
static uint64 HashLen0to16(const char* s, std::size_t len) {
if (len >= 8) {
uint64 mul = k2 + len * 2;
uint64 a = Fetch64(s) + k2;
@@ -141,7 +141,7 @@ static uint64 HashLen0to16(const char* s, size_t len) {

// This probably works well for 16-byte strings as well, but it may be overkill
// in that case.
static uint64 HashLen17to32(const char* s, size_t len) {
static uint64 HashLen17to32(const char* s, std::size_t len) {
uint64 mul = k2 + len * 2;
uint64 a = Fetch64(s) * k1;
uint64 b = Fetch64(s + 8);
@@ -170,7 +170,7 @@ static pair<uint64, uint64> WeakHashLen32WithSeeds(const char* s, uint64 a, uint
}

// Return an 8-byte hash for 33 to 64 bytes.
static uint64 HashLen33to64(const char* s, size_t len) {
static uint64 HashLen33to64(const char* s, std::size_t len) {
uint64 mul = k2 + len * 2;
uint64 a = Fetch64(s) * k2;
uint64 b = Fetch64(s + 8);
@@ -191,7 +191,7 @@ static uint64 HashLen33to64(const char* s, size_t len) {
return b + x;
}

uint64 CityHash64(const char* s, size_t len) {
uint64 CityHash64(const char* s, std::size_t len) {
if (len <= 32) {
if (len <= 16) {
return HashLen0to16(s, len);
@@ -212,7 +212,7 @@ uint64 CityHash64(const char* s, size_t len) {
x = x * k1 + Fetch64(s);

// Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
len = (len - 1) & ~static_cast<size_t>(63);
len = (len - 1) & ~static_cast<std::size_t>(63);
do {
x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
@@ -229,17 +229,17 @@ uint64 CityHash64(const char* s, size_t len) {
HashLen16(v.second, w.second) + x);
}

uint64 CityHash64WithSeed(const char* s, size_t len, uint64 seed) {
uint64 CityHash64WithSeed(const char* s, std::size_t len, uint64 seed) {
return CityHash64WithSeeds(s, len, k2, seed);
}

uint64 CityHash64WithSeeds(const char* s, size_t len, uint64 seed0, uint64 seed1) {
uint64 CityHash64WithSeeds(const char* s, std::size_t len, uint64 seed0, uint64 seed1) {
return HashLen16(CityHash64(s, len) - seed0, seed1);
}

// A subroutine for CityHash128(). Returns a decent 128-bit hash for strings
// of any length representable in signed long. Based on City and Murmur.
static uint128 CityMurmur(const char* s, size_t len, uint128 seed) {
static uint128 CityMurmur(const char* s, std::size_t len, uint128 seed) {
uint64 a = Uint128Low64(seed);
uint64 b = Uint128High64(seed);
uint64 c = 0;
@@ -269,7 +269,7 @@ static uint128 CityMurmur(const char* s, size_t len, uint128 seed) {
return uint128(a ^ b, HashLen16(b, a));
}

uint128 CityHash128WithSeed(const char* s, size_t len, uint128 seed) {
uint128 CityHash128WithSeed(const char* s, std::size_t len, uint128 seed) {
if (len < 128) {
return CityMurmur(s, len, seed);
}
@@ -313,7 +313,7 @@ uint128 CityHash128WithSeed(const char* s, size_t len, uint128 seed) {
w.first *= 9;
v.first *= k0;
// If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s.
for (size_t tail_done = 0; tail_done < len;) {
for (std::size_t tail_done = 0; tail_done < len;) {
tail_done += 32;
y = Rotate(x + y, 42) * k0 + v.second;
w.first += Fetch64(s + len - tail_done + 16);
@@ -331,7 +331,7 @@ uint128 CityHash128WithSeed(const char* s, size_t len, uint128 seed) {
return uint128(HashLen16(x + v.second, w.second) + y, HashLen16(x + w.second, y + v.second));
}

uint128 CityHash128(const char* s, size_t len) {
uint128 CityHash128(const char* s, std::size_t len) {
return len >= 16
? CityHash128WithSeed(s + 16, len - 16, uint128(Fetch64(s), Fetch64(s + 8) + k0))
: CityHash128WithSeed(s, len, uint128(k0, k1));
@@ -63,7 +63,7 @@

#include <utility>
#include <stdint.h>
#include <stdlib.h> // for size_t.
#include <stdlib.h> // for std::size_t.

namespace Common {

@@ -77,22 +77,22 @@ inline uint64_t Uint128High64(const uint128& x) {
}

// Hash function for a byte array.
uint64_t CityHash64(const char* buf, size_t len);
uint64_t CityHash64(const char* buf, std::size_t len);

// Hash function for a byte array. For convenience, a 64-bit seed is also
// hashed into the result.
uint64_t CityHash64WithSeed(const char* buf, size_t len, uint64_t seed);
uint64_t CityHash64WithSeed(const char* buf, std::size_t len, uint64_t seed);

// Hash function for a byte array. For convenience, two seeds are also
// hashed into the result.
uint64_t CityHash64WithSeeds(const char* buf, size_t len, uint64_t seed0, uint64_t seed1);
uint64_t CityHash64WithSeeds(const char* buf, std::size_t len, uint64_t seed0, uint64_t seed1);

// Hash function for a byte array.
uint128 CityHash128(const char* s, size_t len);
uint128 CityHash128(const char* s, std::size_t len);

// Hash function for a byte array. For convenience, a 128-bit seed is also
// hashed into the result.
uint128 CityHash128WithSeed(const char* s, size_t len, uint128 seed);
uint128 CityHash128WithSeed(const char* s, std::size_t len, uint128 seed);

// Hash 128 input bits down to 64 bits of output.
// This is intended to be a reasonably good hash function.
@@ -76,7 +76,7 @@ namespace FileUtil {
// Modifies argument.
static void StripTailDirSlashes(std::string& fname) {
if (fname.length() > 1) {
size_t i = fname.length();
std::size_t i = fname.length();
while (i > 0 && fname[i - 1] == DIR_SEP_CHR)
--i;
fname.resize(i);
@@ -201,7 +201,7 @@ bool CreateFullPath(const std::string& fullPath) {
return true;
}

size_t position = 0;
std::size_t position = 0;
while (true) {
// Find next sub path
position = fullPath.find(DIR_SEP_CHR, position);
@@ -299,7 +299,7 @@ bool Copy(const std::string& srcFilename, const std::string& destFilename) {
std::array<char, 1024> buffer;
while (!feof(input.get())) {
// read input
size_t rnum = fread(buffer.data(), sizeof(char), buffer.size(), input.get());
std::size_t rnum = fread(buffer.data(), sizeof(char), buffer.size(), input.get());
if (rnum != buffer.size()) {
if (ferror(input.get()) != 0) {
LOG_ERROR(Common_Filesystem, "failed reading from source, {} --> {}: {}",
@@ -309,7 +309,7 @@ bool Copy(const std::string& srcFilename, const std::string& destFilename) {
}

// write output
size_t wnum = fwrite(buffer.data(), sizeof(char), rnum, output.get());
std::size_t wnum = fwrite(buffer.data(), sizeof(char), rnum, output.get());
if (wnum != rnum) {
LOG_ERROR(Common_Filesystem, "failed writing to output, {} --> {}: {}", srcFilename,
destFilename, GetLastErrorMsg());
@@ -756,11 +756,11 @@ std::string GetNANDRegistrationDir(bool system) {
return GetUserPath(UserPath::NANDDir) + "user/Contents/registered/";
}

size_t WriteStringToFile(bool text_file, const std::string& str, const char* filename) {
std::size_t WriteStringToFile(bool text_file, const std::string& str, const char* filename) {
return FileUtil::IOFile(filename, text_file ? "w" : "wb").WriteBytes(str.data(), str.size());
}

size_t ReadFileToString(bool text_file, const char* filename, std::string& str) {
std::size_t ReadFileToString(bool text_file, const char* filename, std::string& str) {
IOFile file(filename, text_file ? "r" : "rb");

if (!file.IsOpen())
@@ -829,7 +829,7 @@ std::vector<std::string> SplitPathComponents(std::string_view filename) {
std::string_view GetParentPath(std::string_view path) {
const auto name_bck_index = path.rfind('\\');
const auto name_fwd_index = path.rfind('/');
size_t name_index;
std::size_t name_index;

if (name_bck_index == std::string_view::npos || name_fwd_index == std::string_view::npos) {
name_index = std::min(name_bck_index, name_fwd_index);
@@ -868,7 +868,7 @@ std::string_view GetFilename(std::string_view path) {
}

std::string_view GetExtensionFromFilename(std::string_view name) {
const size_t index = name.rfind('.');
const std::size_t index = name.rfind('.');

if (index == std::string_view::npos) {
return {};
@@ -143,8 +143,9 @@ const std::string& GetExeDirectory();
std::string AppDataRoamingDirectory();
#endif

size_t WriteStringToFile(bool text_file, const std::string& str, const char* filename);
size_t ReadFileToString(bool text_file, const char* filename, std::string& str);
std::size_t WriteStringToFile(bool text_file, const std::string& str, const char* filename);

std::size_t ReadFileToString(bool text_file, const char* filename, std::string& str);

/**
* Splits the filename into 8.3 format
@@ -177,10 +178,10 @@ std::string_view RemoveTrailingSlash(std::string_view path);

// Creates a new vector containing indices [first, last) from the original.
template <typename T>
std::vector<T> SliceVector(const std::vector<T>& vector, size_t first, size_t last) {
std::vector<T> SliceVector(const std::vector<T>& vector, std::size_t first, std::size_t last) {
if (first >= last)
return {};
last = std::min<size_t>(last, vector.size());
last = std::min<std::size_t>(last, vector.size());
return std::vector<T>(vector.begin() + first, vector.begin() + first + last);
}

@@ -213,47 +214,47 @@ public:
bool Close();

template <typename T>
size_t ReadArray(T* data, size_t length) const {
std::size_t ReadArray(T* data, std::size_t length) const {
static_assert(std::is_trivially_copyable_v<T>,
"Given array does not consist of trivially copyable objects");

if (!IsOpen()) {
return std::numeric_limits<size_t>::max();
return std::numeric_limits<std::size_t>::max();
}

return std::fread(data, sizeof(T), length, m_file);
}

template <typename T>
size_t WriteArray(const T* data, size_t length) {
std::size_t WriteArray(const T* data, std::size_t length) {
static_assert(std::is_trivially_copyable_v<T>,
"Given array does not consist of trivially copyable objects");
if (!IsOpen()) {
return std::numeric_limits<size_t>::max();
return std::numeric_limits<std::size_t>::max();
}

return std::fwrite(data, sizeof(T), length, m_file);
}

template <typename T>
size_t ReadBytes(T* data, size_t length) const {
std::size_t ReadBytes(T* data, std::size_t length) const {
static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
return ReadArray(reinterpret_cast<char*>(data), length);
}

template <typename T>
size_t WriteBytes(const T* data, size_t length) {
std::size_t WriteBytes(const T* data, std::size_t length) {
static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
return WriteArray(reinterpret_cast<const char*>(data), length);
}

template <typename T>
size_t WriteObject(const T& object) {
std::size_t WriteObject(const T& object) {
static_assert(!std::is_pointer_v<T>, "WriteObject arguments must not be a pointer");
return WriteArray(&object, 1);
}

size_t WriteString(const std::string& str) {
std::size_t WriteString(const std::string& str) {
return WriteArray(str.c_str(), str.length());
}
@@ -17,7 +17,7 @@ namespace Common {
* @param len Length of data (in bytes) to compute hash over
* @returns 64-bit hash value that was computed over the data block
*/
static inline u64 ComputeHash64(const void* data, size_t len) {
static inline u64 ComputeHash64(const void* data, std::size_t len) {
return CityHash64(static_cast<const char*>(data), len);
}

@@ -63,7 +63,7 @@ struct HashableStruct {
return !(*this == o);
};

size_t Hash() const {
std::size_t Hash() const {
return Common::ComputeStructHash64(state);
}
};
@@ -18,7 +18,7 @@ u8 ToHexNibble(char c1) {
return 0;
}

std::array<u8, 16> operator""_array16(const char* str, size_t len) {
std::array<u8, 16> operator""_array16(const char* str, std::size_t len) {
if (len != 32) {
LOG_ERROR(Common,
"Attempting to parse string to array that is not of correct size (expected=32, "
@@ -29,7 +29,7 @@ std::array<u8, 16> operator""_array16(const char* str, size_t len) {
return HexStringToArray<16>(str);
}

std::array<u8, 32> operator""_array32(const char* str, size_t len) {
std::array<u8, 32> operator""_array32(const char* str, std::size_t len) {
if (len != 64) {
LOG_ERROR(Common,
"Attempting to parse string to array that is not of correct size (expected=64, "
@@ -14,20 +14,20 @@ namespace Common {

u8 ToHexNibble(char c1);

template <size_t Size, bool le = false>
template <std::size_t Size, bool le = false>
std::array<u8, Size> HexStringToArray(std::string_view str) {
std::array<u8, Size> out{};
if constexpr (le) {
for (size_t i = 2 * Size - 2; i <= 2 * Size; i -= 2)
for (std::size_t i = 2 * Size - 2; i <= 2 * Size; i -= 2)
out[i / 2] = (ToHexNibble(str[i]) << 4) | ToHexNibble(str[i + 1]);
} else {
for (size_t i = 0; i < 2 * Size; i += 2)
for (std::size_t i = 0; i < 2 * Size; i += 2)
out[i / 2] = (ToHexNibble(str[i]) << 4) | ToHexNibble(str[i + 1]);
}
return out;
}

template <size_t Size>
template <std::size_t Size>
std::string HexArrayToString(std::array<u8, Size> array, bool upper = true) {
std::string out;
for (u8 c : array)
@@ -35,7 +35,7 @@ std::string HexArrayToString(std::array<u8, Size> array, bool upper = true) {
return out;
}

std::array<u8, 0x10> operator"" _array16(const char* str, size_t len);
std::array<u8, 0x20> operator"" _array32(const char* str, size_t len);
std::array<u8, 0x10> operator"" _array16(const char* str, std::size_t len);
std::array<u8, 0x20> operator"" _array32(const char* str, std::size_t len);

} // namespace Common
@@ -135,7 +135,7 @@ FileBackend::FileBackend(const std::string& filename)
void FileBackend::Write(const Entry& entry) {
// prevent logs from going over the maximum size (in case its spamming and the user doesn't
// know)
constexpr size_t MAX_BYTES_WRITTEN = 50 * 1024L * 1024L;
constexpr std::size_t MAX_BYTES_WRITTEN = 50 * 1024L * 1024L;
if (!file.IsOpen() || bytes_written > MAX_BYTES_WRITTEN) {
return;
}
@@ -100,7 +100,7 @@ public:

private:
FileUtil::IOFile file;
size_t bytes_written;
std::size_t bytes_written;
};

void AddBackend(std::unique_ptr<Backend> backend);
@@ -71,7 +71,7 @@ void Filter::ResetAll(Level level) {
}

void Filter::SetClassLevel(Class log_class, Level level) {
class_levels[static_cast<size_t>(log_class)] = level;
class_levels[static_cast<std::size_t>(log_class)] = level;
}

void Filter::ParseFilterString(std::string_view filter_view) {
@@ -93,7 +93,8 @@ void Filter::ParseFilterString(std::string_view filter_view) {
}

bool Filter::CheckMessage(Class log_class, Level level) const {
return static_cast<u8>(level) >= static_cast<u8>(class_levels[static_cast<size_t>(log_class)]);
return static_cast<u8>(level) >=
static_cast<u8>(class_levels[static_cast<std::size_t>(log_class)]);
}

bool Filter::IsDebug() const {
@@ -49,6 +49,6 @@ public:
bool IsDebug() const;

private:
std::array<Level, static_cast<size_t>(Class::Count)> class_levels;
std::array<Level, static_cast<std::size_t>(Class::Count)> class_levels;
};
} // namespace Log
@@ -25,7 +25,7 @@
// This is purposely not a full wrapper for virtualalloc/mmap, but it
// provides exactly the primitive operations that Dolphin needs.

void* AllocateExecutableMemory(size_t size, bool low) {
void* AllocateExecutableMemory(std::size_t size, bool low) {
#if defined(_WIN32)
void* ptr = VirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
#else
@@ -74,7 +74,7 @@ void* AllocateExecutableMemory(size_t size, bool low) {
return ptr;
}

void* AllocateMemoryPages(size_t size) {
void* AllocateMemoryPages(std::size_t size) {
#ifdef _WIN32
void* ptr = VirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE);
#else
@@ -90,7 +90,7 @@ void* AllocateMemoryPages(size_t size) {
return ptr;
}

void* AllocateAlignedMemory(size_t size, size_t alignment) {
void* AllocateAlignedMemory(std::size_t size, std::size_t alignment) {
#ifdef _WIN32
void* ptr = _aligned_malloc(size, alignment);
#else
@@ -109,7 +109,7 @@ void* AllocateAlignedMemory(size_t size, size_t alignment) {
return ptr;
}

void FreeMemoryPages(void* ptr, size_t size) {
void FreeMemoryPages(void* ptr, std::size_t size) {
if (ptr) {
#ifdef _WIN32
if (!VirtualFree(ptr, 0, MEM_RELEASE))
@@ -130,7 +130,7 @@ void FreeAlignedMemory(void* ptr) {
}
}

void WriteProtectMemory(void* ptr, size_t size, bool allowExecute) {
void WriteProtectMemory(void* ptr, std::size_t size, bool allowExecute) {
#ifdef _WIN32
DWORD oldValue;
if (!VirtualProtect(ptr, size, allowExecute ? PAGE_EXECUTE_READ : PAGE_READONLY, &oldValue))
@@ -140,7 +140,7 @@ void WriteProtectMemory(void* ptr, size_t size, bool allowExecute) {
#endif
}

void UnWriteProtectMemory(void* ptr, size_t size, bool allowExecute) {
void UnWriteProtectMemory(void* ptr, std::size_t size, bool allowExecute) {
#ifdef _WIN32
DWORD oldValue;
if (!VirtualProtect(ptr, size, allowExecute ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE,
@@ -7,13 +7,13 @@
#include <cstddef>
#include <string>

void* AllocateExecutableMemory(size_t size, bool low = true);
void* AllocateMemoryPages(size_t size);
void FreeMemoryPages(void* ptr, size_t size);
void* AllocateAlignedMemory(size_t size, size_t alignment);
void* AllocateExecutableMemory(std::size_t size, bool low = true);
void* AllocateMemoryPages(std::size_t size);
void FreeMemoryPages(void* ptr, std::size_t size);
void* AllocateAlignedMemory(std::size_t size, std::size_t alignment);
void FreeAlignedMemory(void* ptr);
void WriteProtectMemory(void* ptr, size_t size, bool executable = false);
void UnWriteProtectMemory(void* ptr, size_t size, bool allowExecute = false);
void WriteProtectMemory(void* ptr, std::size_t size, bool executable = false);
void UnWriteProtectMemory(void* ptr, std::size_t size, bool allowExecute = false);
std::string MemUsage();

inline int GetPageSize() {
@@ -16,7 +16,7 @@
// Call directly after the command or use the error num.
// This function might change the error code.
std::string GetLastErrorMsg() {
static const size_t buff_size = 255;
static const std::size_t buff_size = 255;
char err_str[buff_size];

#ifdef _WIN32
@@ -19,31 +19,31 @@ namespace Common {
/// @tparam T Element type
/// @tparam capacity Number of slots in ring buffer
/// @tparam granularity Slot size in terms of number of elements
template <typename T, size_t capacity, size_t granularity = 1>
template <typename T, std::size_t capacity, std::size_t granularity = 1>
class RingBuffer {
/// A "slot" is made of `granularity` elements of `T`.
static constexpr size_t slot_size = granularity * sizeof(T);
static constexpr std::size_t slot_size = granularity * sizeof(T);
// T must be safely memcpy-able and have a trivial default constructor.
static_assert(std::is_trivial_v<T>);
// Ensure capacity is sensible.
static_assert(capacity < std::numeric_limits<size_t>::max() / 2 / granularity);
static_assert(capacity < std::numeric_limits<std::size_t>::max() / 2 / granularity);
static_assert((capacity & (capacity - 1)) == 0, "capacity must be a power of two");
// Ensure lock-free.
static_assert(std::atomic<size_t>::is_always_lock_free);
static_assert(std::atomic<std::size_t>::is_always_lock_free);

public:
/// Pushes slots into the ring buffer
/// @param new_slots Pointer to the slots to push
/// @param slot_count Number of slots to push
/// @returns The number of slots actually pushed
size_t Push(const void* new_slots, size_t slot_count) {
const size_t write_index = m_write_index.load();
const size_t slots_free = capacity + m_read_index.load() - write_index;
const size_t push_count = std::min(slot_count, slots_free);
std::size_t Push(const void* new_slots, std::size_t slot_count) {
const std::size_t write_index = m_write_index.load();
const std::size_t slots_free = capacity + m_read_index.load() - write_index;
const std::size_t push_count = std::min(slot_count, slots_free);

const size_t pos = write_index % capacity;
const size_t first_copy = std::min(capacity - pos, push_count);
const size_t second_copy = push_count - first_copy;
const std::size_t pos = write_index % capacity;
const std::size_t first_copy = std::min(capacity - pos, push_count);
const std::size_t second_copy = push_count - first_copy;

const char* in = static_cast<const char*>(new_slots);
std::memcpy(m_data.data() + pos * granularity, in, first_copy * slot_size);
@@ -55,7 +55,7 @@ public:
return push_count;
}

size_t Push(const std::vector<T>& input) {
std::size_t Push(const std::vector<T>& input) {
return Push(input.data(), input.size());
}

@@ -63,14 +63,14 @@ public:
/// @param output Where to store the popped slots
/// @param max_slots Maximum number of slots to pop
/// @returns The number of slots actually popped
size_t Pop(void* output, size_t max_slots = ~size_t(0)) {
const size_t read_index = m_read_index.load();
const size_t slots_filled = m_write_index.load() - read_index;
const size_t pop_count = std::min(slots_filled, max_slots);
std::size_t Pop(void* output, std::size_t max_slots = ~std::size_t(0)) {
const std::size_t read_index = m_read_index.load();
const std::size_t slots_filled = m_write_index.load() - read_index;
const std::size_t pop_count = std::min(slots_filled, max_slots);

const size_t pos = read_index % capacity;
const size_t first_copy = std::min(capacity - pos, pop_count);
const size_t second_copy = pop_count - first_copy;
const std::size_t pos = read_index % capacity;
const std::size_t first_copy = std::min(capacity - pos, pop_count);
const std::size_t second_copy = pop_count - first_copy;

char* out = static_cast<char*>(output);
std::memcpy(out, m_data.data() + pos * granularity, first_copy * slot_size);
@@ -82,28 +82,28 @@ public:
return pop_count;
}

std::vector<T> Pop(size_t max_slots = ~size_t(0)) {
std::vector<T> Pop(std::size_t max_slots = ~std::size_t(0)) {
std::vector<T> out(std::min(max_slots, capacity) * granularity);
const size_t count = Pop(out.data(), out.size() / granularity);
const std::size_t count = Pop(out.data(), out.size() / granularity);
out.resize(count * granularity);
return out;
}

/// @returns Number of slots used
size_t Size() const {
std::size_t Size() const {
return m_write_index.load() - m_read_index.load();
}

/// @returns Maximum size of ring buffer
constexpr size_t Capacity() const {
constexpr std::size_t Capacity() const {
return capacity;
}

private:
// It is important to align the below variables for performance reasons:
// Having them on the same cache-line would result in false-sharing between them.
alignas(128) std::atomic<size_t> m_read_index{0};
alignas(128) std::atomic<size_t> m_write_index{0};
alignas(128) std::atomic<std::size_t> m_read_index{0};
alignas(128) std::atomic<std::size_t> m_write_index{0};

std::array<T, granularity * capacity> m_data;
};
@ -37,7 +37,7 @@ std::string ToUpper(std::string str) {
}

// For Debugging. Read out an u8 array.
std::string ArrayToString(const u8* data, size_t size, int line_len, bool spaces) {
std::string ArrayToString(const u8* data, std::size_t size, int line_len, bool spaces) {
std::ostringstream oss;
oss << std::setfill('0') << std::hex;

@ -60,7 +60,7 @@ std::string StringFromBuffer(const std::vector<u8>& data) {

// Turns " hej " into "hej". Also handles tabs.
std::string StripSpaces(const std::string& str) {
const size_t s = str.find_first_not_of(" \t\r\n");
const std::size_t s = str.find_first_not_of(" \t\r\n");

if (str.npos != s)
return str.substr(s, str.find_last_not_of(" \t\r\n") - s + 1);
@ -121,10 +121,10 @@ bool SplitPath(const std::string& full_path, std::string* _pPath, std::string* _
if (full_path.empty())
return false;

size_t dir_end = full_path.find_last_of("/"
std::size_t dir_end = full_path.find_last_of("/"
// windows needs the : included for something like just "C:" to be considered a directory
#ifdef _WIN32
"\\:"
"\\:"
#endif
);
if (std::string::npos == dir_end)
@ -132,7 +132,7 @@ bool SplitPath(const std::string& full_path, std::string* _pPath, std::string* _
else
dir_end += 1;

size_t fname_end = full_path.rfind('.');
std::size_t fname_end = full_path.rfind('.');
if (fname_end < dir_end || std::string::npos == fname_end)
fname_end = full_path.size();

@ -172,7 +172,7 @@ void SplitString(const std::string& str, const char delim, std::vector<std::stri
}

std::string TabsToSpaces(int tab_size, std::string in) {
size_t i = 0;
std::size_t i = 0;

while ((i = in.find('\t')) != std::string::npos) {
in.replace(i, 1, tab_size, ' ');
@ -182,7 +182,7 @@ std::string TabsToSpaces(int tab_size, std::string in) {
}

std::string ReplaceAll(std::string result, const std::string& src, const std::string& dest) {
size_t pos = 0;
std::size_t pos = 0;

if (src == dest)
return result;
@ -280,22 +280,22 @@ static std::string CodeToUTF8(const char* fromcode, const std::basic_string<T>&
return {};
}

const size_t in_bytes = sizeof(T) * input.size();
const std::size_t in_bytes = sizeof(T) * input.size();
// Multiply by 4, which is the max number of bytes to encode a codepoint
const size_t out_buffer_size = 4 * in_bytes;
const std::size_t out_buffer_size = 4 * in_bytes;

std::string out_buffer(out_buffer_size, '\0');

auto src_buffer = &input[0];
size_t src_bytes = in_bytes;
std::size_t src_bytes = in_bytes;
auto dst_buffer = &out_buffer[0];
size_t dst_bytes = out_buffer.size();
std::size_t dst_bytes = out_buffer.size();

while (0 != src_bytes) {
size_t const iconv_result =
std::size_t const iconv_result =
iconv(conv_desc, (char**)(&src_buffer), &src_bytes, &dst_buffer, &dst_bytes);

if (static_cast<size_t>(-1) == iconv_result) {
if (static_cast<std::size_t>(-1) == iconv_result) {
if (EILSEQ == errno || EINVAL == errno) {
// Try to skip the bad character
if (0 != src_bytes) {
@ -326,22 +326,22 @@ std::u16string UTF8ToUTF16(const std::string& input) {
return {};
}

const size_t in_bytes = sizeof(char) * input.size();
const std::size_t in_bytes = sizeof(char) * input.size();
// Multiply by 4, which is the max number of bytes to encode a codepoint
const size_t out_buffer_size = 4 * sizeof(char16_t) * in_bytes;
const std::size_t out_buffer_size = 4 * sizeof(char16_t) * in_bytes;

std::u16string out_buffer(out_buffer_size, char16_t{});

char* src_buffer = const_cast<char*>(&input[0]);
size_t src_bytes = in_bytes;
std::size_t src_bytes = in_bytes;
char* dst_buffer = (char*)(&out_buffer[0]);
size_t dst_bytes = out_buffer.size();
std::size_t dst_bytes = out_buffer.size();

while (0 != src_bytes) {
size_t const iconv_result =
std::size_t const iconv_result =
iconv(conv_desc, &src_buffer, &src_bytes, &dst_buffer, &dst_bytes);

if (static_cast<size_t>(-1) == iconv_result) {
if (static_cast<std::size_t>(-1) == iconv_result) {
if (EILSEQ == errno || EINVAL == errno) {
// Try to skip the bad character
if (0 != src_bytes) {
@ -381,8 +381,8 @@ std::string SHIFTJISToUTF8(const std::string& input) {

#endif

std::string StringFromFixedZeroTerminatedBuffer(const char* buffer, size_t max_len) {
size_t len = 0;
std::string StringFromFixedZeroTerminatedBuffer(const char* buffer, std::size_t max_len) {
std::size_t len = 0;
while (len < max_len && buffer[len] != '\0')
++len;

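The two iconv-based converters above share one pattern: allocate an output buffer of four bytes per input code unit, then loop, skipping a byte whenever iconv reports EILSEQ or EINVAL. A minimal standalone sketch of that pattern follows; ConvertShiftJISToUTF8 is an illustrative name, the error handling is deliberately reduced, and this is an outline rather than the project's implementation.

#include <cerrno>
#include <cstddef>
#include <iconv.h>
#include <string>

// Hypothetical helper mirroring the loop above: feeds src through iconv and
// skips invalid sequences instead of aborting.
std::string ConvertShiftJISToUTF8(const std::string& src) {
    iconv_t cd = iconv_open("UTF-8", "SHIFT-JIS");
    if (cd == reinterpret_cast<iconv_t>(-1))
        return {};

    std::string out(4 * src.size(), '\0'); // 4 bytes per codepoint is the upper bound
    char* in_ptr = const_cast<char*>(src.data());
    std::size_t in_left = src.size();
    char* out_ptr = &out[0];
    std::size_t out_left = out.size();

    while (in_left != 0) {
        if (iconv(cd, &in_ptr, &in_left, &out_ptr, &out_left) == static_cast<std::size_t>(-1)) {
            if (errno == EILSEQ || errno == EINVAL) {
                // Skip the offending byte and keep going.
                ++in_ptr;
                --in_left;
            } else {
                break;
            }
        }
    }
    iconv_close(cd);
    out.resize(out.size() - out_left); // trim to the bytes actually produced
    return out;
}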
@ -19,7 +19,7 @@ std::string ToLower(std::string str);
/// Make a string uppercase
std::string ToUpper(std::string str);

std::string ArrayToString(const u8* data, size_t size, int line_len = 20, bool spaces = true);
std::string ArrayToString(const u8* data, std::size_t size, int line_len = 20, bool spaces = true);

std::string StringFromBuffer(const std::vector<u8>& data);

@ -118,7 +118,7 @@ bool ComparePartialString(InIt begin, InIt end, const char* other) {
* Creates a std::string from a fixed-size NUL-terminated char buffer. If the buffer isn't
* NUL-terminated then the string ends at max_len characters.
*/
std::string StringFromFixedZeroTerminatedBuffer(const char* buffer, size_t max_len);
std::string StringFromFixedZeroTerminatedBuffer(const char* buffer, std::size_t max_len);

/**
* Attempts to trim an arbitrary prefix from `path`, leaving only the part starting at `root`. It's

@ -60,12 +60,12 @@ private:

class Barrier {
public:
explicit Barrier(size_t count_) : count(count_), waiting(0), generation(0) {}
explicit Barrier(std::size_t count_) : count(count_), waiting(0), generation(0) {}

/// Blocks until all "count" threads have called Sync()
void Sync() {
std::unique_lock<std::mutex> lk(mutex);
const size_t current_generation = generation;
const std::size_t current_generation = generation;

if (++waiting == count) {
generation++;
@ -80,9 +80,9 @@ public:
private:
std::condition_variable condvar;
std::mutex mutex;
const size_t count;
size_t waiting;
size_t generation; // Incremented once each time the barrier is used
const std::size_t count;
std::size_t waiting;
std::size_t generation; // Incremented once each time the barrier is used
};

void SleepCurrentThread(int ms);

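The Barrier above keeps a generation counter so the same object can be reused for repeated rendezvous points. A small usage sketch, assuming the Barrier declared above is visible in scope; Worker, RunWorkers and THREAD_COUNT are illustrative names, not part of the diff.

#include <cstddef>
#include <functional>
#include <thread>
#include <vector>

namespace {
constexpr std::size_t THREAD_COUNT = 4; // illustrative value

void Worker(Barrier& barrier) {
    // ... per-thread setup work ...
    barrier.Sync(); // every thread blocks here until all THREAD_COUNT have arrived
    // ... work that requires all threads to have finished setup ...
}
} // namespace

void RunWorkers() {
    Barrier barrier{THREAD_COUNT};
    std::vector<std::thread> threads;
    for (std::size_t i = 0; i < THREAD_COUNT; ++i)
        threads.emplace_back(Worker, std::ref(barrier));
    for (auto& t : threads)
        t.join();
}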
@ -97,7 +97,7 @@ const BitSet32 ABI_ALL_CALLEE_SAVED = BuildRegSet({
Xbyak::util::xmm15,
});

constexpr size_t ABI_SHADOW_SPACE = 0x20;
constexpr std::size_t ABI_SHADOW_SPACE = 0x20;

#else

@ -147,22 +147,23 @@ const BitSet32 ABI_ALL_CALLEE_SAVED = BuildRegSet({
Xbyak::util::r15,
});

constexpr size_t ABI_SHADOW_SPACE = 0;
constexpr std::size_t ABI_SHADOW_SPACE = 0;

#endif

inline void ABI_CalculateFrameSize(BitSet32 regs, size_t rsp_alignment, size_t needed_frame_size,
s32* out_subtraction, s32* out_xmm_offset) {
inline void ABI_CalculateFrameSize(BitSet32 regs, std::size_t rsp_alignment,
std::size_t needed_frame_size, s32* out_subtraction,
s32* out_xmm_offset) {
int count = (regs & ABI_ALL_GPRS).Count();
rsp_alignment -= count * 8;
size_t subtraction = 0;
std::size_t subtraction = 0;
int xmm_count = (regs & ABI_ALL_XMMS).Count();
if (xmm_count) {
// If we have any XMMs to save, we must align the stack here.
subtraction = rsp_alignment & 0xF;
}
subtraction += 0x10 * xmm_count;
size_t xmm_base_subtraction = subtraction;
std::size_t xmm_base_subtraction = subtraction;
subtraction += needed_frame_size;
subtraction += ABI_SHADOW_SPACE;
// Final alignment.
@ -173,8 +174,9 @@ inline void ABI_CalculateFrameSize(BitSet32 regs, size_t rsp_alignment, size_t n
*out_xmm_offset = (s32)(subtraction - xmm_base_subtraction);
}

inline size_t ABI_PushRegistersAndAdjustStack(Xbyak::CodeGenerator& code, BitSet32 regs,
size_t rsp_alignment, size_t needed_frame_size = 0) {
inline std::size_t ABI_PushRegistersAndAdjustStack(Xbyak::CodeGenerator& code, BitSet32 regs,
std::size_t rsp_alignment,
std::size_t needed_frame_size = 0) {
s32 subtraction, xmm_offset;
ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size, &subtraction, &xmm_offset);

@ -195,7 +197,8 @@ inline size_t ABI_PushRegistersAndAdjustStack(Xbyak::CodeGenerator& code, BitSet
}

inline void ABI_PopRegistersAndAdjustStack(Xbyak::CodeGenerator& code, BitSet32 regs,
size_t rsp_alignment, size_t needed_frame_size = 0) {
std::size_t rsp_alignment,
std::size_t needed_frame_size = 0) {
s32 subtraction, xmm_offset;
ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size, &subtraction, &xmm_offset);

@ -34,7 +34,7 @@ inline bool IsWithin2G(const Xbyak::CodeGenerator& code, uintptr_t target) {
template <typename T>
inline void CallFarFunction(Xbyak::CodeGenerator& code, const T f) {
static_assert(std::is_pointer_v<T>, "Argument must be a (function) pointer.");
size_t addr = reinterpret_cast<size_t>(f);
std::size_t addr = reinterpret_cast<std::size_t>(f);
if (IsWithin2G(code, addr)) {
code.call(f);
} else {

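CallFarFunction above only emits a direct call when the target lies within rel32 range of the generated code; otherwise the address has to go through a register. The check amounts to a signed 32-bit distance test, roughly as sketched here. FitsInRel32 is an illustrative helper written for this note, not the project's IsWithin2G.

#include <cstdint>

// Illustrative: a rel32 call encodes a signed 32-bit displacement, so the
// target must be within +/-2 GiB of the call site (assumes a 64-bit address space).
inline bool FitsInRel32(std::uintptr_t call_site, std::uintptr_t target) {
    const std::int64_t distance =
        static_cast<std::int64_t>(target) - static_cast<std::int64_t>(call_site);
    return distance >= INT32_MIN && distance <= INT32_MAX;
}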
@ -10,7 +10,7 @@

namespace Core {

/// Generic ARM11 CPU interface
/// Generic ARMv8 CPU interface
class ARM_Interface : NonCopyable {
public:
virtual ~ARM_Interface() {}
@ -19,9 +19,9 @@ public:
std::array<u64, 31> cpu_registers;
u64 sp;
u64 pc;
u64 cpsr;
std::array<u128, 32> fpu_registers;
u64 fpscr;
u64 pstate;
std::array<u128, 32> vector_registers;
u64 fpcr;
};

/// Runs the CPU until an event happens
@ -31,11 +31,11 @@ public:
virtual void Step() = 0;

/// Maps a backing memory region for the CPU
virtual void MapBackingMemory(VAddr address, size_t size, u8* memory,
virtual void MapBackingMemory(VAddr address, std::size_t size, u8* memory,
Kernel::VMAPermission perms) = 0;

/// Unmaps a region of memory that was previously mapped using MapBackingMemory
virtual void UnmapMemory(VAddr address, size_t size) = 0;
virtual void UnmapMemory(VAddr address, std::size_t size) = 0;

/// Clear all instruction cache
virtual void ClearInstructionCache() = 0;
@ -69,42 +69,50 @@ public:
*/
virtual void SetReg(int index, u64 value) = 0;

virtual u128 GetExtReg(int index) const = 0;

virtual void SetExtReg(int index, u128 value) = 0;
/**
* Gets the value of a specified vector register.
*
* @param index The index of the vector register.
* @return the value within the vector register.
*/
virtual u128 GetVectorReg(int index) const = 0;

/**
* Gets the value of a VFP register
* @param index Register index (0-31)
* @return Returns the value in the register
* Sets a given value into a vector register.
*
* @param index The index of the vector register.
* @param value The new value to place in the register.
*/
virtual u32 GetVFPReg(int index) const = 0;
virtual void SetVectorReg(int index, u128 value) = 0;

/**
* Sets a VFP register to the given value
* @param index Register index (0-31)
* @param value Value to set register to
* Get the current PSTATE register
* @return Returns the value of the PSTATE register
*/
virtual void SetVFPReg(int index, u32 value) = 0;
virtual u32 GetPSTATE() const = 0;

/**
* Get the current CPSR register
* @return Returns the value of the CPSR register
* Set the current PSTATE register
* @param pstate Value to set PSTATE to
*/
virtual u32 GetCPSR() const = 0;

/**
* Set the current CPSR register
* @param cpsr Value to set CPSR to
*/
virtual void SetCPSR(u32 cpsr) = 0;
virtual void SetPSTATE(u32 pstate) = 0;

virtual VAddr GetTlsAddress() const = 0;

virtual void SetTlsAddress(VAddr address) = 0;

/**
* Gets the value within the TPIDR_EL0 (read/write software thread ID) register.
*
* @return the value within the register.
*/
virtual u64 GetTPIDR_EL0() const = 0;

/**
* Sets a new value within the TPIDR_EL0 (read/write software thread ID) register.
*
* @param value The new value to place in the register.
*/
virtual void SetTPIDR_EL0(u64 value) = 0;

/**
@ -119,6 +127,7 @@ public:
*/
virtual void LoadContext(const ThreadContext& ctx) = 0;

/// Clears the exclusive monitor's state.
virtual void ClearExclusiveState() = 0;

/// Prepare core for thread reschedule (if needed to correctly handle state)

@ -58,7 +58,7 @@ public:
Memory::Write64(vaddr + 8, value[1]);
}

void InterpreterFallback(u64 pc, size_t num_instructions) override {
void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
LOG_INFO(Core_ARM, "Unicorn fallback @ 0x{:X} for {} instructions (instr = {:08X})", pc,
num_instructions, MemoryReadCode(pc));

@ -81,7 +81,7 @@ public:
return;
default:
ASSERT_MSG(false, "ExceptionRaised(exception = {}, pc = {:X})",
static_cast<size_t>(exception), pc);
static_cast<std::size_t>(exception), pc);
}
}

@ -110,7 +110,7 @@ public:
}

ARM_Dynarmic& parent;
size_t num_interpreted_instructions = 0;
std::size_t num_interpreted_instructions = 0;
u64 tpidrro_el0 = 0;
u64 tpidr_el0 = 0;
};
@ -157,7 +157,8 @@ void ARM_Dynarmic::Step() {
cb->InterpreterFallback(jit->GetPC(), 1);
}

ARM_Dynarmic::ARM_Dynarmic(std::shared_ptr<ExclusiveMonitor> exclusive_monitor, size_t core_index)
ARM_Dynarmic::ARM_Dynarmic(std::shared_ptr<ExclusiveMonitor> exclusive_monitor,
std::size_t core_index)
: cb(std::make_unique<ARM_Dynarmic_Callbacks>(*this)), core_index{core_index},
exclusive_monitor{std::dynamic_pointer_cast<DynarmicExclusiveMonitor>(exclusive_monitor)} {
ThreadContext ctx;
@ -168,12 +169,12 @@ ARM_Dynarmic::ARM_Dynarmic(std::shared_ptr<ExclusiveMonitor> exclusive_monitor,

ARM_Dynarmic::~ARM_Dynarmic() = default;

void ARM_Dynarmic::MapBackingMemory(u64 address, size_t size, u8* memory,
void ARM_Dynarmic::MapBackingMemory(u64 address, std::size_t size, u8* memory,
Kernel::VMAPermission perms) {
inner_unicorn.MapBackingMemory(address, size, memory, perms);
}

void ARM_Dynarmic::UnmapMemory(u64 address, size_t size) {
void ARM_Dynarmic::UnmapMemory(u64 address, std::size_t size) {
inner_unicorn.UnmapMemory(address, size);
}

@ -193,29 +194,20 @@ void ARM_Dynarmic::SetReg(int index, u64 value) {
jit->SetRegister(index, value);
}

u128 ARM_Dynarmic::GetExtReg(int index) const {
u128 ARM_Dynarmic::GetVectorReg(int index) const {
return jit->GetVector(index);
}

void ARM_Dynarmic::SetExtReg(int index, u128 value) {
void ARM_Dynarmic::SetVectorReg(int index, u128 value) {
jit->SetVector(index, value);
}

u32 ARM_Dynarmic::GetVFPReg(int /*index*/) const {
UNIMPLEMENTED();
return {};
}

void ARM_Dynarmic::SetVFPReg(int /*index*/, u32 /*value*/) {
UNIMPLEMENTED();
}

u32 ARM_Dynarmic::GetCPSR() const {
u32 ARM_Dynarmic::GetPSTATE() const {
return jit->GetPstate();
}

void ARM_Dynarmic::SetCPSR(u32 cpsr) {
jit->SetPstate(cpsr);
void ARM_Dynarmic::SetPSTATE(u32 pstate) {
jit->SetPstate(pstate);
}

u64 ARM_Dynarmic::GetTlsAddress() const {
@ -238,18 +230,18 @@ void ARM_Dynarmic::SaveContext(ThreadContext& ctx) {
ctx.cpu_registers = jit->GetRegisters();
ctx.sp = jit->GetSP();
ctx.pc = jit->GetPC();
ctx.cpsr = jit->GetPstate();
ctx.fpu_registers = jit->GetVectors();
ctx.fpscr = jit->GetFpcr();
ctx.pstate = jit->GetPstate();
ctx.vector_registers = jit->GetVectors();
ctx.fpcr = jit->GetFpcr();
}

void ARM_Dynarmic::LoadContext(const ThreadContext& ctx) {
jit->SetRegisters(ctx.cpu_registers);
jit->SetSP(ctx.sp);
jit->SetPC(ctx.pc);
jit->SetPstate(static_cast<u32>(ctx.cpsr));
jit->SetVectors(ctx.fpu_registers);
jit->SetFpcr(static_cast<u32>(ctx.fpscr));
jit->SetPstate(static_cast<u32>(ctx.pstate));
jit->SetVectors(ctx.vector_registers);
jit->SetFpcr(static_cast<u32>(ctx.fpcr));
}

void ARM_Dynarmic::PrepareReschedule() {
@ -269,10 +261,10 @@ void ARM_Dynarmic::PageTableChanged() {
current_page_table = Memory::GetCurrentPageTable();
}

DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(size_t core_count) : monitor(core_count) {}
DynarmicExclusiveMonitor::DynarmicExclusiveMonitor(std::size_t core_count) : monitor(core_count) {}
DynarmicExclusiveMonitor::~DynarmicExclusiveMonitor() = default;

void DynarmicExclusiveMonitor::SetExclusive(size_t core_index, VAddr addr) {
void DynarmicExclusiveMonitor::SetExclusive(std::size_t core_index, VAddr addr) {
// Size doesn't actually matter.
monitor.Mark(core_index, addr, 16);
}
@ -281,30 +273,30 @@ void DynarmicExclusiveMonitor::ClearExclusive() {
monitor.Clear();
}

bool DynarmicExclusiveMonitor::ExclusiveWrite8(size_t core_index, VAddr vaddr, u8 value) {
bool DynarmicExclusiveMonitor::ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) {
return monitor.DoExclusiveOperation(core_index, vaddr, 1,
[&] { Memory::Write8(vaddr, value); });
}

bool DynarmicExclusiveMonitor::ExclusiveWrite16(size_t core_index, VAddr vaddr, u16 value) {
bool DynarmicExclusiveMonitor::ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) {
return monitor.DoExclusiveOperation(core_index, vaddr, 2,
[&] { Memory::Write16(vaddr, value); });
}

bool DynarmicExclusiveMonitor::ExclusiveWrite32(size_t core_index, VAddr vaddr, u32 value) {
bool DynarmicExclusiveMonitor::ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) {
return monitor.DoExclusiveOperation(core_index, vaddr, 4,
[&] { Memory::Write32(vaddr, value); });
}

bool DynarmicExclusiveMonitor::ExclusiveWrite64(size_t core_index, VAddr vaddr, u64 value) {
bool DynarmicExclusiveMonitor::ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) {
return monitor.DoExclusiveOperation(core_index, vaddr, 8,
[&] { Memory::Write64(vaddr, value); });
}

bool DynarmicExclusiveMonitor::ExclusiveWrite128(size_t core_index, VAddr vaddr, u128 value) {
bool DynarmicExclusiveMonitor::ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) {
return monitor.DoExclusiveOperation(core_index, vaddr, 16, [&] {
Memory::Write64(vaddr, value[0]);
Memory::Write64(vaddr, value[1]);
Memory::Write64(vaddr + 0, value[0]);
Memory::Write64(vaddr + 8, value[1]);
});
}

@ -19,24 +19,22 @@ class DynarmicExclusiveMonitor;

class ARM_Dynarmic final : public ARM_Interface {
public:
ARM_Dynarmic(std::shared_ptr<ExclusiveMonitor> exclusive_monitor, size_t core_index);
ARM_Dynarmic(std::shared_ptr<ExclusiveMonitor> exclusive_monitor, std::size_t core_index);
~ARM_Dynarmic();

void MapBackingMemory(VAddr address, size_t size, u8* memory,
void MapBackingMemory(VAddr address, std::size_t size, u8* memory,
Kernel::VMAPermission perms) override;
void UnmapMemory(u64 address, size_t size) override;
void UnmapMemory(u64 address, std::size_t size) override;
void SetPC(u64 pc) override;
u64 GetPC() const override;
u64 GetReg(int index) const override;
void SetReg(int index, u64 value) override;
u128 GetExtReg(int index) const override;
void SetExtReg(int index, u128 value) override;
u32 GetVFPReg(int index) const override;
void SetVFPReg(int index, u32 value) override;
u32 GetCPSR() const override;
u128 GetVectorReg(int index) const override;
void SetVectorReg(int index, u128 value) override;
u32 GetPSTATE() const override;
void SetPSTATE(u32 pstate) override;
void Run() override;
void Step() override;
void SetCPSR(u32 cpsr) override;
VAddr GetTlsAddress() const override;
void SetTlsAddress(VAddr address) override;
void SetTPIDR_EL0(u64 value) override;
@ -59,7 +57,7 @@ private:
std::unique_ptr<Dynarmic::A64::Jit> jit;
ARM_Unicorn inner_unicorn;

size_t core_index;
std::size_t core_index;
std::shared_ptr<DynarmicExclusiveMonitor> exclusive_monitor;

Memory::PageTable* current_page_table = nullptr;
@ -67,17 +65,17 @@ private:

class DynarmicExclusiveMonitor final : public ExclusiveMonitor {
public:
explicit DynarmicExclusiveMonitor(size_t core_count);
explicit DynarmicExclusiveMonitor(std::size_t core_count);
~DynarmicExclusiveMonitor();

void SetExclusive(size_t core_index, VAddr addr) override;
void SetExclusive(std::size_t core_index, VAddr addr) override;
void ClearExclusive() override;

bool ExclusiveWrite8(size_t core_index, VAddr vaddr, u8 value) override;
bool ExclusiveWrite16(size_t core_index, VAddr vaddr, u16 value) override;
bool ExclusiveWrite32(size_t core_index, VAddr vaddr, u32 value) override;
bool ExclusiveWrite64(size_t core_index, VAddr vaddr, u64 value) override;
bool ExclusiveWrite128(size_t core_index, VAddr vaddr, u128 value) override;
bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) override;
bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) override;
bool ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) override;
bool ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) override;
bool ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) override;

private:
friend class ARM_Dynarmic;

@ -12,14 +12,14 @@ class ExclusiveMonitor {
public:
virtual ~ExclusiveMonitor();

virtual void SetExclusive(size_t core_index, VAddr addr) = 0;
virtual void SetExclusive(std::size_t core_index, VAddr addr) = 0;
virtual void ClearExclusive() = 0;

virtual bool ExclusiveWrite8(size_t core_index, VAddr vaddr, u8 value) = 0;
virtual bool ExclusiveWrite16(size_t core_index, VAddr vaddr, u16 value) = 0;
virtual bool ExclusiveWrite32(size_t core_index, VAddr vaddr, u32 value) = 0;
virtual bool ExclusiveWrite64(size_t core_index, VAddr vaddr, u64 value) = 0;
virtual bool ExclusiveWrite128(size_t core_index, VAddr vaddr, u128 value) = 0;
virtual bool ExclusiveWrite8(std::size_t core_index, VAddr vaddr, u8 value) = 0;
virtual bool ExclusiveWrite16(std::size_t core_index, VAddr vaddr, u16 value) = 0;
virtual bool ExclusiveWrite32(std::size_t core_index, VAddr vaddr, u32 value) = 0;
virtual bool ExclusiveWrite64(std::size_t core_index, VAddr vaddr, u64 value) = 0;
virtual bool ExclusiveWrite128(std::size_t core_index, VAddr vaddr, u128 value) = 0;
};

} // namespace Core

@ -90,12 +90,12 @@ ARM_Unicorn::~ARM_Unicorn() {
CHECKED(uc_close(uc));
}

void ARM_Unicorn::MapBackingMemory(VAddr address, size_t size, u8* memory,
void ARM_Unicorn::MapBackingMemory(VAddr address, std::size_t size, u8* memory,
Kernel::VMAPermission perms) {
CHECKED(uc_mem_map_ptr(uc, address, size, static_cast<u32>(perms), memory));
}

void ARM_Unicorn::UnmapMemory(VAddr address, size_t size) {
void ARM_Unicorn::UnmapMemory(VAddr address, std::size_t size) {
CHECKED(uc_mem_unmap(uc, address, size));
}

@ -131,33 +131,24 @@ void ARM_Unicorn::SetReg(int regn, u64 val) {
CHECKED(uc_reg_write(uc, treg, &val));
}

u128 ARM_Unicorn::GetExtReg(int /*index*/) const {
u128 ARM_Unicorn::GetVectorReg(int /*index*/) const {
UNIMPLEMENTED();
static constexpr u128 res{};
return res;
}

void ARM_Unicorn::SetExtReg(int /*index*/, u128 /*value*/) {
void ARM_Unicorn::SetVectorReg(int /*index*/, u128 /*value*/) {
UNIMPLEMENTED();
}

u32 ARM_Unicorn::GetVFPReg(int /*index*/) const {
UNIMPLEMENTED();
return {};
}

void ARM_Unicorn::SetVFPReg(int /*index*/, u32 /*value*/) {
UNIMPLEMENTED();
}

u32 ARM_Unicorn::GetCPSR() const {
u32 ARM_Unicorn::GetPSTATE() const {
u64 nzcv{};
CHECKED(uc_reg_read(uc, UC_ARM64_REG_NZCV, &nzcv));
return static_cast<u32>(nzcv);
}

void ARM_Unicorn::SetCPSR(u32 cpsr) {
u64 nzcv = cpsr;
void ARM_Unicorn::SetPSTATE(u32 pstate) {
u64 nzcv = pstate;
CHECKED(uc_reg_write(uc, UC_ARM64_REG_NZCV, &nzcv));
}

@ -219,7 +210,7 @@ void ARM_Unicorn::SaveContext(ThreadContext& ctx) {

CHECKED(uc_reg_read(uc, UC_ARM64_REG_SP, &ctx.sp));
CHECKED(uc_reg_read(uc, UC_ARM64_REG_PC, &ctx.pc));
CHECKED(uc_reg_read(uc, UC_ARM64_REG_NZCV, &ctx.cpsr));
CHECKED(uc_reg_read(uc, UC_ARM64_REG_NZCV, &ctx.pstate));

for (auto i = 0; i < 29; ++i) {
uregs[i] = UC_ARM64_REG_X0 + i;
@ -234,7 +225,7 @@ void ARM_Unicorn::SaveContext(ThreadContext& ctx) {

for (int i = 0; i < 32; ++i) {
uregs[i] = UC_ARM64_REG_Q0 + i;
tregs[i] = &ctx.fpu_registers[i];
tregs[i] = &ctx.vector_registers[i];
}

CHECKED(uc_reg_read_batch(uc, uregs, tregs, 32));
@ -246,7 +237,7 @@ void ARM_Unicorn::LoadContext(const ThreadContext& ctx) {

CHECKED(uc_reg_write(uc, UC_ARM64_REG_SP, &ctx.sp));
CHECKED(uc_reg_write(uc, UC_ARM64_REG_PC, &ctx.pc));
CHECKED(uc_reg_write(uc, UC_ARM64_REG_NZCV, &ctx.cpsr));
CHECKED(uc_reg_write(uc, UC_ARM64_REG_NZCV, &ctx.pstate));

for (int i = 0; i < 29; ++i) {
uregs[i] = UC_ARM64_REG_X0 + i;
@ -261,7 +252,7 @@ void ARM_Unicorn::LoadContext(const ThreadContext& ctx) {

for (auto i = 0; i < 32; ++i) {
uregs[i] = UC_ARM64_REG_Q0 + i;
tregs[i] = (void*)&ctx.fpu_registers[i];
tregs[i] = (void*)&ctx.vector_registers[i];
}

CHECKED(uc_reg_write_batch(uc, uregs, tregs, 32));

@ -15,19 +15,17 @@ class ARM_Unicorn final : public ARM_Interface {
public:
ARM_Unicorn();
~ARM_Unicorn();
void MapBackingMemory(VAddr address, size_t size, u8* memory,
void MapBackingMemory(VAddr address, std::size_t size, u8* memory,
Kernel::VMAPermission perms) override;
void UnmapMemory(VAddr address, size_t size) override;
void UnmapMemory(VAddr address, std::size_t size) override;
void SetPC(u64 pc) override;
u64 GetPC() const override;
u64 GetReg(int index) const override;
void SetReg(int index, u64 value) override;
u128 GetExtReg(int index) const override;
void SetExtReg(int index, u128 value) override;
u32 GetVFPReg(int index) const override;
void SetVFPReg(int index, u32 value) override;
u32 GetCPSR() const override;
void SetCPSR(u32 cpsr) override;
u128 GetVectorReg(int index) const override;
void SetVectorReg(int index, u128 value) override;
u32 GetPSTATE() const override;
void SetPSTATE(u32 pstate) override;
VAddr GetTlsAddress() const override;
void SetTlsAddress(VAddr address) override;
void SetTPIDR_EL0(u64 value) override;

@ -140,7 +140,7 @@ struct System::Impl {

cpu_barrier = std::make_shared<CpuBarrier>();
cpu_exclusive_monitor = Cpu::MakeExclusiveMonitor(cpu_cores.size());
for (size_t index = 0; index < cpu_cores.size(); ++index) {
for (std::size_t index = 0; index < cpu_cores.size(); ++index) {
cpu_cores[index] = std::make_shared<Cpu>(cpu_exclusive_monitor, cpu_barrier, index);
}

@ -161,7 +161,7 @@ struct System::Impl {
// CPU core 0 is run on the main thread
thread_to_cpu[std::this_thread::get_id()] = cpu_cores[0];
if (Settings::values.use_multi_core) {
for (size_t index = 0; index < cpu_core_threads.size(); ++index) {
for (std::size_t index = 0; index < cpu_core_threads.size(); ++index) {
cpu_core_threads[index] =
std::make_unique<std::thread>(RunCpuCore, cpu_cores[index + 1]);
thread_to_cpu[cpu_core_threads[index]->get_id()] = cpu_cores[index + 1];
@ -285,7 +285,7 @@ struct System::Impl {
std::shared_ptr<CpuBarrier> cpu_barrier;
std::array<std::shared_ptr<Cpu>, NUM_CPU_CORES> cpu_cores;
std::array<std::unique_ptr<std::thread>, NUM_CPU_CORES - 1> cpu_core_threads;
size_t active_core{}; ///< Active core, only used in single thread mode
std::size_t active_core{}; ///< Active core, only used in single thread mode

/// Service manager
std::shared_ptr<Service::SM::ServiceManager> service_manager;
@ -348,7 +348,7 @@ ARM_Interface& System::CurrentArmInterface() {
return CurrentCpuCore().ArmInterface();
}

size_t System::CurrentCoreIndex() {
std::size_t System::CurrentCoreIndex() {
return CurrentCpuCore().CoreIndex();
}

@ -356,7 +356,7 @@ Kernel::Scheduler& System::CurrentScheduler() {
return *CurrentCpuCore().Scheduler();
}

const std::shared_ptr<Kernel::Scheduler>& System::Scheduler(size_t core_index) {
const std::shared_ptr<Kernel::Scheduler>& System::Scheduler(std::size_t core_index) {
ASSERT(core_index < NUM_CPU_CORES);
return impl->cpu_cores[core_index]->Scheduler();
}
@ -369,12 +369,12 @@ const Kernel::SharedPtr<Kernel::Process>& System::CurrentProcess() const {
return impl->kernel.CurrentProcess();
}

ARM_Interface& System::ArmInterface(size_t core_index) {
ARM_Interface& System::ArmInterface(std::size_t core_index) {
ASSERT(core_index < NUM_CPU_CORES);
return impl->cpu_cores[core_index]->ArmInterface();
}

Cpu& System::CpuCore(size_t core_index) {
Cpu& System::CpuCore(std::size_t core_index) {
ASSERT(core_index < NUM_CPU_CORES);
return *impl->cpu_cores[core_index];
}

@ -145,16 +145,16 @@ public:
ARM_Interface& CurrentArmInterface();

/// Gets the index of the currently running CPU core
size_t CurrentCoreIndex();
std::size_t CurrentCoreIndex();

/// Gets the scheduler for the CPU core that is currently running
Kernel::Scheduler& CurrentScheduler();

/// Gets an ARM interface to the CPU core with the specified index
ARM_Interface& ArmInterface(size_t core_index);
ARM_Interface& ArmInterface(std::size_t core_index);

/// Gets a CPU interface to the CPU core with the specified index
Cpu& CpuCore(size_t core_index);
Cpu& CpuCore(std::size_t core_index);

/// Gets the exclusive monitor
ExclusiveMonitor& Monitor();
@ -172,7 +172,7 @@ public:
const VideoCore::RendererBase& Renderer() const;

/// Gets the scheduler for the CPU core with the specified index
const std::shared_ptr<Kernel::Scheduler>& Scheduler(size_t core_index);
const std::shared_ptr<Kernel::Scheduler>& Scheduler(std::size_t core_index);

/// Provides a reference to the current process
Kernel::SharedPtr<Kernel::Process>& CurrentProcess();

@ -9,6 +9,7 @@
#ifdef ARCHITECTURE_x86_64
#include "core/arm/dynarmic/arm_dynarmic.h"
#endif
#include "core/arm/exclusive_monitor.h"
#include "core/arm/unicorn/arm_unicorn.h"
#include "core/core_cpu.h"
#include "core/core_timing.h"
@ -49,7 +50,7 @@ bool CpuBarrier::Rendezvous() {
}

Cpu::Cpu(std::shared_ptr<ExclusiveMonitor> exclusive_monitor,
std::shared_ptr<CpuBarrier> cpu_barrier, size_t core_index)
std::shared_ptr<CpuBarrier> cpu_barrier, std::size_t core_index)
: cpu_barrier{std::move(cpu_barrier)}, core_index{core_index} {

if (Settings::values.use_cpu_jit) {
@ -66,7 +67,9 @@ Cpu::Cpu(std::shared_ptr<ExclusiveMonitor> exclusive_monitor,
scheduler = std::make_shared<Kernel::Scheduler>(arm_interface.get());
}

std::shared_ptr<ExclusiveMonitor> Cpu::MakeExclusiveMonitor(size_t num_cores) {
Cpu::~Cpu() = default;

std::shared_ptr<ExclusiveMonitor> Cpu::MakeExclusiveMonitor(std::size_t num_cores) {
if (Settings::values.use_cpu_jit) {
#ifdef ARCHITECTURE_x86_64
return std::make_shared<DynarmicExclusiveMonitor>(num_cores);

@ -6,11 +6,10 @@

#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <memory>
#include <mutex>
#include <string>
#include "common/common_types.h"
#include "core/arm/exclusive_monitor.h"

namespace Kernel {
class Scheduler;
@ -19,6 +18,7 @@ class Scheduler;
namespace Core {

class ARM_Interface;
class ExclusiveMonitor;

constexpr unsigned NUM_CPU_CORES{4};

@ -42,7 +42,8 @@ private:
class Cpu {
public:
Cpu(std::shared_ptr<ExclusiveMonitor> exclusive_monitor,
std::shared_ptr<CpuBarrier> cpu_barrier, size_t core_index);
std::shared_ptr<CpuBarrier> cpu_barrier, std::size_t core_index);
~Cpu();

void RunLoop(bool tight_loop = true);

@ -66,11 +67,11 @@ public:
return core_index == 0;
}

size_t CoreIndex() const {
std::size_t CoreIndex() const {
return core_index;
}

static std::shared_ptr<ExclusiveMonitor> MakeExclusiveMonitor(size_t num_cores);
static std::shared_ptr<ExclusiveMonitor> MakeExclusiveMonitor(std::size_t num_cores);

private:
void Reschedule();
@ -80,7 +81,7 @@ private:
std::shared_ptr<Kernel::Scheduler> scheduler;

std::atomic<bool> reschedule_pending = false;
size_t core_index;
std::size_t core_index;
};

} // namespace Core

@ -10,9 +10,9 @@

namespace Core::Crypto {
namespace {
std::vector<u8> CalculateNintendoTweak(size_t sector_id) {
std::vector<u8> CalculateNintendoTweak(std::size_t sector_id) {
std::vector<u8> out(0x10);
for (size_t i = 0xF; i <= 0xF; --i) {
for (std::size_t i = 0xF; i <= 0xF; --i) {
out[i] = sector_id & 0xFF;
sector_id >>= 8;
}
@ -20,11 +20,14 @@ std::vector<u8> CalculateNintendoTweak(size_t sector_id) {
}
} // Anonymous namespace

static_assert(static_cast<size_t>(Mode::CTR) == static_cast<size_t>(MBEDTLS_CIPHER_AES_128_CTR),
static_assert(static_cast<std::size_t>(Mode::CTR) ==
static_cast<std::size_t>(MBEDTLS_CIPHER_AES_128_CTR),
"CTR has incorrect value.");
static_assert(static_cast<size_t>(Mode::ECB) == static_cast<size_t>(MBEDTLS_CIPHER_AES_128_ECB),
static_assert(static_cast<std::size_t>(Mode::ECB) ==
static_cast<std::size_t>(MBEDTLS_CIPHER_AES_128_ECB),
"ECB has incorrect value.");
static_assert(static_cast<size_t>(Mode::XTS) == static_cast<size_t>(MBEDTLS_CIPHER_AES_128_XTS),
static_assert(static_cast<std::size_t>(Mode::XTS) ==
static_cast<std::size_t>(MBEDTLS_CIPHER_AES_128_XTS),
"XTS has incorrect value.");

// Structure to hide mbedtls types from header file
@ -33,7 +36,7 @@ struct CipherContext {
mbedtls_cipher_context_t decryption_context;
};

template <typename Key, size_t KeySize>
template <typename Key, std::size_t KeySize>
Crypto::AESCipher<Key, KeySize>::AESCipher(Key key, Mode mode)
: ctx(std::make_unique<CipherContext>()) {
mbedtls_cipher_init(&ctx->encryption_context);
@ -54,26 +57,26 @@ Crypto::AESCipher<Key, KeySize>::AESCipher(Key key, Mode mode)
//"Failed to set key on mbedtls ciphers.");
}

template <typename Key, size_t KeySize>
template <typename Key, std::size_t KeySize>
AESCipher<Key, KeySize>::~AESCipher() {
mbedtls_cipher_free(&ctx->encryption_context);
mbedtls_cipher_free(&ctx->decryption_context);
}

template <typename Key, size_t KeySize>
template <typename Key, std::size_t KeySize>
void AESCipher<Key, KeySize>::SetIV(std::vector<u8> iv) {
ASSERT_MSG((mbedtls_cipher_set_iv(&ctx->encryption_context, iv.data(), iv.size()) ||
mbedtls_cipher_set_iv(&ctx->decryption_context, iv.data(), iv.size())) == 0,
"Failed to set IV on mbedtls ciphers.");
}

template <typename Key, size_t KeySize>
void AESCipher<Key, KeySize>::Transcode(const u8* src, size_t size, u8* dest, Op op) const {
template <typename Key, std::size_t KeySize>
void AESCipher<Key, KeySize>::Transcode(const u8* src, std::size_t size, u8* dest, Op op) const {
auto* const context = op == Op::Encrypt ? &ctx->encryption_context : &ctx->decryption_context;

mbedtls_cipher_reset(context);

size_t written = 0;
std::size_t written = 0;
if (mbedtls_cipher_get_cipher_mode(context) == MBEDTLS_MODE_XTS) {
mbedtls_cipher_update(context, src, size, dest, &written);
if (written != size) {
@ -90,8 +93,8 @@ void AESCipher<Key, KeySize>::Transcode(const u8* src, size_t size, u8* dest, Op
return;
}

for (size_t offset = 0; offset < size; offset += block_size) {
auto length = std::min<size_t>(block_size, size - offset);
for (std::size_t offset = 0; offset < size; offset += block_size) {
auto length = std::min<std::size_t>(block_size, size - offset);
mbedtls_cipher_update(context, src + offset, length, dest + offset, &written);
if (written != length) {
if (length < block_size) {
@ -110,12 +113,12 @@ void AESCipher<Key, KeySize>::Transcode(const u8* src, size_t size, u8* dest, Op
mbedtls_cipher_finish(context, nullptr, nullptr);
}

template <typename Key, size_t KeySize>
void AESCipher<Key, KeySize>::XTSTranscode(const u8* src, size_t size, u8* dest, size_t sector_id,
size_t sector_size, Op op) {
template <typename Key, std::size_t KeySize>
void AESCipher<Key, KeySize>::XTSTranscode(const u8* src, std::size_t size, u8* dest,
std::size_t sector_id, std::size_t sector_size, Op op) {
ASSERT_MSG(size % sector_size == 0, "XTS decryption size must be a multiple of sector size.");

for (size_t i = 0; i < size; i += sector_size) {
for (std::size_t i = 0; i < size; i += sector_size) {
SetIV(CalculateNintendoTweak(sector_id++));
Transcode<u8, u8>(src + i, sector_size, dest + i, op);
}

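XTSTranscode above re-seeds the cipher for every sector with CalculateNintendoTweak, which lays the sector index out big-endian across a 16-byte IV. A standalone sketch of that layout, returning std::array instead of std::vector; purely illustrative and not the project's function.

#include <array>
#include <cstddef>
#include <cstdint>

// Illustrative: byte 15 holds the least significant byte of the sector id and
// byte 0 the most significant, i.e. a big-endian 128-bit tweak counter.
std::array<std::uint8_t, 0x10> NintendoTweakSketch(std::size_t sector_id) {
    std::array<std::uint8_t, 0x10> out{};
    for (std::size_t i = 0xF; i <= 0xF; --i) { // unsigned wrap past zero ends the loop, same idiom as above
        out[i] = sector_id & 0xFF;
        sector_id >>= 8;
    }
    return out;
}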
@ -25,7 +25,7 @@ enum class Op {
Decrypt,
};

template <typename Key, size_t KeySize = sizeof(Key)>
template <typename Key, std::size_t KeySize = sizeof(Key)>
class AESCipher {
static_assert(std::is_same_v<Key, std::array<u8, KeySize>>, "Key must be std::array of u8.");
static_assert(KeySize == 0x10 || KeySize == 0x20, "KeySize must be 128 or 256.");
@ -38,25 +38,25 @@ public:
void SetIV(std::vector<u8> iv);

template <typename Source, typename Dest>
void Transcode(const Source* src, size_t size, Dest* dest, Op op) const {
void Transcode(const Source* src, std::size_t size, Dest* dest, Op op) const {
static_assert(std::is_trivially_copyable_v<Source> && std::is_trivially_copyable_v<Dest>,
"Transcode source and destination types must be trivially copyable.");
Transcode(reinterpret_cast<const u8*>(src), size, reinterpret_cast<u8*>(dest), op);
}

void Transcode(const u8* src, size_t size, u8* dest, Op op) const;
void Transcode(const u8* src, std::size_t size, u8* dest, Op op) const;

template <typename Source, typename Dest>
void XTSTranscode(const Source* src, size_t size, Dest* dest, size_t sector_id,
size_t sector_size, Op op) {
void XTSTranscode(const Source* src, std::size_t size, Dest* dest, std::size_t sector_id,
std::size_t sector_size, Op op) {
static_assert(std::is_trivially_copyable_v<Source> && std::is_trivially_copyable_v<Dest>,
"XTSTranscode source and destination types must be trivially copyable.");
XTSTranscode(reinterpret_cast<const u8*>(src), size, reinterpret_cast<u8*>(dest), sector_id,
sector_size, op);
}

void XTSTranscode(const u8* src, size_t size, u8* dest, size_t sector_id, size_t sector_size,
Op op);
void XTSTranscode(const u8* src, std::size_t size, u8* dest, std::size_t sector_id,
std::size_t sector_size, Op op);

private:
std::unique_ptr<CipherContext> ctx;

@ -8,11 +8,12 @@

namespace Core::Crypto {

CTREncryptionLayer::CTREncryptionLayer(FileSys::VirtualFile base_, Key128 key_, size_t base_offset)
CTREncryptionLayer::CTREncryptionLayer(FileSys::VirtualFile base_, Key128 key_,
std::size_t base_offset)
: EncryptionLayer(std::move(base_)), base_offset(base_offset), cipher(key_, Mode::CTR),
iv(16, 0) {}

size_t CTREncryptionLayer::Read(u8* data, size_t length, size_t offset) const {
std::size_t CTREncryptionLayer::Read(u8* data, std::size_t length, std::size_t offset) const {
if (length == 0)
return 0;

@ -28,7 +29,7 @@ size_t CTREncryptionLayer::Read(u8* data, size_t length, size_t offset) const {
std::vector<u8> block = base->ReadBytes(0x10, offset - sector_offset);
UpdateIV(base_offset + offset - sector_offset);
cipher.Transcode(block.data(), block.size(), block.data(), Op::Decrypt);
size_t read = 0x10 - sector_offset;
std::size_t read = 0x10 - sector_offset;

if (length + sector_offset < 0x10) {
std::memcpy(data, block.data() + sector_offset, std::min<u64>(length, read));
@ -43,9 +44,9 @@ void CTREncryptionLayer::SetIV(const std::vector<u8>& iv_) {
iv.assign(iv_.cbegin(), iv_.cbegin() + length);
}

void CTREncryptionLayer::UpdateIV(size_t offset) const {
void CTREncryptionLayer::UpdateIV(std::size_t offset) const {
offset >>= 4;
for (size_t i = 0; i < 8; ++i) {
for (std::size_t i = 0; i < 8; ++i) {
iv[16 - i - 1] = offset & 0xFF;
offset >>= 8;
}

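UpdateIV above turns a byte offset into the AES-CTR counter: divide by the 16-byte block size and write the result big-endian into the last 8 bytes of the IV, leaving the first 8 bytes (the per-section nonce) untouched. A small sketch of the same computation on a plain array; names are illustrative only.

#include <array>
#include <cstddef>
#include <cstdint>

// Illustrative: bytes 8..15 of the IV receive the block index (offset / 16) in
// big-endian order; bytes 0..7 are assumed to already hold the nonce.
void UpdateCtrIvSketch(std::array<std::uint8_t, 16>& iv, std::size_t offset) {
    offset >>= 4; // byte offset -> 16-byte block index
    for (std::size_t i = 0; i < 8; ++i) {
        iv[16 - i - 1] = offset & 0xFF;
        offset >>= 8;
    }
}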
@ -14,20 +14,20 @@ namespace Core::Crypto {
// Sits on top of a VirtualFile and provides CTR-mode AES decription.
class CTREncryptionLayer : public EncryptionLayer {
public:
CTREncryptionLayer(FileSys::VirtualFile base, Key128 key, size_t base_offset);
CTREncryptionLayer(FileSys::VirtualFile base, Key128 key, std::size_t base_offset);

size_t Read(u8* data, size_t length, size_t offset) const override;
std::size_t Read(u8* data, std::size_t length, std::size_t offset) const override;

void SetIV(const std::vector<u8>& iv);

private:
size_t base_offset;
std::size_t base_offset;

// Must be mutable as operations modify cipher contexts.
mutable AESCipher<Key128> cipher;
mutable std::vector<u8> iv;

void UpdateIV(size_t offset) const;
void UpdateIV(std::size_t offset) const;
};

} // namespace Core::Crypto

@ -12,11 +12,11 @@ std::string EncryptionLayer::GetName() const {
return base->GetName();
}

size_t EncryptionLayer::GetSize() const {
std::size_t EncryptionLayer::GetSize() const {
return base->GetSize();
}

bool EncryptionLayer::Resize(size_t new_size) {
bool EncryptionLayer::Resize(std::size_t new_size) {
return false;
}

@ -32,7 +32,7 @@ bool EncryptionLayer::IsReadable() const {
return true;
}

size_t EncryptionLayer::Write(const u8* data, size_t length, size_t offset) {
std::size_t EncryptionLayer::Write(const u8* data, std::size_t length, std::size_t offset) {
return 0;
}

@ -15,15 +15,15 @@ class EncryptionLayer : public FileSys::VfsFile {
public:
explicit EncryptionLayer(FileSys::VirtualFile base);

size_t Read(u8* data, size_t length, size_t offset) const override = 0;
std::size_t Read(u8* data, std::size_t length, std::size_t offset) const override = 0;

std::string GetName() const override;
size_t GetSize() const override;
bool Resize(size_t new_size) override;
std::size_t GetSize() const override;
bool Resize(std::size_t new_size) override;
std::shared_ptr<FileSys::VfsDirectory> GetContainingDirectory() const override;
bool IsWritable() const override;
bool IsReadable() const override;
size_t Write(const u8* data, size_t length, size_t offset) override;
std::size_t Write(const u8* data, std::size_t length, std::size_t offset) override;
bool Rename(std::string_view name) override;

protected:

@ -54,7 +54,7 @@ boost::optional<Key128> DeriveSDSeed() {
return boost::none;

std::array<u8, 0x10> buffer{};
size_t offset = 0;
std::size_t offset = 0;
for (; offset + 0x10 < save_43.GetSize(); ++offset) {
save_43.Seek(offset, SEEK_SET);
save_43.ReadBytes(buffer.data(), buffer.size());
@ -105,7 +105,7 @@ Loader::ResultStatus DeriveSDKeys(std::array<Key256, 2>& sd_keys, const KeyManag

// Combine sources and seed
for (auto& source : sd_key_sources) {
for (size_t i = 0; i < source.size(); ++i)
for (std::size_t i = 0; i < source.size(); ++i)
source[i] ^= sd_seed[i & 0xF];
}

@ -207,7 +207,7 @@ Key256 KeyManager::GetKey(S256KeyType id, u64 field1, u64 field2) const {
return s256_keys.at({id, field1, field2});
}

template <size_t Size>
template <std::size_t Size>
void KeyManager::WriteKeyToFile(bool title_key, std::string_view keyname,
const std::array<u8, Size>& key) {
const std::string yuzu_keys_dir = FileUtil::GetUserPath(FileUtil::UserPath::KeysDir);

@ -108,7 +108,7 @@ private:
void LoadFromFile(const std::string& filename, bool is_title_keys);
void AttemptLoadKeyFile(const std::string& dir1, const std::string& dir2,
const std::string& filename, bool title);
template <size_t Size>
template <std::size_t Size>
void WriteKeyToFile(bool title_key, std::string_view keyname, const std::array<u8, Size>& key);

static const boost::container::flat_map<std::string, KeyIndex<S128KeyType>> s128_file_id;

@ -14,7 +14,7 @@ constexpr u64 XTS_SECTOR_SIZE = 0x4000;
XTSEncryptionLayer::XTSEncryptionLayer(FileSys::VirtualFile base_, Key256 key_)
: EncryptionLayer(std::move(base_)), cipher(key_, Mode::XTS) {}

size_t XTSEncryptionLayer::Read(u8* data, size_t length, size_t offset) const {
std::size_t XTSEncryptionLayer::Read(u8* data, std::size_t length, std::size_t offset) const {
if (length == 0)
return 0;

@ -46,7 +46,7 @@ size_t XTSEncryptionLayer::Read(u8* data, size_t length, size_t offset) const {
block.resize(XTS_SECTOR_SIZE);
cipher.XTSTranscode(block.data(), block.size(), block.data(),
(offset - sector_offset) / XTS_SECTOR_SIZE, XTS_SECTOR_SIZE, Op::Decrypt);
const size_t read = XTS_SECTOR_SIZE - sector_offset;
const std::size_t read = XTS_SECTOR_SIZE - sector_offset;

if (length + sector_offset < XTS_SECTOR_SIZE) {
std::memcpy(data, block.data() + sector_offset, std::min<u64>(length, read));

@ -15,7 +15,7 @@ class XTSEncryptionLayer : public EncryptionLayer {
public:
XTSEncryptionLayer(FileSys::VirtualFile base, Key256 key);

size_t Read(u8* data, size_t length, size_t offset) const override;
std::size_t Read(u8* data, std::size_t length, std::size_t offset) const override;

private:
// Must be mutable as operations modify cipher contexts.

@ -41,13 +41,14 @@ XCI::XCI(VirtualFile file_) : file(std::move(file_)), partitions(0x4) {

for (XCIPartition partition :
{XCIPartition::Update, XCIPartition::Normal, XCIPartition::Secure, XCIPartition::Logo}) {
auto raw = main_hfs.GetFile(partition_names[static_cast<size_t>(partition)]);
auto raw = main_hfs.GetFile(partition_names[static_cast<std::size_t>(partition)]);
if (raw != nullptr)
partitions[static_cast<size_t>(partition)] = std::make_shared<PartitionFilesystem>(raw);
partitions[static_cast<std::size_t>(partition)] =
std::make_shared<PartitionFilesystem>(raw);
}

secure_partition = std::make_shared<NSP>(
main_hfs.GetFile(partition_names[static_cast<size_t>(XCIPartition::Secure)]));
main_hfs.GetFile(partition_names[static_cast<std::size_t>(XCIPartition::Secure)]));

const auto secure_ncas = secure_partition->GetNCAsCollapsed();
std::copy(secure_ncas.begin(), secure_ncas.end(), std::back_inserter(ncas));
@ -92,7 +93,7 @@ Loader::ResultStatus XCI::GetProgramNCAStatus() const {
}

VirtualDir XCI::GetPartition(XCIPartition partition) const {
return partitions[static_cast<size_t>(partition)];
return partitions[static_cast<std::size_t>(partition)];
}

std::shared_ptr<NSP> XCI::GetSecurePartitionNSP() const {
@ -168,11 +169,11 @@ bool XCI::ReplaceFileWithSubdirectory(VirtualFile file, VirtualDir dir) {
}

Loader::ResultStatus XCI::AddNCAFromPartition(XCIPartition part) {
if (partitions[static_cast<size_t>(part)] == nullptr) {
if (partitions[static_cast<std::size_t>(part)] == nullptr) {
return Loader::ResultStatus::ErrorXCIMissingPartition;
}

for (const VirtualFile& file : partitions[static_cast<size_t>(part)]->GetFiles()) {
for (const VirtualFile& file : partitions[static_cast<std::size_t>(part)]->GetFiles()) {
if (file->GetExtension() != "nca")
continue;
auto nca = std::make_shared<NCA>(file);
@ -187,7 +188,7 @@ Loader::ResultStatus XCI::AddNCAFromPartition(XCIPartition part) {
} else {
const u16 error_id = static_cast<u16>(nca->GetStatus());
LOG_CRITICAL(Loader, "Could not load NCA {}/{}, failed with error code {:04X} ({})",
partition_names[static_cast<size_t>(part)], nca->GetName(), error_id,
partition_names[static_cast<std::size_t>(part)], nca->GetName(), error_id,
nca->GetStatus());
}
}

@ -298,11 +298,11 @@ NCA::NCA(VirtualFile file_, VirtualFile bktr_base_romfs_, u64 bktr_base_ivfc_off
auto section = sections[i];

if (section.raw.header.filesystem_type == NCASectionFilesystemType::ROMFS) {
const size_t base_offset =
const std::size_t base_offset =
header.section_tables[i].media_offset * MEDIA_OFFSET_MULTIPLIER;
ivfc_offset = section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].offset;
const size_t romfs_offset = base_offset + ivfc_offset;
const size_t romfs_size = section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].size;
const std::size_t romfs_offset = base_offset + ivfc_offset;
const std::size_t romfs_size = section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].size;
auto raw = std::make_shared<OffsetVfsFile>(file, romfs_size, romfs_offset);
auto dec = Decrypt(section, raw, romfs_offset);

@ -25,7 +25,7 @@ enum EntryType : u8 {
struct Entry {
Entry(std::string_view view, EntryType entry_type, u64 entry_size)
: type{entry_type}, file_size{entry_size} {
const size_t copy_size = view.copy(filename, std::size(filename) - 1);
const std::size_t copy_size = view.copy(filename, std::size(filename) - 1);
filename[copy_size] = '\0';
}

@ -11,11 +11,11 @@
namespace FileSys {

bool operator>=(TitleType lhs, TitleType rhs) {
return static_cast<size_t>(lhs) >= static_cast<size_t>(rhs);
return static_cast<std::size_t>(lhs) >= static_cast<std::size_t>(rhs);
}

bool operator<=(TitleType lhs, TitleType rhs) {
return static_cast<size_t>(lhs) <= static_cast<size_t>(rhs);
return static_cast<std::size_t>(lhs) <= static_cast<std::size_t>(rhs);
}

CNMT::CNMT(VirtualFile file) {

@ -22,11 +22,11 @@ BKTR::BKTR(VirtualFile base_romfs_, VirtualFile bktr_romfs_, RelocationBlock rel
base_romfs(std::move(base_romfs_)), bktr_romfs(std::move(bktr_romfs_)),
encrypted(is_encrypted_), key(key_), base_offset(base_offset_), ivfc_offset(ivfc_offset_),
section_ctr(section_ctr_) {
for (size_t i = 0; i < relocation.number_buckets - 1; ++i) {
for (std::size_t i = 0; i < relocation.number_buckets - 1; ++i) {
relocation_buckets[i].entries.push_back({relocation.base_offsets[i + 1], 0, 0});
}

for (size_t i = 0; i < subsection.number_buckets - 1; ++i) {
for (std::size_t i = 0; i < subsection.number_buckets - 1; ++i) {
subsection_buckets[i].entries.push_back({subsection_buckets[i + 1].entries[0].address_patch,
{0},
subsection_buckets[i + 1].entries[0].ctr});
@ -37,7 +37,7 @@ BKTR::BKTR(VirtualFile base_romfs_, VirtualFile bktr_romfs_, RelocationBlock rel

BKTR::~BKTR() = default;

size_t BKTR::Read(u8* data, size_t length, size_t offset) const {
std::size_t BKTR::Read(u8* data, std::size_t length, std::size_t offset) const {
// Read out of bounds.
if (offset >= relocation.size)
return 0;
@ -69,14 +69,14 @@ size_t BKTR::Read(u8* data, size_t length, size_t offset) const {
std::vector<u8> iv(16);
auto subsection_ctr = subsection.ctr;
auto offset_iv = section_offset + base_offset;
for (size_t i = 0; i < section_ctr.size(); ++i)
for (std::size_t i = 0; i < section_ctr.size(); ++i)
iv[i] = section_ctr[0x8 - i - 1];
offset_iv >>= 4;
for (size_t i = 0; i < sizeof(u64); ++i) {
for (std::size_t i = 0; i < sizeof(u64); ++i) {
iv[0xF - i] = static_cast<u8>(offset_iv & 0xFF);
offset_iv >>= 8;
}
for (size_t i = 0; i < sizeof(u32); ++i) {
for (std::size_t i = 0; i < sizeof(u32); ++i) {
iv[0x7 - i] = static_cast<u8>(subsection_ctr & 0xFF);
subsection_ctr >>= 8;
}
@ -110,8 +110,8 @@ size_t BKTR::Read(u8* data, size_t length, size_t offset) const {
}

template <bool Subsection, typename BlockType, typename BucketType>
std::pair<size_t, size_t> BKTR::SearchBucketEntry(u64 offset, BlockType block,
BucketType buckets) const {
std::pair<std::size_t, std::size_t> BKTR::SearchBucketEntry(u64 offset, BlockType block,
BucketType buckets) const {
if constexpr (Subsection) {
const auto last_bucket = buckets[block.number_buckets - 1];
if (offset >= last_bucket.entries[last_bucket.number_entries].address_patch)
@ -120,18 +120,18 @@ std::pair<size_t, size_t> BKTR::SearchBucketEntry(u64 offset, BlockType block,
ASSERT_MSG(offset <= block.size, "Offset is out of bounds in BKTR relocation block.");
}

size_t bucket_id = std::count_if(block.base_offsets.begin() + 1,
block.base_offsets.begin() + block.number_buckets,
[&offset](u64 base_offset) { return base_offset <= offset; });
std::size_t bucket_id = std::count_if(
block.base_offsets.begin() + 1, block.base_offsets.begin() + block.number_buckets,
[&offset](u64 base_offset) { return base_offset <= offset; });

const auto bucket = buckets[bucket_id];

if (bucket.number_entries == 1)
return {bucket_id, 0};

size_t low = 0;
size_t mid = 0;
size_t high = bucket.number_entries - 1;
std::size_t low = 0;
std::size_t mid = 0;
std::size_t high = bucket.number_entries - 1;
while (low <= high) {
mid = (low + high) / 2;
if (bucket.entries[mid].address_patch > offset) {
@ -179,11 +179,11 @@ std::string BKTR::GetName() const {
return base_romfs->GetName();
}

size_t BKTR::GetSize() const {
std::size_t BKTR::GetSize() const {
return relocation.size;
}

bool BKTR::Resize(size_t new_size) {
bool BKTR::Resize(std::size_t new_size) {
return false;
}

@ -199,7 +199,7 @@ bool BKTR::IsReadable() const {
return true;
}

size_t BKTR::Write(const u8* data, size_t length, size_t offset) {
std::size_t BKTR::Write(const u8* data, std::size_t length, std::size_t offset) {
return 0;
}

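SearchBucketEntry above resolves an offset in two steps: count how many bucket base offsets are at or below it to choose a bucket, then binary-search that bucket's entries for the last one starting at or before the offset. The sketch below restates the idea over plain vectors; the names and types are illustrative, it is not the BKTR data layout, and it assumes a non-empty entry list whose first entry covers the offset.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Illustrative two-level lookup: outer vector = bucket base offsets (ascending),
// inner vectors = entry start offsets inside each bucket (ascending).
std::pair<std::size_t, std::size_t> SearchBucketSketch(
    const std::vector<std::uint64_t>& bucket_base_offsets,
    const std::vector<std::vector<std::uint64_t>>& bucket_entries, std::uint64_t offset) {
    // Count the buckets that start at or before the offset; the last of those owns it.
    const std::size_t bucket_id = static_cast<std::size_t>(
        std::count_if(bucket_base_offsets.begin() + 1, bucket_base_offsets.end(),
                      [offset](std::uint64_t base) { return base <= offset; }));

    const auto& entries = bucket_entries[bucket_id];
    // Binary search for the last entry whose start offset is <= offset.
    std::size_t low = 0;
    std::size_t high = entries.size() - 1;
    while (low < high) {
        const std::size_t mid = (low + high + 1) / 2; // bias up so the loop terminates
        if (entries[mid] > offset) {
            high = mid - 1;
        } else {
            low = mid;
        }
    }
    return {bucket_id, low};
}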
@ -98,13 +98,13 @@ public:
Core::Crypto::Key128 key, u64 base_offset, u64 ivfc_offset, std::array<u8, 8> section_ctr);
~BKTR() override;

size_t Read(u8* data, size_t length, size_t offset) const override;
std::size_t Read(u8* data, std::size_t length, std::size_t offset) const override;

std::string GetName() const override;

size_t GetSize() const override;
std::size_t GetSize() const override;

bool Resize(size_t new_size) override;
bool Resize(std::size_t new_size) override;

std::shared_ptr<VfsDirectory> GetContainingDirectory() const override;

@ -112,14 +112,14 @@ public:

bool IsReadable() const override;

size_t Write(const u8* data, size_t length, size_t offset) override;
std::size_t Write(const u8* data, std::size_t length, std::size_t offset) override;

bool Rename(std::string_view name) override;

private:
template <bool Subsection, typename BlockType, typename BucketType>
std::pair<size_t, size_t> SearchBucketEntry(u64 offset, BlockType block,
BucketType buckets) const;
std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, BlockType block,
BucketType buckets) const;

RelocationEntry GetRelocationEntry(u64 offset) const;
RelocationEntry GetNextRelocationEntry(u64 offset) const;
|
@ -42,21 +42,21 @@ PartitionFilesystem::PartitionFilesystem(std::shared_ptr<VfsFile> file) {

is_hfs = pfs_header.magic == Common::MakeMagic('H', 'F', 'S', '0');

size_t entry_size = is_hfs ? sizeof(HFSEntry) : sizeof(PFSEntry);
size_t metadata_size =
std::size_t entry_size = is_hfs ? sizeof(HFSEntry) : sizeof(PFSEntry);
std::size_t metadata_size =
sizeof(Header) + (pfs_header.num_entries * entry_size) + pfs_header.strtab_size;

// Actually read in now...
std::vector<u8> file_data = file->ReadBytes(metadata_size);
const size_t total_size = file_data.size();
const std::size_t total_size = file_data.size();

if (total_size != metadata_size) {
status = Loader::ResultStatus::ErrorIncorrectPFSFileSize;
return;
}

size_t entries_offset = sizeof(Header);
size_t strtab_offset = entries_offset + (pfs_header.num_entries * entry_size);
std::size_t entries_offset = sizeof(Header);
std::size_t strtab_offset = entries_offset + (pfs_header.num_entries * entry_size);
content_offset = strtab_offset + pfs_header.strtab_size;
for (u16 i = 0; i < pfs_header.num_entries; i++) {
FSEntry entry;
|
@ -79,7 +79,7 @@ private:

Header pfs_header{};
bool is_hfs = false;
size_t content_offset = 0;
std::size_t content_offset = 0;

std::vector<VirtualFile> pfs_files;
std::vector<VirtualDir> pfs_dirs;
|
@ -21,7 +21,7 @@ constexpr u64 SINGLE_BYTE_MODULUS = 0x100;
std::string FormatTitleVersion(u32 version, TitleVersionFormat format) {
std::array<u8, sizeof(u32)> bytes{};
bytes[0] = version % SINGLE_BYTE_MODULUS;
for (size_t i = 1; i < bytes.size(); ++i) {
for (std::size_t i = 1; i < bytes.size(); ++i) {
version /= SINGLE_BYTE_MODULUS;
bytes[i] = version % SINGLE_BYTE_MODULUS;
}
@ -36,7 +36,7 @@ constexpr std::array<const char*, 1> PATCH_TYPE_NAMES{
};

std::string FormatPatchTypeName(PatchType type) {
return PATCH_TYPE_NAMES.at(static_cast<size_t>(type));
return PATCH_TYPE_NAMES.at(static_cast<std::size_t>(type));
}

PatchManager::PatchManager(u64 title_id) : title_id(title_id) {}
|
@ -13,7 +13,7 @@
namespace FileSys {

Loader::ResultStatus ProgramMetadata::Load(VirtualFile file) {
size_t total_size = static_cast<size_t>(file->GetSize());
std::size_t total_size = static_cast<std::size_t>(file->GetSize());
if (total_size < sizeof(Header))
return Loader::ResultStatus::ErrorBadNPDMHeader;

|
@ -62,11 +62,11 @@ static std::string GetCNMTName(TitleType type, u64 title_id) {
"" ///< Currently unknown 'DeltaTitle'
};

auto index = static_cast<size_t>(type);
auto index = static_cast<std::size_t>(type);
// If the index is after the jump in TitleType, subtract it out.
if (index >= static_cast<size_t>(TitleType::Application)) {
index -= static_cast<size_t>(TitleType::Application) -
static_cast<size_t>(TitleType::FirmwarePackageB);
if (index >= static_cast<std::size_t>(TitleType::Application)) {
index -= static_cast<std::size_t>(TitleType::Application) -
static_cast<std::size_t>(TitleType::FirmwarePackageB);
}
return fmt::format("{}_{:016x}.cnmt", TITLE_TYPE_NAMES[index], title_id);
}
@ -105,7 +105,7 @@ VirtualFile RegisteredCache::OpenFileOrDirectoryConcat(const VirtualDir& dir,
} else {
std::vector<VirtualFile> concat;
// Since the files are a two-digit hex number, max is FF.
for (size_t i = 0; i < 0x100; ++i) {
for (std::size_t i = 0; i < 0x100; ++i) {
auto next = nca_dir->GetFile(fmt::format("{:02X}", i));
if (next != nullptr) {
concat.push_back(std::move(next));
|
@ -49,7 +49,7 @@ struct FileEntry {
static_assert(sizeof(FileEntry) == 0x20, "FileEntry has incorrect size.");

template <typename Entry>
static std::pair<Entry, std::string> GetEntry(const VirtualFile& file, size_t offset) {
static std::pair<Entry, std::string> GetEntry(const VirtualFile& file, std::size_t offset) {
Entry entry{};
if (file->ReadObject(&entry, offset) != sizeof(Entry))
return {};
@ -59,8 +59,8 @@ static std::pair<Entry, std::string> GetEntry(const VirtualFile& file, size_t of
return {entry, string};
}

void ProcessFile(VirtualFile file, size_t file_offset, size_t data_offset, u32 this_file_offset,
std::shared_ptr<VectorVfsDirectory> parent) {
void ProcessFile(VirtualFile file, std::size_t file_offset, std::size_t data_offset,
u32 this_file_offset, std::shared_ptr<VectorVfsDirectory> parent) {
while (true) {
auto entry = GetEntry<FileEntry>(file, file_offset + this_file_offset);

@ -74,8 +74,9 @@ void ProcessFile(VirtualFile file, size_t file_offset, size_t data_offset, u32 t
}
}

void ProcessDirectory(VirtualFile file, size_t dir_offset, size_t file_offset, size_t data_offset,
u32 this_dir_offset, std::shared_ptr<VectorVfsDirectory> parent) {
void ProcessDirectory(VirtualFile file, std::size_t dir_offset, std::size_t file_offset,
std::size_t data_offset, u32 this_dir_offset,
std::shared_ptr<VectorVfsDirectory> parent) {
while (true) {
auto entry = GetEntry<DirectoryEntry>(file, dir_offset + this_dir_offset);
auto current = std::make_shared<VectorVfsDirectory>(
|
@ -167,18 +167,18 @@ std::string VfsFile::GetExtension() const {

VfsDirectory::~VfsDirectory() = default;

boost::optional<u8> VfsFile::ReadByte(size_t offset) const {
boost::optional<u8> VfsFile::ReadByte(std::size_t offset) const {
u8 out{};
size_t size = Read(&out, 1, offset);
std::size_t size = Read(&out, 1, offset);
if (size == 1)
return out;

return boost::none;
}

std::vector<u8> VfsFile::ReadBytes(size_t size, size_t offset) const {
std::vector<u8> VfsFile::ReadBytes(std::size_t size, std::size_t offset) const {
std::vector<u8> out(size);
size_t read_size = Read(out.data(), size, offset);
std::size_t read_size = Read(out.data(), size, offset);
out.resize(read_size);
return out;
}
@ -187,11 +187,11 @@ std::vector<u8> VfsFile::ReadAllBytes() const {
return ReadBytes(GetSize());
}

bool VfsFile::WriteByte(u8 data, size_t offset) {
bool VfsFile::WriteByte(u8 data, std::size_t offset) {
return Write(&data, 1, offset) == 1;
}

size_t VfsFile::WriteBytes(const std::vector<u8>& data, size_t offset) {
std::size_t VfsFile::WriteBytes(const std::vector<u8>& data, std::size_t offset) {
return Write(data.data(), data.size(), offset);
}

@ -215,7 +215,7 @@ std::shared_ptr<VfsFile> VfsDirectory::GetFileRelative(std::string_view path) co
}

auto dir = GetSubdirectory(vec[0]);
for (size_t component = 1; component < vec.size() - 1; ++component) {
for (std::size_t component = 1; component < vec.size() - 1; ++component) {
if (dir == nullptr) {
return nullptr;
}
@ -249,7 +249,7 @@ std::shared_ptr<VfsDirectory> VfsDirectory::GetDirectoryRelative(std::string_vie
}

auto dir = GetSubdirectory(vec[0]);
for (size_t component = 1; component < vec.size(); ++component) {
for (std::size_t component = 1; component < vec.size(); ++component) {
if (dir == nullptr) {
return nullptr;
}
@ -286,7 +286,7 @@ bool VfsDirectory::IsRoot() const {
return GetParentDirectory() == nullptr;
}

size_t VfsDirectory::GetSize() const {
std::size_t VfsDirectory::GetSize() const {
const auto& files = GetFiles();
const auto sum_sizes = [](const auto& range) {
return std::accumulate(range.begin(), range.end(), 0ULL,
@ -434,13 +434,13 @@ bool ReadOnlyVfsDirectory::Rename(std::string_view name) {
return false;
}

bool DeepEquals(const VirtualFile& file1, const VirtualFile& file2, size_t block_size) {
bool DeepEquals(const VirtualFile& file1, const VirtualFile& file2, std::size_t block_size) {
if (file1->GetSize() != file2->GetSize())
return false;

std::vector<u8> f1_v(block_size);
std::vector<u8> f2_v(block_size);
for (size_t i = 0; i < file1->GetSize(); i += block_size) {
for (std::size_t i = 0; i < file1->GetSize(); i += block_size) {
auto f1_vs = file1->Read(f1_v.data(), block_size, i);
auto f2_vs = file2->Read(f2_v.data(), block_size, i);

|
@ -92,9 +92,9 @@ public:
// Retrieves the extension of the file name.
virtual std::string GetExtension() const;
// Retrieves the size of the file.
virtual size_t GetSize() const = 0;
virtual std::size_t GetSize() const = 0;
// Resizes the file to new_size. Returns whether or not the operation was successful.
virtual bool Resize(size_t new_size) = 0;
virtual bool Resize(std::size_t new_size) = 0;
// Gets a pointer to the directory containing this file, returning nullptr if there is none.
virtual std::shared_ptr<VfsDirectory> GetContainingDirectory() const = 0;

@ -105,15 +105,15 @@ public:

// The primary method of reading from the file. Reads length bytes into data starting at offset
// into file. Returns number of bytes successfully read.
virtual size_t Read(u8* data, size_t length, size_t offset = 0) const = 0;
virtual std::size_t Read(u8* data, std::size_t length, std::size_t offset = 0) const = 0;
// The primary method of writing to the file. Writes length bytes from data starting at offset
// into file. Returns number of bytes successfully written.
virtual size_t Write(const u8* data, size_t length, size_t offset = 0) = 0;
virtual std::size_t Write(const u8* data, std::size_t length, std::size_t offset = 0) = 0;

// Reads exactly one byte at the offset provided, returning boost::none on error.
virtual boost::optional<u8> ReadByte(size_t offset = 0) const;
virtual boost::optional<u8> ReadByte(std::size_t offset = 0) const;
// Reads size bytes starting at offset in file into a vector.
virtual std::vector<u8> ReadBytes(size_t size, size_t offset = 0) const;
virtual std::vector<u8> ReadBytes(std::size_t size, std::size_t offset = 0) const;
// Reads all the bytes from the file into a vector. Equivalent to 'file->Read(file->GetSize(),
// 0)'
virtual std::vector<u8> ReadAllBytes() const;
@ -121,7 +121,7 @@ public:
// Reads an array of type T, size number_elements starting at offset.
// Returns the number of bytes (sizeof(T)*number_elements) read successfully.
template <typename T>
size_t ReadArray(T* data, size_t number_elements, size_t offset = 0) const {
std::size_t ReadArray(T* data, std::size_t number_elements, std::size_t offset = 0) const {
static_assert(std::is_trivially_copyable_v<T>, "Data type must be trivially copyable.");

return Read(reinterpret_cast<u8*>(data), number_elements * sizeof(T), offset);
@ -130,7 +130,7 @@ public:
// Reads size bytes into the memory starting at data starting at offset into the file.
// Returns the number of bytes read successfully.
template <typename T>
size_t ReadBytes(T* data, size_t size, size_t offset = 0) const {
std::size_t ReadBytes(T* data, std::size_t size, std::size_t offset = 0) const {
static_assert(std::is_trivially_copyable_v<T>, "Data type must be trivially copyable.");
return Read(reinterpret_cast<u8*>(data), size, offset);
}
@ -138,22 +138,22 @@ public:
// Reads one object of type T starting at offset in file.
// Returns the number of bytes read successfully (sizeof(T)).
template <typename T>
size_t ReadObject(T* data, size_t offset = 0) const {
std::size_t ReadObject(T* data, std::size_t offset = 0) const {
static_assert(std::is_trivially_copyable_v<T>, "Data type must be trivially copyable.");
return Read(reinterpret_cast<u8*>(data), sizeof(T), offset);
}

// Writes exactly one byte to offset in file and retuns whether or not the byte was written
// successfully.
virtual bool WriteByte(u8 data, size_t offset = 0);
virtual bool WriteByte(u8 data, std::size_t offset = 0);
// Writes a vector of bytes to offset in file and returns the number of bytes successfully
// written.
virtual size_t WriteBytes(const std::vector<u8>& data, size_t offset = 0);
virtual std::size_t WriteBytes(const std::vector<u8>& data, std::size_t offset = 0);

// Writes an array of type T, size number_elements to offset in file.
// Returns the number of bytes (sizeof(T)*number_elements) written successfully.
template <typename T>
size_t WriteArray(const T* data, size_t number_elements, size_t offset = 0) {
std::size_t WriteArray(const T* data, std::size_t number_elements, std::size_t offset = 0) {
static_assert(std::is_trivially_copyable_v<T>, "Data type must be trivially copyable.");
return Write(data, number_elements * sizeof(T), offset);
}
@ -161,7 +161,7 @@ public:
// Writes size bytes starting at memory location data to offset in file.
// Returns the number of bytes written successfully.
template <typename T>
size_t WriteBytes(const T* data, size_t size, size_t offset = 0) {
std::size_t WriteBytes(const T* data, std::size_t size, std::size_t offset = 0) {
static_assert(std::is_trivially_copyable_v<T>, "Data type must be trivially copyable.");
return Write(reinterpret_cast<const u8*>(data), size, offset);
}
@ -169,7 +169,7 @@ public:
// Writes one object of type T to offset in file.
// Returns the number of bytes written successfully (sizeof(T)).
template <typename T>
size_t WriteObject(const T& data, size_t offset = 0) {
std::size_t WriteObject(const T& data, std::size_t offset = 0) {
static_assert(std::is_trivially_copyable_v<T>, "Data type must be trivially copyable.");
return Write(&data, sizeof(T), offset);
}
@ -221,7 +221,7 @@ public:
// Returns the name of the directory.
virtual std::string GetName() const = 0;
// Returns the total size of all files and subdirectories in this directory.
virtual size_t GetSize() const;
virtual std::size_t GetSize() const;
// Returns the parent directory of this directory. Returns nullptr if this directory is root or
// has no parent.
virtual std::shared_ptr<VfsDirectory> GetParentDirectory() const = 0;
@ -311,7 +311,7 @@ public:
};

// Compare the two files, byte-for-byte, in increments specificed by block_size
bool DeepEquals(const VirtualFile& file1, const VirtualFile& file2, size_t block_size = 0x200);
bool DeepEquals(const VirtualFile& file1, const VirtualFile& file2, std::size_t block_size = 0x200);

// A method that copies the raw data between two different implementations of VirtualFile. If you
// are using the same implementation, it is probably better to use the Copy method in the parent
|
@ -20,7 +20,7 @@ VirtualFile ConcatenateFiles(std::vector<VirtualFile> files, std::string name) {

ConcatenatedVfsFile::ConcatenatedVfsFile(std::vector<VirtualFile> files_, std::string name)
: name(std::move(name)) {
size_t next_offset = 0;
std::size_t next_offset = 0;
for (const auto& file : files_) {
files[next_offset] = file;
next_offset += file->GetSize();
@ -35,13 +35,13 @@ std::string ConcatenatedVfsFile::GetName() const {
return files.begin()->second->GetName();
}

size_t ConcatenatedVfsFile::GetSize() const {
std::size_t ConcatenatedVfsFile::GetSize() const {
if (files.empty())
return 0;
return files.rbegin()->first + files.rbegin()->second->GetSize();
}

bool ConcatenatedVfsFile::Resize(size_t new_size) {
bool ConcatenatedVfsFile::Resize(std::size_t new_size) {
return false;
}

@ -59,7 +59,7 @@ bool ConcatenatedVfsFile::IsReadable() const {
return true;
}

size_t ConcatenatedVfsFile::Read(u8* data, size_t length, size_t offset) const {
std::size_t ConcatenatedVfsFile::Read(u8* data, std::size_t length, std::size_t offset) const {
auto entry = files.end();
for (auto iter = files.begin(); iter != files.end(); ++iter) {
if (iter->first > offset) {
@ -84,7 +84,7 @@ size_t ConcatenatedVfsFile::Read(u8* data, size_t length, size_t offset) const {
return entry->second->Read(data, length, offset - entry->first);
}

size_t ConcatenatedVfsFile::Write(const u8* data, size_t length, size_t offset) {
std::size_t ConcatenatedVfsFile::Write(const u8* data, std::size_t length, std::size_t offset) {
return 0;
}

|
@ -23,13 +23,13 @@ class ConcatenatedVfsFile : public VfsFile {

public:
std::string GetName() const override;
size_t GetSize() const override;
bool Resize(size_t new_size) override;
std::size_t GetSize() const override;
bool Resize(std::size_t new_size) override;
std::shared_ptr<VfsDirectory> GetContainingDirectory() const override;
bool IsWritable() const override;
bool IsReadable() const override;
size_t Read(u8* data, size_t length, size_t offset) const override;
size_t Write(const u8* data, size_t length, size_t offset) override;
std::size_t Read(u8* data, std::size_t length, std::size_t offset) const override;
std::size_t Write(const u8* data, std::size_t length, std::size_t offset) override;
bool Rename(std::string_view name) override;

private:
|
@ -9,7 +9,7 @@

namespace FileSys {

OffsetVfsFile::OffsetVfsFile(std::shared_ptr<VfsFile> file_, size_t size_, size_t offset_,
OffsetVfsFile::OffsetVfsFile(std::shared_ptr<VfsFile> file_, std::size_t size_, std::size_t offset_,
std::string name_, VirtualDir parent_)
: file(file_), offset(offset_), size(size_), name(std::move(name_)),
parent(parent_ == nullptr ? file->GetContainingDirectory() : std::move(parent_)) {}
@ -18,11 +18,11 @@ std::string OffsetVfsFile::GetName() const {
return name.empty() ? file->GetName() : name;
}

size_t OffsetVfsFile::GetSize() const {
std::size_t OffsetVfsFile::GetSize() const {
return size;
}

bool OffsetVfsFile::Resize(size_t new_size) {
bool OffsetVfsFile::Resize(std::size_t new_size) {
if (offset + new_size < file->GetSize()) {
size = new_size;
} else {
@ -47,22 +47,22 @@ bool OffsetVfsFile::IsReadable() const {
return file->IsReadable();
}

size_t OffsetVfsFile::Read(u8* data, size_t length, size_t r_offset) const {
std::size_t OffsetVfsFile::Read(u8* data, std::size_t length, std::size_t r_offset) const {
return file->Read(data, TrimToFit(length, r_offset), offset + r_offset);
}

size_t OffsetVfsFile::Write(const u8* data, size_t length, size_t r_offset) {
std::size_t OffsetVfsFile::Write(const u8* data, std::size_t length, std::size_t r_offset) {
return file->Write(data, TrimToFit(length, r_offset), offset + r_offset);
}

boost::optional<u8> OffsetVfsFile::ReadByte(size_t r_offset) const {
boost::optional<u8> OffsetVfsFile::ReadByte(std::size_t r_offset) const {
if (r_offset < size)
return file->ReadByte(offset + r_offset);

return boost::none;
}

std::vector<u8> OffsetVfsFile::ReadBytes(size_t r_size, size_t r_offset) const {
std::vector<u8> OffsetVfsFile::ReadBytes(std::size_t r_size, std::size_t r_offset) const {
return file->ReadBytes(TrimToFit(r_size, r_offset), offset + r_offset);
}

@ -70,14 +70,14 @@ std::vector<u8> OffsetVfsFile::ReadAllBytes() const {
return file->ReadBytes(size, offset);
}

bool OffsetVfsFile::WriteByte(u8 data, size_t r_offset) {
bool OffsetVfsFile::WriteByte(u8 data, std::size_t r_offset) {
if (r_offset < size)
return file->WriteByte(data, offset + r_offset);

return false;
}

size_t OffsetVfsFile::WriteBytes(const std::vector<u8>& data, size_t r_offset) {
std::size_t OffsetVfsFile::WriteBytes(const std::vector<u8>& data, std::size_t r_offset) {
return file->Write(data.data(), TrimToFit(data.size(), r_offset), offset + r_offset);
}

@ -85,12 +85,12 @@ bool OffsetVfsFile::Rename(std::string_view name) {
return file->Rename(name);
}

size_t OffsetVfsFile::GetOffset() const {
std::size_t OffsetVfsFile::GetOffset() const {
return offset;
}

size_t OffsetVfsFile::TrimToFit(size_t r_size, size_t r_offset) const {
return std::clamp(r_size, size_t{0}, size - r_offset);
std::size_t OffsetVfsFile::TrimToFit(std::size_t r_size, std::size_t r_offset) const {
return std::clamp(r_size, std::size_t{0}, size - r_offset);
}

} // namespace FileSys
|
@ -17,33 +17,33 @@ namespace FileSys {
// the size of this wrapper.
class OffsetVfsFile : public VfsFile {
public:
OffsetVfsFile(std::shared_ptr<VfsFile> file, size_t size, size_t offset = 0,
OffsetVfsFile(std::shared_ptr<VfsFile> file, std::size_t size, std::size_t offset = 0,
std::string new_name = "", VirtualDir new_parent = nullptr);

std::string GetName() const override;
size_t GetSize() const override;
bool Resize(size_t new_size) override;
std::size_t GetSize() const override;
bool Resize(std::size_t new_size) override;
std::shared_ptr<VfsDirectory> GetContainingDirectory() const override;
bool IsWritable() const override;
bool IsReadable() const override;
size_t Read(u8* data, size_t length, size_t offset) const override;
size_t Write(const u8* data, size_t length, size_t offset) override;
boost::optional<u8> ReadByte(size_t offset) const override;
std::vector<u8> ReadBytes(size_t size, size_t offset) const override;
std::size_t Read(u8* data, std::size_t length, std::size_t offset) const override;
std::size_t Write(const u8* data, std::size_t length, std::size_t offset) override;
boost::optional<u8> ReadByte(std::size_t offset) const override;
std::vector<u8> ReadBytes(std::size_t size, std::size_t offset) const override;
std::vector<u8> ReadAllBytes() const override;
bool WriteByte(u8 data, size_t offset) override;
size_t WriteBytes(const std::vector<u8>& data, size_t offset) override;
bool WriteByte(u8 data, std::size_t offset) override;
std::size_t WriteBytes(const std::vector<u8>& data, std::size_t offset) override;

bool Rename(std::string_view name) override;

size_t GetOffset() const;
std::size_t GetOffset() const;

private:
size_t TrimToFit(size_t r_size, size_t r_offset) const;
std::size_t TrimToFit(std::size_t r_size, std::size_t r_offset) const;

std::shared_ptr<VfsFile> file;
size_t offset;
size_t size;
std::size_t offset;
std::size_t size;
std::string name;
VirtualDir parent;
};
|
@ -227,11 +227,11 @@ std::string RealVfsFile::GetName() const {
return path_components.back();
}

size_t RealVfsFile::GetSize() const {
std::size_t RealVfsFile::GetSize() const {
return backing->GetSize();
}

bool RealVfsFile::Resize(size_t new_size) {
bool RealVfsFile::Resize(std::size_t new_size) {
return backing->Resize(new_size);
}

@ -247,13 +247,13 @@ bool RealVfsFile::IsReadable() const {
return (perms & Mode::ReadWrite) != 0;
}

size_t RealVfsFile::Read(u8* data, size_t length, size_t offset) const {
std::size_t RealVfsFile::Read(u8* data, std::size_t length, std::size_t offset) const {
if (!backing->Seek(offset, SEEK_SET))
return 0;
return backing->ReadBytes(data, length);
}

size_t RealVfsFile::Write(const u8* data, size_t length, size_t offset) {
std::size_t RealVfsFile::Write(const u8* data, std::size_t length, std::size_t offset) {
if (!backing->Seek(offset, SEEK_SET))
return 0;
return backing->WriteBytes(data, length);
|
@ -48,13 +48,13 @@ public:
~RealVfsFile() override;

std::string GetName() const override;
size_t GetSize() const override;
bool Resize(size_t new_size) override;
std::size_t GetSize() const override;
bool Resize(std::size_t new_size) override;
std::shared_ptr<VfsDirectory> GetContainingDirectory() const override;
bool IsWritable() const override;
bool IsReadable() const override;
size_t Read(u8* data, size_t length, size_t offset) const override;
size_t Write(const u8* data, size_t length, size_t offset) override;
std::size_t Read(u8* data, std::size_t length, std::size_t offset) const override;
std::size_t Write(const u8* data, std::size_t length, std::size_t offset) override;
bool Rename(std::string_view name) override;

private:
|
@ -25,8 +25,8 @@ namespace FileSys {
constexpr u64 NAX_HEADER_PADDING_SIZE = 0x4000;

template <typename SourceData, typename SourceKey, typename Destination>
static bool CalculateHMAC256(Destination* out, const SourceKey* key, size_t key_length,
const SourceData* data, size_t data_length) {
static bool CalculateHMAC256(Destination* out, const SourceKey* key, std::size_t key_length,
const SourceData* data, std::size_t data_length) {
mbedtls_md_context_t context;
mbedtls_md_init(&context);

@ -91,7 +91,7 @@ Loader::ResultStatus NAX::Parse(std::string_view path) {

const auto enc_keys = header->key_area;

size_t i = 0;
std::size_t i = 0;
for (; i < sd_keys.size(); ++i) {
std::array<Core::Crypto::Key128, 2> nax_keys{};
if (!CalculateHMAC256(nax_keys.data(), sd_keys[i].data(), 0x10, std::string(path).c_str(),
@ -99,7 +99,7 @@ Loader::ResultStatus NAX::Parse(std::string_view path) {
return Loader::ResultStatus::ErrorNAXKeyHMACFailed;
}

for (size_t j = 0; j < nax_keys.size(); ++j) {
for (std::size_t j = 0; j < nax_keys.size(); ++j) {
Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(nax_keys[j],
Core::Crypto::Mode::ECB);
cipher.Transcode(enc_keys[j].data(), 0x10, header->key_area[j].data(),
|
@ -65,9 +65,9 @@ constexpr u32 MSG_WAITALL = 8;
constexpr u32 LR_REGISTER = 30;
constexpr u32 SP_REGISTER = 31;
constexpr u32 PC_REGISTER = 32;
constexpr u32 CPSR_REGISTER = 33;
constexpr u32 PSTATE_REGISTER = 33;
constexpr u32 UC_ARM64_REG_Q0 = 34;
constexpr u32 FPSCR_REGISTER = 66;
constexpr u32 FPCR_REGISTER = 66;

// TODO/WiP - Used while working on support for FPU
constexpr u32 TODO_DUMMY_REG_997 = 997;
@ -116,7 +116,7 @@ constexpr char target_xml[] =

<reg name="pc" bitsize="64" type="code_ptr"/>

<flags id="cpsr_flags" size="4">
<flags id="pstate_flags" size="4">
<field name="SP" start="0" end="0"/>
<field name="" start="1" end="1"/>
<field name="EL" start="2" end="3"/>
@ -135,7 +135,7 @@ constexpr char target_xml[] =
<field name="Z" start="30" end="30"/>
<field name="N" start="31" end="31"/>
</flags>
<reg name="cpsr" bitsize="32" type="cpsr_flags"/>
<reg name="pstate" bitsize="32" type="pstate_flags"/>
</feature>
<feature name="org.gnu.gdb.aarch64.fpu">
</feature>
@ -227,10 +227,10 @@ static u64 RegRead(std::size_t id, Kernel::Thread* thread = nullptr) {
return thread->context.sp;
} else if (id == PC_REGISTER) {
return thread->context.pc;
} else if (id == CPSR_REGISTER) {
return thread->context.cpsr;
} else if (id > CPSR_REGISTER && id < FPSCR_REGISTER) {
return thread->context.fpu_registers[id - UC_ARM64_REG_Q0][0];
} else if (id == PSTATE_REGISTER) {
return thread->context.pstate;
} else if (id > PSTATE_REGISTER && id < FPCR_REGISTER) {
return thread->context.vector_registers[id - UC_ARM64_REG_Q0][0];
} else {
return 0;
}
@ -247,10 +247,10 @@ static void RegWrite(std::size_t id, u64 val, Kernel::Thread* thread = nullptr)
thread->context.sp = val;
} else if (id == PC_REGISTER) {
thread->context.pc = val;
} else if (id == CPSR_REGISTER) {
thread->context.cpsr = val;
} else if (id > CPSR_REGISTER && id < FPSCR_REGISTER) {
thread->context.fpu_registers[id - (CPSR_REGISTER + 1)][0] = val;
} else if (id == PSTATE_REGISTER) {
thread->context.pstate = val;
} else if (id > PSTATE_REGISTER && id < FPCR_REGISTER) {
thread->context.vector_registers[id - (PSTATE_REGISTER + 1)][0] = val;
}
}

@ -292,7 +292,7 @@ static u8 NibbleToHex(u8 n) {
* @param src Pointer to array of output hex string characters.
* @param len Length of src array.
*/
static u32 HexToInt(const u8* src, size_t len) {
static u32 HexToInt(const u8* src, std::size_t len) {
u32 output = 0;
while (len-- > 0) {
output = (output << 4) | HexCharToValue(src[0]);
@ -307,7 +307,7 @@ static u32 HexToInt(const u8* src, size_t len) {
* @param src Pointer to array of output hex string characters.
* @param len Length of src array.
*/
static u64 HexToLong(const u8* src, size_t len) {
static u64 HexToLong(const u8* src, std::size_t len) {
u64 output = 0;
while (len-- > 0) {
output = (output << 4) | HexCharToValue(src[0]);
@ -323,7 +323,7 @@ static u64 HexToLong(const u8* src, size_t len) {
* @param src Pointer to array of u8 bytes.
* @param len Length of src array.
*/
static void MemToGdbHex(u8* dest, const u8* src, size_t len) {
static void MemToGdbHex(u8* dest, const u8* src, std::size_t len) {
while (len-- > 0) {
u8 tmp = *src++;
*dest++ = NibbleToHex(tmp >> 4);
@ -338,7 +338,7 @@ static void MemToGdbHex(u8* dest, const u8* src, size_t len) {
* @param src Pointer to array of output hex string characters.
* @param len Length of src array.
*/
static void GdbHexToMem(u8* dest, const u8* src, size_t len) {
static void GdbHexToMem(u8* dest, const u8* src, std::size_t len) {
while (len-- > 0) {
*dest++ = (HexCharToValue(src[0]) << 4) | HexCharToValue(src[1]);
src += 2;
@ -406,7 +406,7 @@ static u64 GdbHexToLong(const u8* src) {
/// Read a byte from the gdb client.
static u8 ReadByte() {
u8 c;
size_t received_size = recv(gdbserver_socket, reinterpret_cast<char*>(&c), 1, MSG_WAITALL);
std::size_t received_size = recv(gdbserver_socket, reinterpret_cast<char*>(&c), 1, MSG_WAITALL);
if (received_size != 1) {
LOG_ERROR(Debug_GDBStub, "recv failed: {}", received_size);
Shutdown();
@ -416,7 +416,7 @@ static u8 ReadByte() {
}

/// Calculate the checksum of the current command buffer.
static u8 CalculateChecksum(const u8* buffer, size_t length) {
static u8 CalculateChecksum(const u8* buffer, std::size_t length) {
return static_cast<u8>(std::accumulate(buffer, buffer + length, 0, std::plus<u8>()));
}

@ -518,7 +518,7 @@ bool CheckBreakpoint(VAddr addr, BreakpointType type) {
* @param packet Packet to be sent to client.
*/
static void SendPacket(const char packet) {
size_t sent_size = send(gdbserver_socket, &packet, 1, 0);
std::size_t sent_size = send(gdbserver_socket, &packet, 1, 0);
if (sent_size != 1) {
LOG_ERROR(Debug_GDBStub, "send failed");
}
@ -781,11 +781,11 @@ static void ReadRegister() {
LongToGdbHex(reply, RegRead(id, current_thread));
} else if (id == PC_REGISTER) {
LongToGdbHex(reply, RegRead(id, current_thread));
} else if (id == CPSR_REGISTER) {
IntToGdbHex(reply, (u32)RegRead(id, current_thread));
} else if (id >= UC_ARM64_REG_Q0 && id < FPSCR_REGISTER) {
} else if (id == PSTATE_REGISTER) {
IntToGdbHex(reply, static_cast<u32>(RegRead(id, current_thread)));
} else if (id >= UC_ARM64_REG_Q0 && id < FPCR_REGISTER) {
LongToGdbHex(reply, RegRead(id, current_thread));
} else if (id == FPSCR_REGISTER) {
} else if (id == FPCR_REGISTER) {
LongToGdbHex(reply, RegRead(TODO_DUMMY_REG_998, current_thread));
} else {
LongToGdbHex(reply, RegRead(TODO_DUMMY_REG_997, current_thread));
@ -811,7 +811,7 @@ static void ReadRegisters() {

bufptr += 16;

IntToGdbHex(bufptr, (u32)RegRead(CPSR_REGISTER, current_thread));
IntToGdbHex(bufptr, static_cast<u32>(RegRead(PSTATE_REGISTER, current_thread)));

bufptr += 8;

@ -843,11 +843,11 @@ static void WriteRegister() {
RegWrite(id, GdbHexToLong(buffer_ptr), current_thread);
} else if (id == PC_REGISTER) {
RegWrite(id, GdbHexToLong(buffer_ptr), current_thread);
} else if (id == CPSR_REGISTER) {
} else if (id == PSTATE_REGISTER) {
RegWrite(id, GdbHexToInt(buffer_ptr), current_thread);
} else if (id >= UC_ARM64_REG_Q0 && id < FPSCR_REGISTER) {
} else if (id >= UC_ARM64_REG_Q0 && id < FPCR_REGISTER) {
RegWrite(id, GdbHexToLong(buffer_ptr), current_thread);
} else if (id == FPSCR_REGISTER) {
} else if (id == FPCR_REGISTER) {
RegWrite(TODO_DUMMY_REG_998, GdbHexToLong(buffer_ptr), current_thread);
} else {
RegWrite(TODO_DUMMY_REG_997, GdbHexToLong(buffer_ptr), current_thread);
@ -866,16 +866,16 @@ static void WriteRegisters() {
if (command_buffer[0] != 'G')
return SendReply("E01");

for (u32 i = 0, reg = 0; reg <= FPSCR_REGISTER; i++, reg++) {
for (u32 i = 0, reg = 0; reg <= FPCR_REGISTER; i++, reg++) {
if (reg <= SP_REGISTER) {
RegWrite(reg, GdbHexToLong(buffer_ptr + i * 16), current_thread);
} else if (reg == PC_REGISTER) {
RegWrite(PC_REGISTER, GdbHexToLong(buffer_ptr + i * 16), current_thread);
} else if (reg == CPSR_REGISTER) {
RegWrite(CPSR_REGISTER, GdbHexToInt(buffer_ptr + i * 16), current_thread);
} else if (reg >= UC_ARM64_REG_Q0 && reg < FPSCR_REGISTER) {
} else if (reg == PSTATE_REGISTER) {
RegWrite(PSTATE_REGISTER, GdbHexToInt(buffer_ptr + i * 16), current_thread);
} else if (reg >= UC_ARM64_REG_Q0 && reg < FPCR_REGISTER) {
RegWrite(reg, GdbHexToLong(buffer_ptr + i * 16), current_thread);
} else if (reg == FPSCR_REGISTER) {
} else if (reg == FPCR_REGISTER) {
RegWrite(TODO_DUMMY_REG_998, GdbHexToLong(buffer_ptr + i * 16), current_thread);
} else {
UNIMPLEMENTED();
|
@ -12,7 +12,7 @@
namespace IPC {

/// Size of the command buffer area, in 32-bit words.
constexpr size_t COMMAND_BUFFER_LENGTH = 0x100 / sizeof(u32);
constexpr std::size_t COMMAND_BUFFER_LENGTH = 0x100 / sizeof(u32);

// These errors are commonly returned by invalid IPC translations, so alias them here for
// convenience.
|
@ -152,8 +152,8 @@ public:
}

void ValidateHeader() {
const size_t num_domain_objects = context->NumDomainObjects();
const size_t num_move_objects = context->NumMoveObjects();
const std::size_t num_domain_objects = context->NumDomainObjects();
const std::size_t num_move_objects = context->NumMoveObjects();
ASSERT_MSG(!num_domain_objects || !num_move_objects,
"cannot move normal handles and domain objects");
ASSERT_MSG((index - datapayload_index) == normal_params_size,
@ -329,10 +329,10 @@ public:
T PopRaw();

template <typename T>
Kernel::SharedPtr<T> GetMoveObject(size_t index);
Kernel::SharedPtr<T> GetMoveObject(std::size_t index);

template <typename T>
Kernel::SharedPtr<T> GetCopyObject(size_t index);
Kernel::SharedPtr<T> GetCopyObject(std::size_t index);

template <class T>
std::shared_ptr<T> PopIpcInterface() {
@ -406,12 +406,12 @@ void RequestParser::Pop(First& first_value, Other&... other_values) {
}

template <typename T>
Kernel::SharedPtr<T> RequestParser::GetMoveObject(size_t index) {
Kernel::SharedPtr<T> RequestParser::GetMoveObject(std::size_t index) {
return context->GetMoveObject<T>(index);
}

template <typename T>
Kernel::SharedPtr<T> RequestParser::GetCopyObject(size_t index) {
Kernel::SharedPtr<T> RequestParser::GetCopyObject(std::size_t index) {
return context->GetCopyObject<T>(index);
}

|
@ -35,16 +35,17 @@ static ResultCode WaitForAddress(VAddr address, s64 timeout) {

// Gets the threads waiting on an address.
static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) {
const auto RetrieveWaitingThreads =
[](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr arb_addr) {
const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
auto& thread_list = scheduler->GetThreadList();
const auto RetrieveWaitingThreads = [](std::size_t core_index,
std::vector<SharedPtr<Thread>>& waiting_threads,
VAddr arb_addr) {
const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
auto& thread_list = scheduler->GetThreadList();

for (auto& thread : thread_list) {
if (thread->arb_wait_address == arb_addr)
waiting_threads.push_back(thread);
}
};
for (auto& thread : thread_list) {
if (thread->arb_wait_address == arb_addr)
waiting_threads.push_back(thread);
}
};

// Retrieve all threads that are waiting for this address.
std::vector<SharedPtr<Thread>> threads;
@ -66,12 +67,12 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address)
static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) {
// Only process up to 'target' threads, unless 'target' is <= 0, in which case process
// them all.
size_t last = waiting_threads.size();
std::size_t last = waiting_threads.size();
if (num_to_wake > 0)
last = num_to_wake;

// Signal the waiting threads.
for (size_t i = 0; i < last; i++) {
for (std::size_t i = 0; i < last; i++) {
ASSERT(waiting_threads[i]->status == ThreadStatus::WaitArb);
waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
waiting_threads[i]->arb_wait_address = 0;
|
@ -17,6 +17,7 @@ enum {

// Confirmed Switch OS error codes
MaxConnectionsReached = 7,
InvalidSize = 101,
InvalidAddress = 102,
HandleTableFull = 105,
InvalidMemoryState = 106,
@ -29,6 +30,7 @@ enum {
SynchronizationCanceled = 118,
TooLarge = 119,
InvalidEnumValue = 120,
NoSuchEntry = 121,
InvalidState = 125,
ResourceLimitExceeded = 132,
};
@ -55,6 +57,7 @@ constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS(ErrorModule::Kernel,
ErrCodes::InvalidMemoryPermissions);
constexpr ResultCode ERR_INVALID_HANDLE(ErrorModule::Kernel, ErrCodes::InvalidHandle);
constexpr ResultCode ERR_INVALID_PROCESSOR_ID(ErrorModule::Kernel, ErrCodes::InvalidProcessorId);
constexpr ResultCode ERR_INVALID_SIZE(ErrorModule::Kernel, ErrCodes::InvalidSize);
constexpr ResultCode ERR_INVALID_STATE(ErrorModule::Kernel, ErrCodes::InvalidState);
constexpr ResultCode ERR_INVALID_THREAD_PRIORITY(ErrorModule::Kernel,
ErrCodes::InvalidThreadPriority);
@ -63,7 +66,7 @@ constexpr ResultCode ERR_INVALID_OBJECT_ADDR(-1);
constexpr ResultCode ERR_NOT_AUTHORIZED(-1);
/// Alternate code returned instead of ERR_INVALID_HANDLE in some code paths.
constexpr ResultCode ERR_INVALID_HANDLE_OS(-1);
constexpr ResultCode ERR_NOT_FOUND(-1);
constexpr ResultCode ERR_NOT_FOUND(ErrorModule::Kernel, ErrCodes::NoSuchEntry);
constexpr ResultCode RESULT_TIMEOUT(ErrorModule::Kernel, ErrCodes::Timeout);
/// Returned when Accept() is called on a port with no sessions to be accepted.
constexpr ResultCode ERR_NO_PENDING_SESSIONS(-1);
|
@ -65,7 +65,7 @@ ResultCode HandleTable::Close(Handle handle) {
}

bool HandleTable::IsValid(Handle handle) const {
size_t slot = GetSlot(handle);
std::size_t slot = GetSlot(handle);
u16 generation = GetGeneration(handle);

return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation;
|
@ -93,7 +93,7 @@ private:
* This is the maximum limit of handles allowed per process in CTR-OS. It can be further
* reduced by ExHeader values, but this is not emulated here.
*/
static const size_t MAX_COUNT = 4096;
static const std::size_t MAX_COUNT = 4096;

static u16 GetSlot(Handle handle) {
return handle >> 15;
|
@ -42,9 +42,9 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
Kernel::SharedPtr<Kernel::Event> event) {

// Put the client thread to sleep until the wait event is signaled or the timeout expires.
thread->wakeup_callback =
[context = *this, callback](ThreadWakeupReason reason, SharedPtr<Thread> thread,
SharedPtr<WaitObject> object, size_t index) mutable -> bool {
thread->wakeup_callback = [context = *this, callback](
ThreadWakeupReason reason, SharedPtr<Thread> thread,
SharedPtr<WaitObject> object, std::size_t index) mutable -> bool {
ASSERT(thread->status == ThreadStatus::WaitHLEEvent);
callback(thread, context, reason);
context.WriteToOutgoingCommandBuffer(*thread);
@ -199,8 +199,8 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(u32_le* src_cmdb
}

// The data_size already includes the payload header, the padding and the domain header.
size_t size = data_payload_offset + command_header->data_size -
sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
std::size_t size = data_payload_offset + command_header->data_size -
sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
if (domain_message_header)
size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);
std::copy_n(src_cmdbuf, size, cmd_buf.begin());
@ -217,8 +217,8 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
ParseCommandBuffer(cmd_buf.data(), false);

// The data_size already includes the payload header, the padding and the domain header.
size_t size = data_payload_offset + command_header->data_size -
sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
std::size_t size = data_payload_offset + command_header->data_size -
sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4;
if (domain_message_header)
size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32);

@ -229,7 +229,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
"Handle descriptor bit set but no handles to translate");
// We write the translated handles at a specific offset in the command buffer, this space
// was already reserved when writing the header.
size_t current_offset =
std::size_t current_offset =
(sizeof(IPC::CommandHeader) + sizeof(IPC::HandleDescriptorHeader)) / sizeof(u32);
ASSERT_MSG(!handle_descriptor_header->send_current_pid, "Sending PID is not implemented");

@ -258,7 +258,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
ASSERT(domain_message_header->num_objects == domain_objects.size());
// Write the domain objects to the command buffer, these go after the raw untranslated data.
// TODO(Subv): This completely ignores C buffers.
size_t domain_offset = size - domain_message_header->num_objects;
std::size_t domain_offset = size - domain_message_header->num_objects;
auto& request_handlers = server_session->domain_request_handlers;

for (auto& object : domain_objects) {
@ -291,14 +291,15 @@ std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const {
return buffer;
}

size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffer_index) const {
std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
int buffer_index) const {
if (size == 0) {
LOG_WARNING(Core, "skip empty buffer write");
return 0;
}

const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()};
const size_t buffer_size{GetWriteBufferSize(buffer_index)};
const std::size_t buffer_size{GetWriteBufferSize(buffer_index)};
if (size > buffer_size) {
LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
buffer_size);
@ -314,13 +315,13 @@ size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffe
return size;
}

size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const {
const bool is_buffer_a{BufferDescriptorA().size() && BufferDescriptorA()[buffer_index].Size()};
return is_buffer_a ? BufferDescriptorA()[buffer_index].Size()
: BufferDescriptorX()[buffer_index].Size();
}

size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
std::size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const {
const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()};
return is_buffer_b ? BufferDescriptorB()[buffer_index].Size()
: BufferDescriptorC()[buffer_index].Size();
|
@ -170,7 +170,7 @@ public:
std::vector<u8> ReadBuffer(int buffer_index = 0) const;

/// Helper function to write a buffer using the appropriate buffer descriptor
size_t WriteBuffer(const void* buffer, size_t size, int buffer_index = 0) const;
std::size_t WriteBuffer(const void* buffer, std::size_t size, int buffer_index = 0) const;

/* Helper function to write a buffer using the appropriate buffer descriptor
*
@ -182,7 +182,7 @@ public:
*/
template <typename ContiguousContainer,
typename = std::enable_if_t<!std::is_pointer_v<ContiguousContainer>>>
size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const {
std::size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const {
using ContiguousType = typename ContiguousContainer::value_type;

static_assert(std::is_trivially_copyable_v<ContiguousType>,
@ -193,19 +193,19 @@ public:
}

/// Helper function to get the size of the input buffer
size_t GetReadBufferSize(int buffer_index = 0) const;
std::size_t GetReadBufferSize(int buffer_index = 0) const;

/// Helper function to get the size of the output buffer
size_t GetWriteBufferSize(int buffer_index = 0) const;
std::size_t GetWriteBufferSize(int buffer_index = 0) const;

template <typename T>
SharedPtr<T> GetCopyObject(size_t index) {
SharedPtr<T> GetCopyObject(std::size_t index) {
ASSERT(index < copy_objects.size());
return DynamicObjectCast<T>(copy_objects[index]);
}

template <typename T>
SharedPtr<T> GetMoveObject(size_t index) {
SharedPtr<T> GetMoveObject(std::size_t index) {
ASSERT(index < move_objects.size());
return DynamicObjectCast<T>(move_objects[index]);
}
@ -223,7 +223,7 @@ public:
}

template <typename T>
std::shared_ptr<T> GetDomainRequestHandler(size_t index) const {
std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const {
return std::static_pointer_cast<T>(domain_request_handlers[index]);
}

@ -240,15 +240,15 @@ public:
domain_objects.clear();
}

size_t NumMoveObjects() const {
std::size_t NumMoveObjects() const {
return move_objects.size();
}

size_t NumCopyObjects() const {
std::size_t NumCopyObjects() const {
return copy_objects.size();
}

size_t NumDomainObjects() const {
std::size_t NumDomainObjects() const {
return domain_objects.size();
}

|
@ -62,7 +62,7 @@ ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle ho
Handle requesting_thread_handle) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidAddress);
return ERR_INVALID_ADDRESS;
}

SharedPtr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
@ -100,7 +100,7 @@ ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle ho
ResultCode Mutex::Release(VAddr address) {
// The mutex address must be 4-byte aligned
if ((address % sizeof(u32)) != 0) {
return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidAddress);
return ERR_INVALID_ADDRESS;
}

auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(GetCurrentThread(), address);
|
@ -40,8 +40,8 @@ SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) {
return process;
}

void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
for (size_t i = 0; i < len; ++i) {
void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
for (std::size_t i = 0; i < len; ++i) {
u32 descriptor = kernel_caps[i];
u32 type = descriptor >> 20;

@ -211,7 +211,7 @@ ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
"Shared memory exceeds bounds of mapped block");

const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
size_t backing_block_offset = vma->second.offset + vma_offset;
std::size_t backing_block_offset = vma->second.offset + vma_offset;

CASCADE_RESULT(auto new_vma,
vm_manager.MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size,
|
@ -59,7 +59,7 @@ class ResourceLimit;

struct CodeSet final : public Object {
struct Segment {
size_t offset = 0;
std::size_t offset = 0;
VAddr addr = 0;
u32 size = 0;
};
@ -164,7 +164,7 @@ public:
* Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
* to this process.
*/
void ParseKernelCaps(const u32* kernel_caps, size_t len);
void ParseKernelCaps(const u32* kernel_caps, std::size_t len);

/**
* Applies address space changes and launches the process main thread.
|
@ -119,7 +119,7 @@ public:
/// Backing memory for this shared memory block.
std::shared_ptr<std::vector<u8>> backing_block;
/// Offset into the backing block for this shared memory.
size_t backing_block_offset;
std::size_t backing_block_offset;
/// Size of the memory block. Page-aligned.
u64 size;
/// Permission restrictions applied to the process which created the block.
|
@ -35,10 +35,21 @@
|
||||
#include "core/hle/service/service.h"
|
||||
|
||||
namespace Kernel {
|
||||
namespace {
|
||||
constexpr bool Is4KBAligned(VAddr address) {
|
||||
return (address & 0xFFF) == 0;
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
||||
/// Set the process heap to a given Size. It can both extend and shrink the heap.
|
||||
static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
|
||||
LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);
|
||||
|
||||
// Size must be a multiple of 0x200000 (2MB) and be equal to or less than 4GB.
|
||||
if ((heap_size & 0xFFFFFFFE001FFFFF) != 0) {
|
||||
return ERR_INVALID_SIZE;
|
||||
}
|
||||
|
||||
auto& process = *Core::CurrentProcess();
|
||||
CASCADE_RESULT(*heap_addr,
|
||||
process.HeapAllocate(Memory::HEAP_VADDR, heap_size, VMAPermission::ReadWrite));
@ -56,6 +67,15 @@ static ResultCode SetMemoryAttribute(VAddr addr, u64 size, u32 state0, u32 state
static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);

if (!Is4KBAligned(dst_addr) || !Is4KBAligned(src_addr)) {
return ERR_INVALID_ADDRESS;
}

if (size == 0 || !Is4KBAligned(size)) {
return ERR_INVALID_SIZE;
}

return Core::CurrentProcess()->MirrorMemory(dst_addr, src_addr, size);
}

@ -63,6 +83,15 @@ static ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
static ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);

if (!Is4KBAligned(dst_addr) || !Is4KBAligned(src_addr)) {
return ERR_INVALID_ADDRESS;
}

if (size == 0 || !Is4KBAligned(size)) {
return ERR_INVALID_SIZE;
}

return Core::CurrentProcess()->UnmapMemory(dst_addr, src_addr, size);
}
@ -146,7 +175,7 @@ static ResultCode GetProcessId(u32* process_id, Handle process_handle) {

/// Default thread wakeup callback for WaitSynchronization
static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
SharedPtr<WaitObject> object, size_t index) {
SharedPtr<WaitObject> object, std::size_t index) {
ASSERT(thread->status == ThreadStatus::WaitSynchAny);

if (reason == ThreadWakeupReason::Timeout) {
@ -251,6 +280,10 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
"requesting_current_thread_handle=0x{:08X}",
holding_thread_handle, mutex_addr, requesting_thread_handle);

if (Memory::IsKernelVirtualAddress(mutex_addr)) {
return ERR_INVALID_ADDRESS_STATE;
}

auto& handle_table = Core::System::GetInstance().Kernel().HandleTable();
return Mutex::TryAcquire(handle_table, mutex_addr, holding_thread_handle,
requesting_thread_handle);
@ -260,6 +293,10 @@ static ResultCode ArbitrateLock(Handle holding_thread_handle, VAddr mutex_addr,
static ResultCode ArbitrateUnlock(VAddr mutex_addr) {
LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);

if (Memory::IsKernelVirtualAddress(mutex_addr)) {
return ERR_INVALID_ADDRESS_STATE;
}

return Mutex::Release(mutex_addr);
}
@ -415,35 +452,43 @@ static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 s
"called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
shared_memory_handle, addr, size, permissions);

if (!Is4KBAligned(addr)) {
return ERR_INVALID_ADDRESS;
}

if (size == 0 || !Is4KBAligned(size)) {
return ERR_INVALID_SIZE;
}

const auto permissions_type = static_cast<MemoryPermission>(permissions);
if (permissions_type != MemoryPermission::Read &&
permissions_type != MemoryPermission::ReadWrite) {
LOG_ERROR(Kernel_SVC, "Invalid permissions=0x{:08X}", permissions);
return ERR_INVALID_MEMORY_PERMISSIONS;
}

auto& kernel = Core::System::GetInstance().Kernel();
auto shared_memory = kernel.HandleTable().Get<SharedMemory>(shared_memory_handle);
if (!shared_memory) {
return ERR_INVALID_HANDLE;
}

MemoryPermission permissions_type = static_cast<MemoryPermission>(permissions);
switch (permissions_type) {
case MemoryPermission::Read:
case MemoryPermission::Write:
case MemoryPermission::ReadWrite:
case MemoryPermission::Execute:
case MemoryPermission::ReadExecute:
case MemoryPermission::WriteExecute:
case MemoryPermission::ReadWriteExecute:
case MemoryPermission::DontCare:
return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type,
MemoryPermission::DontCare);
default:
LOG_ERROR(Kernel_SVC, "unknown permissions=0x{:08X}", permissions);
}

return RESULT_SUCCESS;
return shared_memory->Map(Core::CurrentProcess().get(), addr, permissions_type,
MemoryPermission::DontCare);
}

static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size) {
LOG_WARNING(Kernel_SVC, "called, shared_memory_handle=0x{:08X}, addr=0x{:X}, size=0x{:X}",
shared_memory_handle, addr, size);

if (!Is4KBAligned(addr)) {
return ERR_INVALID_ADDRESS;
}

if (size == 0 || !Is4KBAligned(size)) {
return ERR_INVALID_SIZE;
}

auto& kernel = Core::System::GetInstance().Kernel();
auto shared_memory = kernel.HandleTable().Get<SharedMemory>(shared_memory_handle);
@ -524,7 +569,7 @@ static void ExitProcess() {
/// Creates a new thread
static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, VAddr stack_top,
u32 priority, s32 processor_id) {
std::string name = fmt::format("unknown-{:X}", entry_point);
std::string name = fmt::format("thread-{:X}", entry_point);

if (priority > THREADPRIO_LOWEST) {
return ERR_INVALID_THREAD_PRIORITY;
@ -647,16 +692,17 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
condition_variable_addr, target);

auto RetrieveWaitingThreads =
[](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr condvar_addr) {
const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
auto& thread_list = scheduler->GetThreadList();
auto RetrieveWaitingThreads = [](std::size_t core_index,
std::vector<SharedPtr<Thread>>& waiting_threads,
VAddr condvar_addr) {
const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
auto& thread_list = scheduler->GetThreadList();

for (auto& thread : thread_list) {
if (thread->condvar_wait_address == condvar_addr)
waiting_threads.push_back(thread);
}
};
for (auto& thread : thread_list) {
if (thread->condvar_wait_address == condvar_addr)
waiting_threads.push_back(thread);
}
};

// Retrieve a list of all threads that are waiting for this condition variable.
std::vector<SharedPtr<Thread>> waiting_threads;
@ -672,7 +718,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target

// Only process up to 'target' threads, unless 'target' is -1, in which case process
// them all.
size_t last = waiting_threads.size();
std::size_t last = waiting_threads.size();
if (target != -1)
last = target;

@ -680,12 +726,12 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
if (last > waiting_threads.size())
return RESULT_SUCCESS;

for (size_t index = 0; index < last; ++index) {
for (std::size_t index = 0; index < last; ++index) {
auto& thread = waiting_threads[index];

ASSERT(thread->condvar_wait_address == condition_variable_addr);

size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();

auto& monitor = Core::System::GetInstance().Monitor();
@ -898,12 +944,28 @@ static ResultCode CreateSharedMemory(Handle* handle, u64 size, u32 local_permiss
LOG_TRACE(Kernel_SVC, "called, size=0x{:X}, localPerms=0x{:08X}, remotePerms=0x{:08X}", size,
local_permissions, remote_permissions);

// Size must be a multiple of 4KB and be less than or equal to
// approx. 8 GB (actually (1GB - 512B) * 8)
if (size == 0 || (size & 0xFFFFFFFE00000FFF) != 0) {
return ERR_INVALID_SIZE;
}

const auto local_perms = static_cast<MemoryPermission>(local_permissions);
if (local_perms != MemoryPermission::Read && local_perms != MemoryPermission::ReadWrite) {
return ERR_INVALID_MEMORY_PERMISSIONS;
}

const auto remote_perms = static_cast<MemoryPermission>(remote_permissions);
if (remote_perms != MemoryPermission::Read && remote_perms != MemoryPermission::ReadWrite &&
remote_perms != MemoryPermission::DontCare) {
return ERR_INVALID_MEMORY_PERMISSIONS;
}

auto& kernel = Core::System::GetInstance().Kernel();
auto& handle_table = kernel.HandleTable();
auto shared_mem_handle =
SharedMemory::Create(kernel, handle_table.Get<Process>(KernelHandle::CurrentProcess), size,
static_cast<MemoryPermission>(local_permissions),
static_cast<MemoryPermission>(remote_permissions));
local_perms, remote_perms);

CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle));
return RESULT_SUCCESS;
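
The shared-memory size mask follows the same pattern as the heap check earlier in this diff: the low 12 bits of 0xFFFFFFFE00000FFF force a 4KB multiple, and the bits from bit 33 upward cap the size at (1GB - 512B) * 8, i.e. 8GB minus one page. A self-contained illustration under the same naming assumptions as before (not code from the commit):

#include <cstdint>

constexpr std::uint64_t kSharedMemSizeMask = 0xFFFFFFFE00000FFF;

constexpr bool SharedMemSizePasses(std::uint64_t size) {
    return size != 0 && (size & kSharedMemSizeMask) == 0;
}

static_assert(SharedMemSizePasses(0x1000), "one 4KB page passes");
static_assert(!SharedMemSizePasses(0x1800), "sizes that are not page multiples are rejected");
static_assert(SharedMemSizePasses(0x1FFFFF000ULL), "largest accepted size: (1GB - 512B) * 8");
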
@ -13,7 +13,9 @@

namespace Kernel {

#define PARAM(n) Core::CurrentArmInterface().GetReg(n)
static inline u64 Param(int n) {
return Core::CurrentArmInterface().GetReg(n);
}

/**
* HLE a function return from the current ARM userland process
@ -28,23 +30,23 @@ static inline void FuncReturn(u64 res) {

template <ResultCode func(u64)>
void SvcWrap() {
FuncReturn(func(PARAM(0)).raw);
FuncReturn(func(Param(0)).raw);
}

template <ResultCode func(u32)>
void SvcWrap() {
FuncReturn(func((u32)PARAM(0)).raw);
FuncReturn(func((u32)Param(0)).raw);
}

template <ResultCode func(u32, u32)>
void SvcWrap() {
FuncReturn(func((u32)PARAM(0), (u32)PARAM(1)).raw);
FuncReturn(func((u32)Param(0), (u32)Param(1)).raw);
}

template <ResultCode func(u32*, u32)>
void SvcWrap() {
u32 param_1 = 0;
u32 retval = func(&param_1, (u32)PARAM(1)).raw;
u32 retval = func(&param_1, (u32)Param(1)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
@ -52,39 +54,39 @@ void SvcWrap() {
template <ResultCode func(u32*, u64)>
void SvcWrap() {
u32 param_1 = 0;
u32 retval = func(&param_1, PARAM(1)).raw;
u32 retval = func(&param_1, Param(1)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}

template <ResultCode func(u64, s32)>
void SvcWrap() {
FuncReturn(func(PARAM(0), (s32)PARAM(1)).raw);
FuncReturn(func(Param(0), (s32)Param(1)).raw);
}

template <ResultCode func(u64*, u64)>
void SvcWrap() {
u64 param_1 = 0;
u32 retval = func(&param_1, PARAM(1)).raw;
u32 retval = func(&param_1, Param(1)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}

template <ResultCode func(u32, u64)>
void SvcWrap() {
FuncReturn(func((u32)(PARAM(0) & 0xFFFFFFFF), PARAM(1)).raw);
FuncReturn(func((u32)(Param(0) & 0xFFFFFFFF), Param(1)).raw);
}

template <ResultCode func(u32, u32, u64)>
void SvcWrap() {
FuncReturn(func((u32)(PARAM(0) & 0xFFFFFFFF), (u32)(PARAM(1) & 0xFFFFFFFF), PARAM(2)).raw);
FuncReturn(func((u32)(Param(0) & 0xFFFFFFFF), (u32)(Param(1) & 0xFFFFFFFF), Param(2)).raw);
}

template <ResultCode func(u32, u32*, u64*)>
void SvcWrap() {
u32 param_1 = 0;
u64 param_2 = 0;
ResultCode retval = func((u32)(PARAM(2) & 0xFFFFFFFF), &param_1, &param_2);
ResultCode retval = func((u32)(Param(2) & 0xFFFFFFFF), &param_1, &param_2);
Core::CurrentArmInterface().SetReg(1, param_1);
Core::CurrentArmInterface().SetReg(2, param_2);
FuncReturn(retval.raw);
@ -93,46 +95,46 @@ void SvcWrap() {
template <ResultCode func(u64, u64, u32, u32)>
void SvcWrap() {
FuncReturn(
func(PARAM(0), PARAM(1), (u32)(PARAM(3) & 0xFFFFFFFF), (u32)(PARAM(3) & 0xFFFFFFFF)).raw);
func(Param(0), Param(1), (u32)(Param(3) & 0xFFFFFFFF), (u32)(Param(3) & 0xFFFFFFFF)).raw);
}

template <ResultCode func(u32, u64, u32)>
void SvcWrap() {
FuncReturn(func((u32)PARAM(0), PARAM(1), (u32)PARAM(2)).raw);
FuncReturn(func((u32)Param(0), Param(1), (u32)Param(2)).raw);
}

template <ResultCode func(u64, u64, u64)>
void SvcWrap() {
FuncReturn(func(PARAM(0), PARAM(1), PARAM(2)).raw);
FuncReturn(func(Param(0), Param(1), Param(2)).raw);
}

template <ResultCode func(u32, u64, u64, u32)>
void SvcWrap() {
FuncReturn(func((u32)PARAM(0), PARAM(1), PARAM(2), (u32)PARAM(3)).raw);
FuncReturn(func((u32)Param(0), Param(1), Param(2), (u32)Param(3)).raw);
}

template <ResultCode func(u32, u64, u64)>
void SvcWrap() {
FuncReturn(func((u32)PARAM(0), PARAM(1), PARAM(2)).raw);
FuncReturn(func((u32)Param(0), Param(1), Param(2)).raw);
}

template <ResultCode func(u32*, u64, u64, s64)>
void SvcWrap() {
u32 param_1 = 0;
ResultCode retval = func(&param_1, PARAM(1), (u32)(PARAM(2) & 0xFFFFFFFF), (s64)PARAM(3));
ResultCode retval = func(&param_1, Param(1), (u32)(Param(2) & 0xFFFFFFFF), (s64)Param(3));
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval.raw);
}

template <ResultCode func(u64, u64, u32, s64)>
void SvcWrap() {
FuncReturn(func(PARAM(0), PARAM(1), (u32)PARAM(2), (s64)PARAM(3)).raw);
FuncReturn(func(Param(0), Param(1), (u32)Param(2), (s64)Param(3)).raw);
}

template <ResultCode func(u64*, u64, u64, u64)>
void SvcWrap() {
u64 param_1 = 0;
u32 retval = func(&param_1, PARAM(1), PARAM(2), PARAM(3)).raw;
u32 retval = func(&param_1, Param(1), Param(2), Param(3)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
@ -141,7 +143,7 @@ template <ResultCode func(u32*, u64, u64, u64, u32, s32)>
void SvcWrap() {
u32 param_1 = 0;
u32 retval =
func(&param_1, PARAM(1), PARAM(2), PARAM(3), (u32)PARAM(4), (s32)(PARAM(5) & 0xFFFFFFFF))
func(&param_1, Param(1), Param(2), Param(3), (u32)Param(4), (s32)(Param(5) & 0xFFFFFFFF))
.raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
@ -151,13 +153,13 @@ template <ResultCode func(MemoryInfo*, PageInfo*, u64)>
void SvcWrap() {
MemoryInfo memory_info = {};
PageInfo page_info = {};
u32 retval = func(&memory_info, &page_info, PARAM(2)).raw;
u32 retval = func(&memory_info, &page_info, Param(2)).raw;

Memory::Write64(PARAM(0), memory_info.base_address);
Memory::Write64(PARAM(0) + 8, memory_info.size);
Memory::Write32(PARAM(0) + 16, memory_info.type);
Memory::Write32(PARAM(0) + 20, memory_info.attributes);
Memory::Write32(PARAM(0) + 24, memory_info.permission);
Memory::Write64(Param(0), memory_info.base_address);
Memory::Write64(Param(0) + 8, memory_info.size);
Memory::Write32(Param(0) + 16, memory_info.type);
Memory::Write32(Param(0) + 20, memory_info.attributes);
Memory::Write32(Param(0) + 24, memory_info.permission);

FuncReturn(retval);
}
@ -165,7 +167,7 @@ void SvcWrap() {
template <ResultCode func(u32*, u64, u64, u32)>
void SvcWrap() {
u32 param_1 = 0;
u32 retval = func(&param_1, PARAM(1), PARAM(2), (u32)(PARAM(3) & 0xFFFFFFFF)).raw;
u32 retval = func(&param_1, Param(1), Param(2), (u32)(Param(3) & 0xFFFFFFFF)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
@ -174,7 +176,7 @@ template <ResultCode func(Handle*, u64, u32, u32)>
void SvcWrap() {
u32 param_1 = 0;
u32 retval =
func(&param_1, PARAM(1), (u32)(PARAM(2) & 0xFFFFFFFF), (u32)(PARAM(3) & 0xFFFFFFFF)).raw;
func(&param_1, Param(1), (u32)(Param(2) & 0xFFFFFFFF), (u32)(Param(3) & 0xFFFFFFFF)).raw;
Core::CurrentArmInterface().SetReg(1, param_1);
FuncReturn(retval);
}
@ -182,14 +184,14 @@ void SvcWrap() {
template <ResultCode func(u64, u32, s32, s64)>
void SvcWrap() {
FuncReturn(
func(PARAM(0), (u32)(PARAM(1) & 0xFFFFFFFF), (s32)(PARAM(2) & 0xFFFFFFFF), (s64)PARAM(3))
func(Param(0), (u32)(Param(1) & 0xFFFFFFFF), (s32)(Param(2) & 0xFFFFFFFF), (s64)Param(3))
.raw);
}

template <ResultCode func(u64, u32, s32, s32)>
void SvcWrap() {
FuncReturn(func(PARAM(0), (u32)(PARAM(1) & 0xFFFFFFFF), (s32)(PARAM(2) & 0xFFFFFFFF),
(s32)(PARAM(3) & 0xFFFFFFFF))
FuncReturn(func(Param(0), (u32)(Param(1) & 0xFFFFFFFF), (s32)(Param(2) & 0xFFFFFFFF),
(s32)(Param(3) & 0xFFFFFFFF))
.raw);
}

@ -219,20 +221,17 @@ void SvcWrap() {

template <void func(s64)>
void SvcWrap() {
func((s64)PARAM(0));
func((s64)Param(0));
}

template <void func(u64, u64 len)>
void SvcWrap() {
func(PARAM(0), PARAM(1));
func(Param(0), Param(1));
}

template <void func(u64, u64, u64)>
void SvcWrap() {
func(PARAM(0), PARAM(1), PARAM(2));
func(Param(0), Param(1), Param(2));
}

#undef PARAM
#undef FuncReturn

} // namespace Kernel
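
Each SvcWrap instantiation above pulls its arguments out of the guest registers through Param(), forwards them to one strongly typed SVC handler, and pushes the result back, so every wrapper ends up with the same void() signature. A hedged sketch of how wrappers like these are typically collected into a dispatch table (the table type, array name, and SVC numbers below are illustrative assumptions, not code from this commit):

// Hypothetical dispatch sketch: uniform signatures allow a plain table of function pointers.
using SvcEntry = void (*)();

static constexpr SvcEntry example_svc_table[] = {
    SvcWrap<SetHeapSize>, // would handle e.g. SVC 0x01, ResultCode(VAddr*, u64)
    SvcWrap<MapMemory>,   // would handle e.g. SVC 0x04, ResultCode(VAddr, VAddr, u64)
};
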
@ -217,8 +217,8 @@ static void ResetThreadContext(Core::ARM_Interface::ThreadContext& context, VAdd
context.cpu_registers[0] = arg;
context.pc = entry_point;
context.sp = stack_top;
context.cpsr = 0;
context.fpscr = 0;
context.pstate = 0;
context.fpcr = 0;
}

ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name, VAddr entry_point,
@ -275,7 +275,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
available_slot = 0; // Use the first slot in the new page

// Allocate some memory from the end of the linear heap for this region.
const size_t offset = thread->tls_memory->size();
const std::size_t offset = thread->tls_memory->size();
thread->tls_memory->insert(thread->tls_memory->end(), Memory::PAGE_SIZE, 0);

auto& vm_manager = owner_process->vm_manager;
@ -254,7 +254,7 @@ public:
Handle callback_handle;

using WakeupCallback = bool(ThreadWakeupReason reason, SharedPtr<Thread> thread,
SharedPtr<WaitObject> object, size_t index);
SharedPtr<WaitObject> object, std::size_t index);
// Callback that will be invoked when the thread is resumed from a waiting state. If the thread
// was waiting via WaitSynchronizationN then the object will be the last object that became
// available. In case of a timeout, the object will be nullptr.
@ -86,7 +86,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {

ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
std::shared_ptr<std::vector<u8>> block,
size_t offset, u64 size,
std::size_t offset, u64 size,
MemoryState state) {
ASSERT(block != nullptr);
ASSERT(offset + size <= block->size());
@ -81,7 +81,7 @@ struct VirtualMemoryArea {
/// Memory block backing this VMA.
std::shared_ptr<std::vector<u8>> backing_block = nullptr;
/// Offset into the backing_memory the mapping starts from.
size_t offset = 0;
std::size_t offset = 0;

// Settings for type = BackingMemory
/// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
@ -147,7 +147,7 @@ public:
* @param state MemoryState tag to attach to the VMA.
*/
ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
size_t offset, u64 size, MemoryState state);
std::size_t offset, u64 size, MemoryState state);

/**
* Maps an unmanaged host memory pointer at a given address.
@ -81,7 +81,7 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) {
}
}

size_t index = thread->GetWaitObjectIndex(this);
std::size_t index = thread->GetWaitObjectIndex(this);

for (auto& object : thread->wait_objects)
object->RemoveWaitingThread(thread.get());
Some files were not shown because too many files have changed in this diff.