Mirror of https://github.com/yuzu-emu/yuzu.git, synced 2024-11-16 01:40:05 +00:00
NVServices: Address Feedback
commit f3a39e0c9c
parent d20ede40b1
@@ -36,9 +36,8 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u3
         addr, offset, width, height, stride, static_cast<PixelFormat>(format),
         transform, crop_rect};
 
-    auto& instance = system;
-    instance.GetPerfStats().EndGameFrame();
-    instance.GPU().SwapBuffers(framebuffer);
+    system.GetPerfStats().EndGameFrame();
+    system.GPU().SwapBuffers(framebuffer);
 }
 
 } // namespace Service::Nvidia::Devices
@@ -87,13 +87,19 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>&
     u32 event_id;
     if (is_async) {
         event_id = params.value & 0x00FF;
-        if (event_id >= 64) {
+        if (event_id >= MaxNvEvents) {
             std::memcpy(output.data(), &params, sizeof(params));
             return NvResult::BadParameter;
         }
     } else {
         if (ctrl.fresh_call) {
-            event_id = events_interface.GetFreeEvent();
+            const auto result = events_interface.GetFreeEvent();
+            if (result) {
+                event_id = *result;
+            } else {
+                LOG_CRITICAL(Service_NVDRV, "No Free Events available!");
+                event_id = params.value & 0x00FF;
+            }
         } else {
             event_id = ctrl.event_id;
         }
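The wait path above is the first consumer of the new std::optional-returning GetFreeEvent() (its definition changes further down, in EventInterface). As a rough standalone sketch of the same sentinel-to-optional pattern, using hypothetical names rather than yuzu's actual types:

#include <cstdint>
#include <optional>

// Hypothetical slot pool: hands out the index of a free slot, or nothing at all.
// std::optional makes the "no slot free" case explicit instead of relying on a
// magic sentinel value such as 0xFFFFFFFF.
struct SlotPool {
    static constexpr std::uint32_t MaxSlots = 64;
    std::uint64_t busy_mask{};

    std::optional<std::uint32_t> GetFreeSlot() const {
        for (std::uint32_t i = 0; i < MaxSlots; ++i) {
            if ((busy_mask & (1ULL << i)) == 0) {
                return i; // free slot found
            }
        }
        return std::nullopt; // every slot is busy
    }
};

// Caller-side shape, mirroring the hunk above:
//     if (const auto slot = pool.GetFreeSlot()) { /* use *slot */ } else { /* fall back */ }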
@@ -129,6 +135,7 @@ u32 nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<
     IocCtrlEventRegisterParams params{};
     std::memcpy(&params, input.data(), sizeof(params));
     const u32 event_id = params.user_event_id & 0x00FF;
+    LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
     if (event_id >= MaxNvEvents) {
         return NvResult::BadParameter;
     }
@@ -143,6 +150,7 @@ u32 nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input, std::vecto
     IocCtrlEventUnregisterParams params{};
     std::memcpy(&params, input.data(), sizeof(params));
     const u32 event_id = params.user_event_id & 0x00FF;
+    LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
     if (event_id >= MaxNvEvents) {
         return NvResult::BadParameter;
     }
@@ -159,7 +167,7 @@ u32 nvhost_ctrl::IocCtrlEventSignal(const std::vector<u8>& input, std::vector<u8
     // TODO(Blinkhawk): This is normally called when an NvEvents timeout on WaitSynchronization
     // It is believed from RE to cancel the GPU Event. However, better research is required
     u32 event_id = params.user_event_id & 0x00FF;
-    LOG_WARNING(Service_NVDRV, "(STUBBED) called, user_event_id: {:X}", event_id);
+    LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
     if (event_id >= MaxNvEvents) {
         return NvResult::BadParameter;
     }
@@ -10,6 +10,7 @@
 #include "common/common_types.h"
 #include "common/swap.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
+#include "core/hle/service/nvdrv/nvdata.h"
 
 namespace Service::Nvidia::Devices {
 
@@ -114,10 +115,6 @@ private:
     static_assert(sizeof(IoctlGetErrorNotification) == 16,
                   "IoctlGetErrorNotification is incorrect size");
 
-    struct Fence {
-        u32_le id;
-        u32_le value;
-    };
-    static_assert(sizeof(Fence) == 8, "Fence is incorrect size");
-
     struct IoctlAllocGpfifoEx {
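This removal pairs with the nvdata.h include added in the previous hunk of the same header: Fence is now provided by core/hle/service/nvdrv/nvdata.h, so it no longer needs a local redeclaration here.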
@@ -103,7 +103,7 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
 
     IPC::ResponseBuilder rb{ctx, 3, 1};
     rb.Push(RESULT_SUCCESS);
-    if (event_id < 64) {
+    if (event_id < MaxNvEvents) {
         rb.PushCopyObjects(nvdrv->GetEvent(event_id));
         rb.Push<u32>(NvResult::Success);
     } else {
@@ -35,9 +35,13 @@ enum class EventState {
 };
 
 struct IoctlCtrl {
+    // First call done to the servioce for services that call itself again after a call.
     bool fresh_call{true};
+    // Tells the Ioctl Wrapper that it must delay the IPC response and send the thread to sleep
     bool must_delay{};
+    // Timeout for the delay
     s64 timeout{};
+    // NV Event Id
     s32 event_id{-1};
 };
 
@@ -27,25 +27,34 @@ class nvdevice;
 }
 
 struct EventInterface {
+    // Mask representing currently busy events
     u64 events_mask{};
+    // Each kernel event associated to an NV event
     std::array<Kernel::EventPair, MaxNvEvents> events;
+    // The status of the current NVEvent
     std::array<EventState, MaxNvEvents> status{};
+    // Tells if an NVEvent is registered or not
     std::array<bool, MaxNvEvents> registered{};
+    // When an NVEvent is waiting on GPU interrupt, this is the sync_point
+    // associated with it.
     std::array<u32, MaxNvEvents> assigned_syncpt{};
+    // This is the value of the GPU interrupt for which the NVEvent is waiting
+    // for.
     std::array<u32, MaxNvEvents> assigned_value{};
-    static constexpr u32 null_event = 0xFFFFFFFF;
-    u32 GetFreeEvent() const {
+    // Constant to denote an unasigned syncpoint.
+    static constexpr u32 unassigned_syncpt = 0xFFFFFFFF;
+    std::optional<u32> GetFreeEvent() const {
         u64 mask = events_mask;
         for (u32 i = 0; i < MaxNvEvents; i++) {
             const bool is_free = (mask & 0x1) == 0;
             if (is_free) {
                 if (status[i] == EventState::Registered || status[i] == EventState::Free) {
-                    return i;
+                    return {i};
                 }
             }
             mask = mask >> 1;
         }
-        return null_event;
+        return {};
     }
     void SetEventStatus(const u32 event_id, EventState new_status) {
         EventState old_status = status[event_id];
@@ -57,7 +66,7 @@ struct EventInterface {
             registered[event_id] = true;
         }
         if (new_status == EventState::Waiting || new_status == EventState::Busy) {
-            events_mask |= (1 << event_id);
+            events_mask |= (1ULL << event_id);
        }
     }
     void RegisterEvent(const u32 event_id) {
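The 1ULL change is more than style: events_mask is declared u64 and MaxNvEvents is 64, so shifting a plain int by 32 or more is undefined behaviour and could never reach the upper half of the mask anyway. A small illustrative program (not yuzu code):

#include <cstdint>
#include <cstdio>

int main() {
    const std::uint32_t event_id = 40; // any id >= 32 shows the problem
    std::uint64_t mask = 0;

    // Wrong: `1` is a 32-bit int, so `1 << event_id` is undefined for event_id >= 32
    // and cannot set bits 32-63 of the 64-bit mask.
    // mask |= (1 << event_id);

    // Correct: widen to 64 bits before shifting.
    mask |= (1ULL << event_id);
    std::printf("bit %u set -> 0x%016llx\n", static_cast<unsigned>(event_id),
                static_cast<unsigned long long>(mask));
    return 0;
}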
@@ -74,8 +83,8 @@ struct EventInterface {
     }
     void LiberateEvent(const u32 event_id) {
         status[event_id] = registered[event_id] ? EventState::Registered : EventState::Free;
-        events_mask &= ~(1 << event_id);
-        assigned_syncpt[event_id] = 0xFFFFFFFF;
+        events_mask &= ~(1ULL << event_id);
+        assigned_syncpt[event_id] = unassigned_syncpt;
         assigned_value[event_id] = 0;
     }
 };
@@ -80,6 +80,7 @@ void BufferQueue::QueueBuffer(u32 slot, BufferTransformFlags transform,
 
 std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() {
     auto itr = queue.end();
+    // Iterate to find a queued buffer matching the requested slot.
     while (itr == queue.end() && !queue_sequence.empty()) {
         u32 slot = queue_sequence.front();
         itr = std::find_if(queue.begin(), queue.end(), [&slot](const Buffer& buffer) {
@@ -536,7 +536,7 @@ private:
 
         if (result) {
            // Buffer is available
-            IGBPDequeueBufferResponseParcel response{(*result).first, *(*result).second};
+            IGBPDequeueBufferResponseParcel response{result->first, *result->second};
            ctx.WriteBuffer(response.Serialize());
        } else {
            // Wait the current thread until a buffer becomes available
@@ -549,8 +549,7 @@ private:
                    auto result = buffer_queue.DequeueBuffer(width, height);
                    ASSERT_MSG(result != std::nullopt, "Could not dequeue buffer.");
 
-                    IGBPDequeueBufferResponseParcel response{(*result).first,
-                                                             *(*result).second};
+                    IGBPDequeueBufferResponseParcel response{result->first, *result->second};
                    ctx.WriteBuffer(response.Serialize());
                    IPC::ResponseBuilder rb{ctx, 2};
                    rb.Push(RESULT_SUCCESS);
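The two IGBPDequeueBufferResponseParcel hunks above are readability-only changes: for a std::optional, result->first is equivalent to (*result).first. A trivial illustration (not yuzu code):

#include <optional>
#include <utility>

int main() {
    std::optional<std::pair<int, int>> result = std::make_pair(1, 2);
    // operator-> forwards to the contained value, so these are the same access.
    const int a = (*result).first;
    const int b = result->first;
    return (a == b) ? 0 : 1;
}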