author    Markus Wick <markus@selfnet.de>  2021-04-07 13:57:49 +0200
committer Markus Wick <markus@selfnet.de>  2021-04-07 22:38:52 +0200
commit    e6fb49fa4bb2864702abcefc14f6bb62eaba7a7e
tree      6cad1f7c35b6f9c539fbb9ad0fa2a1359a543a97
parent    5145133a604f626c05f832465ac22019b003c32a
video_core/gpu_thread: Keep the write lock for allocating the fence.
Otherwise the fence might get submitted out-of-order into the queue, which makes testing them pointless. The overhead should be tiny, as the mutex is just moved from the queue to the writing code.
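To illustrate the race this guards against, here is a minimal, self-contained sketch of the pattern (hypothetical names; a std::queue guarded by the external mutex stands in for Common::SPSCQueue, and this is not the actual ThreadManager code). The fence counter increment and the queue push must happen under the same lock: without it, one thread can allocate fence N, get preempted, and another thread can allocate and push fence N+1 first, so the consumer sees fences out of order.

// Minimal sketch, not the yuzu implementation. The names Command, SynchState
// and PushCommand mirror the real ones but are simplified placeholders.
#include <cstdint>
#include <mutex>
#include <queue>
#include <utility>

struct Command {}; // placeholder for CommandData

struct SynchState {
    std::mutex write_lock;                                // serializes all producers
    std::queue<std::pair<Command, std::uint64_t>> queue;  // stand-in for Common::SPSCQueue
    std::uint64_t last_fence{};                           // fence allocator, only written under write_lock
};

std::uint64_t PushCommand(SynchState& state, Command&& command, bool is_async) {
    std::unique_lock lk(state.write_lock);
    // Allocating the fence and pushing it happen under the same lock, so fence N
    // is always enqueued before fence N+1 and the fence sequence in the queue
    // stays monotonic from the consumer's point of view.
    const std::uint64_t fence = ++state.last_fence;
    state.queue.emplace(std::move(command), fence);
    if (!is_async) {
        // Drop the lock before blocking so other producers are not stalled while
        // this thread waits for the consumer to drain the queue (WaitIdle() in
        // the real code).
        lk.unlock();
    }
    return fence;
}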
Diffstat (limited to 'src')
-rw-r--r--  src/video_core/gpu_thread.cpp  2
-rw-r--r--  src/video_core/gpu_thread.h    3
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 6b8f06f780..9488bf5444 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -151,11 +151,13 @@ void ThreadManager::OnCommandListEnd() {
 }
 u64 ThreadManager::PushCommand(CommandData&& command_data) {
+    std::unique_lock lk(state.write_lock);
     const u64 fence{++state.last_fence};
     state.queue.Push(CommandDataContainer(std::move(command_data), fence));
     if (!is_async) {
         // In synchronous GPU mode, block the caller until the command has executed
+        lk.unlock();
         WaitIdle();
     }
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index d384164de2..cb901c22a8 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -101,7 +101,8 @@ struct CommandDataContainer {
 struct SynchState final {
     std::atomic_bool is_running{true};
-    using CommandQueue = Common::MPSCQueue<CommandDataContainer>;
+    using CommandQueue = Common::SPSCQueue<CommandDataContainer>;
+    std::mutex write_lock;
     CommandQueue queue;
     u64 last_fence{};
     std::atomic<u64> signaled_fence{};
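For context on why out-of-order fences would "make testing them pointless": the consumer publishes the fence of the last executed command as a single, monotonically increasing watermark, and waiters compare against that value. A rough sketch of that consumer side, assuming the producer sketch above (illustrative names and simplified types, not the actual GPU-thread loop):

// Illustrative consumer side; not the actual yuzu GPU-thread loop.
#include <atomic>
#include <cstdint>

struct CommandDataContainer {
    std::uint64_t fence{};
    // command payload omitted
};

std::atomic<std::uint64_t> signaled_fence{0};

// Run by the single consumer (the GPU thread) after executing a command.
void SignalFence(const CommandDataContainer& executed) {
    // signaled_fence acts as a high-water mark: "every fence up to this value
    // has executed". That only holds if fences entered the queue in order;
    // publishing fence 5 must not happen before fences 3 and 4 were executed.
    signaled_fence.store(executed.fence, std::memory_order_release);
}

// Run by a producer (e.g. a WaitIdle-style helper) to wait for a given fence.
void WaitForFence(std::uint64_t fence) {
    while (signaled_fence.load(std::memory_order_acquire) < fence) {
        // busy-wait; real code would yield or block instead
    }
}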