// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cstddef>
#include <cstring>
#include "audio_core/dsp_interface.h"
#include "audio_core/sink.h"
#include "audio_core/sink_details.h"
#include "common/assert.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/dumping/backend.h"

namespace AudioCore {

DspInterface::DspInterface(Core::System& system_) : system(system_) {}

DspInterface::~DspInterface() = default;

void DspInterface::SetSink(AudioCore::SinkType sink_type, std::string_view audio_device) {
    // Dispose of the current sink first to avoid contention.
    sink.reset();
    sink = AudioCore::GetSinkDetails(sink_type).create_sink(audio_device);
    sink->SetCallback(
        [this](s16* buffer, std::size_t num_frames) { OutputCallback(buffer, num_frames); });
    time_stretcher.SetOutputSampleRate(sink->GetNativeSampleRate());
}

Sink& DspInterface::GetSink() {
    ASSERT(sink);
    return *sink.get();
}

void DspInterface::EnableStretching(bool enable) {
    enable_time_stretching = enable;
}

void DspInterface::OutputFrame(StereoFrame16 frame) {
    if (!sink) {
        return;
    }

    fifo.Push(frame.data(), frame.size());

    auto video_dumper = system.GetVideoDumper();
    if (video_dumper && video_dumper->IsDumping()) {
        video_dumper->AddAudioFrame(std::move(frame));
    }
}

void DspInterface::OutputSample(std::array<s16, 2> sample) {
    if (!sink) {
        return;
    }

    fifo.Push(&sample, 1);

    auto video_dumper = system.GetVideoDumper();
    if (video_dumper && video_dumper->IsDumping()) {
        video_dumper->AddAudioSample(std::move(sample));
    }
}

void DspInterface::OutputCallback(s16* buffer, std::size_t num_frames) {
    // Determine whether we should stretch based on the current emulation speed.
    const auto perf_stats = system.GetLastPerfStats();
    const auto should_stretch = enable_time_stretching && perf_stats.emulation_speed <= 95;

    if (performing_time_stretching && !should_stretch) {
        // If we just stopped stretching, flush the stretcher before returning to normal output.
        flushing_time_stretcher = true;
    }
    performing_time_stretching = should_stretch;

    std::size_t frames_written = 0;
    if (performing_time_stretching) {
        const std::vector<s16> in{fifo.Pop()};
        const std::size_t num_in{in.size() / 2};
        frames_written = time_stretcher.Process(in.data(), num_in, buffer, num_frames);
    } else {
        if (flushing_time_stretcher) {
            time_stretcher.Flush();
            frames_written = time_stretcher.Process(nullptr, 0, buffer, num_frames);
            flushing_time_stretcher = false;
            // Make sure any frames that did not fit are cleared from the time stretcher,
            // so that they do not bleed into the next time the stretcher is enabled.
            time_stretcher.Clear();
        }
        frames_written += fifo.Pop(buffer, num_frames - frames_written);
    }

    if (frames_written > 0) {
        std::memcpy(&last_frame[0], buffer + 2 * (frames_written - 1), 2 * sizeof(s16));
    }

    // Hold the last emitted frame; this prevents popping.
    for (std::size_t i = frames_written; i < num_frames; i++) {
        std::memcpy(buffer + 2 * i, &last_frame[0], 2 * sizeof(s16));
    }

    // Implementation of the hardware volume slider.
    // A cubic curve is used to approximate a linear change in human-perceived loudness.
    const float linear_volume = std::clamp(Settings::Volume(), 0.0f, 1.0f);
    if (linear_volume != 1.0f) {
        const float volume_scale_factor = linear_volume * linear_volume * linear_volume;
        for (std::size_t i = 0; i < num_frames; i++) {
            buffer[i * 2 + 0] = static_cast<s16>(buffer[i * 2 + 0] * volume_scale_factor);
            buffer[i * 2 + 1] = static_cast<s16>(buffer[i * 2 + 1] * volume_scale_factor);
        }
    }
}

} // namespace AudioCore