DSP/Audio: Add documentation, cleanup, fix a weird crinkly audio bug, fix truncated ADPCM decoding
commit 444cf8b1ae (parent 15d73b1259)
src/core/CMakeLists.txt
@@ -11,7 +11,7 @@ set(SRCS
             arm/skyeye_common/vfp/vfpdouble.cpp
             arm/skyeye_common/vfp/vfpinstr.cpp
             arm/skyeye_common/vfp/vfpsingle.cpp
-            audio/stream.cpp
+            audio/audio.cpp
             core.cpp
             core_timing.cpp
             file_sys/archive_backend.cpp
@@ -138,7 +138,7 @@ set(HEADERS
             arm/skyeye_common/vfp/asm_vfp.h
             arm/skyeye_common/vfp/vfp.h
             arm/skyeye_common/vfp/vfp_helper.h
-            audio/stream.h
+            audio/audio.h
             core.h
             core_timing.h
             file_sys/archive_backend.h
src/core/audio/audio.cpp (new file, 339 lines)
@@ -0,0 +1,339 @@
#include "AL/al.h"
#include "AL/alc.h"
#include "AL/alext.h"

#include "common/logging/log.h"

#include "core/audio/audio.h"

#include <algorithm>
#include <array>
#include <queue>

namespace Audio {

static const int BASE_SAMPLE_RATE = 22050;

struct Buffer {
    u16 id;          ///< buffer_id that userland gives us
    ALuint buffer;
    bool is_looping;

    bool operator < (const Buffer& other) const {
        // We want things with lower id to appear first, unless we have wraparound.
        // priority_queue puts a before b when b < a.
        // Should perhaps be a instead.
        if ((other.id - id) > 1000) return true;
        if ((id - other.id) > 1000) return false;
        return id > other.id;
    }
};
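A quick aside on the comparator above (illustrative, not part of the commit): std::priority_queue is a max-heap, so the buffer that should play next has to compare greatest, and the 1000-id window is how the code guesses that a u16 id has wrapped. A minimal standalone sketch of the same expressions, with made-up ids:

// Illustrative only: the same comparison as Buffer::operator<, applied to bare
// ids. "plays_later(a, b)" mirrors "a < b": a is served after b.
#include <cassert>
#include <cstdint>

static bool plays_later(std::uint16_t id, std::uint16_t other) {
    if ((other - id) > 1000) return true;   // other is far ahead: id has wrapped, so other really came first
    if ((id - other) > 1000) return false;  // id is far ahead: other has wrapped, so id really came first
    return id > other;                      // normal case: lower id plays first
}

int main() {
    assert(plays_later(11, 10));      // id 10 plays before id 11
    assert(!plays_later(10, 11));
    assert(plays_later(5, 65000));    // across u16 wraparound, the old 65000 buffer still plays before 5
    assert(!plays_later(65000, 5));
}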
struct AdpcmState {
    // Two historical samples from previous processed buffer
    s16 yn1; ///< y[n-1]
    s16 yn2; ///< y[n-2]
};

// GC-ADPCM with scale factor and variable coefficients.
// Frames are 8 bytes long containing 14 samples each.
// Samples are 4 bits (one nybble) long.
std::vector<s16> DecodeADPCM(const u8 * const data, const size_t sample_count, const std::array<s16, 16>& adpcm_coeff, AdpcmState& state) {
    const size_t FRAME_LEN = 8;
    const size_t SAMPLES_PER_FRAME = 14;
    const static int SIGNED_NYBBLES[16] = { 0,1,2,3,4,5,6,7,-8,-7,-6,-5,-4,-3,-2,-1 };

    std::vector<s16> ret(sample_count);
    int yn1 = 0, yn2 = 0;// state.yn1, yn2 = state.yn2;

    const int NUM_FRAMES = (sample_count + (SAMPLES_PER_FRAME-1)) / SAMPLES_PER_FRAME; // Round up.
    for (int frameno = 0; frameno < NUM_FRAMES; frameno++) {
        int frame_header = data[frameno * FRAME_LEN];
        int scale = 1 << (frame_header & 0xF);
        int idx = (frame_header >> 4) & 0x7;

        // Coefficients are fixed point with 11 bits fractional part.
        int coef1 = adpcm_coeff[idx * 2 + 0];
        int coef2 = adpcm_coeff[idx * 2 + 1];

        auto process_nybble = [&](int nybble) -> s16 {
            int xn = nybble * scale;
            // We first transform everything into 11 bit fixed point, perform the second order digital filter, then transform back.
            // 0x400 == 0.5 in 11 bit fixed point.
            // Filter: y[n] = x[n] + 0.5 + c1 * y[n-1] + c2 * y[n-2]
            int val = ((xn << 11) + 0x400 + coef1 * yn1 + coef2 * yn2) >> 11;
            // Clamp to output range.
            if (val >= 32767) val = 32767;
            if (val <= -32768) val = -32768;
            // Advance output feedback.
            yn2 = yn1;
            yn1 = val;
            return (s16)val;
        };

        int outputi = frameno * SAMPLES_PER_FRAME;
        int datai = frameno * FRAME_LEN + 1;
        for (int i = 0; i < SAMPLES_PER_FRAME && outputi < sample_count; i += 2) {
            ret[outputi++] = process_nybble(SIGNED_NYBBLES[data[datai] & 0xF]);
            ret[outputi++] = process_nybble(SIGNED_NYBBLES[data[datai] >> 4]);
            datai++;
        }
    }

    state.yn1 = yn1;
    state.yn2 = yn2;

    return ret;
}
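To make the fixed-point arithmetic in DecodeADPCM concrete, here is a small self-contained sketch of a single filter step with made-up coefficient and history values (illustrative only; coefficients are Q4.11, so 0x800 is 1.0 and 0x400 is the +0.5 rounding term before the shift back down):

// Illustrative only, not part of the commit: one step of the Q4.11 filter.
#include <algorithm>
#include <cassert>

int main() {
    int coef1 = 0x0800, coef2 = -0x0400;   // example coefficients: +1.0 and -0.5
    int yn1 = 100, yn2 = 40;               // previous two decoded samples
    int scale = 1 << 4;                    // from an example frame header scale nybble of 4
    int xn = -3 * scale;                   // signed input nybble -3, scaled: -48

    // y[n] = x[n] + 0.5 + c1*y[n-1] + c2*y[n-2], all in Q11, then back to integer
    int val = ((xn << 11) + 0x400 + coef1 * yn1 + coef2 * yn2) >> 11;
    val = std::min(32767, std::max(-32768, val));

    assert(val == 32);                     // -48 + 100*1.0 + 40*(-0.5) + 0.5, truncated
}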
struct OutputChannel {
    ALuint source;                     ///< Each channel has its own output; we lean on OpenAL to do our mixing.

    // Configuration
    int mono_or_stereo;                ///< Value from userland. 1 == mono, 2 == stereo, other == ???
    Format format;
    bool enabled;                      ///< Userland wants us to remind them we have enabled this channel.

    // Buffer management
    std::priority_queue<Buffer> queue; ///< Things we have gotten from userland we haven't queued onto `source` yet.
    std::queue<Buffer> playing;        ///< Things we have queued onto `source`.
    u16 last_bufid;                    ///< Userland wants us to report back what was the thing we last played.

    // For ADPCM decoding use.
    std::array<s16, 16> adpcm_coeffs;
    AdpcmState adpcm_state;
};

OutputChannel chans[24];

int InitAL() {
    ALCdevice *device = alcOpenDevice(nullptr);
    if (!device) {
        LOG_CRITICAL(Audio, "Could not open a device!");
        return 1;
    }

    ALCcontext *ctx = alcCreateContext(device, nullptr);
    if (ctx == nullptr || alcMakeContextCurrent(ctx) == ALC_FALSE) {
        if (ctx != nullptr) {
            alcDestroyContext(ctx);
        }
        alcCloseDevice(device);
        LOG_CRITICAL(Audio, "Could not set a context!");
        return 1;
    }

    LOG_INFO(Audio, "Audio output is on \"%s\"", alcGetString(device, ALC_DEVICE_SPECIFIER));
    return 0;
}

ALCint dev_rate;               ///< Native sample rate of our output device
std::array<u8, 10000> silence; ///< Some silence, used if an audio error occurs

void Init() {
    InitAL();

    ALCdevice *device = alcGetContextsDevice(alcGetCurrentContext());
    alcGetIntegerv(device, ALC_FREQUENCY, 1, &dev_rate);
    if (alcGetError(device) != ALC_NO_ERROR) {
        LOG_CRITICAL(Audio, "Failed to get device sample rate");
    }
    LOG_INFO(Audio, "Device Frequency: %i", dev_rate);

    for (int i = 0; i < 24; i++) {
        alGenSources(1, &chans[i].source);
        if (alGetError() != AL_NO_ERROR) {
            LOG_CRITICAL(Audio, "Channel %i: Failed to setup sound source", i);
        }
    }

    silence.fill(0);
}

void Shutdown() {
    ALCcontext *ctx = alcGetCurrentContext();
    if (ctx == nullptr) {
        return;
    }

    ALCdevice* dev = alcGetContextsDevice(ctx);

    for (int i = 0; i < 24; i++) {
        alDeleteSources(1, &chans[i].source);
        while (!chans[i].queue.empty()) {
            alDeleteBuffers(1, &chans[i].queue.top().buffer);
            chans[i].queue.pop();
        }
        while (!chans[i].playing.empty()) {
            alDeleteBuffers(1, &chans[i].playing.front().buffer);
            chans[i].playing.pop();
        }
    }

    alcMakeContextCurrent(nullptr);
    alcDestroyContext(ctx);
    alcCloseDevice(dev);
}

void UpdateFormat(int chanid, int mono_or_stereo, Format format) {
    chans[chanid].mono_or_stereo = mono_or_stereo;
    chans[chanid].format = format;
}

void UpdateAdpcm(int chanid, s16 coeffs[16]) {
    LOG_DEBUG(Audio, "Channel %i: ADPCM Coeffs updated", chanid);
    std::copy(coeffs, coeffs+16, std::begin(chans[chanid].adpcm_coeffs));
}

void EnqueueBuffer(int chanid, u16 buffer_id, void* data, int sample_count, bool is_looping) {
    LOG_DEBUG(Audio, "Channel %i: Buffer %i: Enqueued (size %i)", chanid, buffer_id, sample_count);

    if (is_looping) {
        LOG_WARNING(Audio, "Channel %i: Buffer %i: Looped buffers are unimplemented", chanid, buffer_id);
    }

    ALuint b;
    alGenBuffers(1, &b);

    switch (chans[chanid].format) {
    case FORMAT_PCM16:
        switch (chans[chanid].mono_or_stereo) {
        case 2:
            alBufferData(b, AL_FORMAT_STEREO16, data, sample_count * 4, BASE_SAMPLE_RATE);
            break;
        case 1:
        default:
            alBufferData(b, AL_FORMAT_MONO16, data, sample_count * 2, BASE_SAMPLE_RATE);
            break;
        }

        if (alGetError() != AL_NO_ERROR) goto do_silence;

        break;
    case FORMAT_PCM8:
        switch (chans[chanid].mono_or_stereo) {
        case 2:
            alBufferData(b, AL_FORMAT_STEREO8, data, sample_count * 2, BASE_SAMPLE_RATE);
            break;
        case 1:
        default:
            alBufferData(b, AL_FORMAT_MONO8, data, sample_count * 1, BASE_SAMPLE_RATE);
            break;
        }

        if (alGetError() != AL_NO_ERROR) goto do_silence;

        break;
    case FORMAT_ADPCM: {
        if (chans[chanid].mono_or_stereo != 1) {
            LOG_ERROR(Audio, "Channel %i: Buffer %i: Being fed non-mono ADPCM (size: %i samples)", chanid, buffer_id, sample_count);
        }

        std::vector<s16> decoded = DecodeADPCM((u8*)data, sample_count, chans[chanid].adpcm_coeffs, chans[chanid].adpcm_state);
        alBufferData(b, AL_FORMAT_STEREO16, decoded.data(), decoded.size() * 2, BASE_SAMPLE_RATE);

        if (alGetError() != AL_NO_ERROR) goto do_silence;

        break;
    }
    default:
        LOG_ERROR(Audio, "Channel %i: Buffer %i: Unrecognised audio format (size: %i samples)", chanid, buffer_id, sample_count);
    do_silence:
        if (alGetError() != AL_NO_ERROR) {
            LOG_CRITICAL(Audio, "Channel %i: Buffer %i: OpenAL says \"%s\"", chanid, buffer_id, alGetString(alGetError()));
        }
        alBufferData(b, AL_FORMAT_MONO8, silence.data(), silence.size(), BASE_SAMPLE_RATE);
        if (alGetError() != AL_NO_ERROR) {
            LOG_CRITICAL(Audio, "Channel %i: Failed to init silence buffer!!! (%s)", chanid, alGetString(alGetError()));
        }
        break;
    }

    chans[chanid].queue.emplace( Buffer { buffer_id, b, is_looping });

    if (chans[chanid].queue.size() > 10) {
        LOG_ERROR(Audio, "We have far far too many buffers enqueued on channel %i (%i of them)", chanid, chans[chanid].queue.size());
    }
}

void Play(int chanid, bool play) {
    if (play) {
        LOG_INFO(Audio, "Channel %i: Enabled", chanid);
    } else {
        LOG_INFO(Audio, "Channel %i: Disabled", chanid);
    }
    chans[chanid].enabled = play;
}

void Tick(int chanid) {
    auto& c = chans[chanid];

    if (!c.queue.empty()) {
        while (!c.queue.empty()) {
            alSourceQueueBuffers(c.source, 1, &c.queue.top().buffer);
            if (alGetError() != AL_NO_ERROR) {
                alDeleteBuffers(1, &c.queue.top().buffer);
                LOG_CRITICAL(Audio, "Channel %i: Buffer %i: Failed to enqueue : %s", chanid, c.queue.top().id, alGetString(alGetError()));
                c.queue.pop();
                continue;
            }
            c.playing.emplace(c.queue.top());
            c.queue.pop();
        }
        if (c.enabled) {
            ALint state;
            alGetSourcei(c.source, AL_SOURCE_STATE, &state);
            if (state != AL_PLAYING) {
                alSourcePlay(c.source);
            }
        }
    }

    if (chans[chanid].playing.size() > 10) {
        LOG_ERROR(Audio, "Channel %i: We have far far too many buffers enqueued (%i of them)", chanid, chans[chanid].playing.size());
    }

    ALint processed;
    alGetSourcei(c.source, AL_BUFFERS_PROCESSED, &processed);
    while (processed > 0) {
        ALuint buf;
        alSourceUnqueueBuffers(c.source, 1, &buf);
        processed--;

        if (!c.playing.empty()) {
            if (c.playing.front().buffer != buf) {
                LOG_CRITICAL(Audio, "Channel %i: Play queue desynced with OpenAL queue. (buf???)", chanid);
            } else {
                LOG_DEBUG(Audio, "Channel %i: Buffer %i: Finished playing", chanid, c.playing.front().id);
            }
            c.last_bufid = c.playing.front().id;
            c.playing.pop();
        } else {
            LOG_CRITICAL(Audio, "Channel %i: Play queue desynced with OpenAL queue. (empty)", chanid);
        }

        alDeleteBuffers(1, &buf);
    }

    if (!c.playing.empty()) {
        c.last_bufid = c.playing.front().id;
    }
}

std::tuple<bool, u16, u32> GetStatus(int chanid) {
    auto& c = chans[chanid];

    bool isplaying = c.enabled;
    u16 bufid = c.last_bufid;
    u32 pos;

    ALint state, samples;
    alGetSourcei(c.source, AL_SOURCE_STATE, &state);
    alGetSourcei(c.source, AL_SAMPLE_OFFSET, &samples);
    pos = samples;

    return std::make_tuple(isplaying, bufid, pos);
}

};
src/core/audio/audio.h (new file, 37 lines)
@@ -0,0 +1,37 @@
#pragma once

#include "AL/al.h"
#include "AL/alc.h"
#include "AL/alext.h"

#include "common/common_types.h"

#include <tuple>

namespace Audio {

void Init();
void Shutdown();

enum Format : u16 {
    FORMAT_PCM8 = 0,
    FORMAT_PCM16 = 1,
    FORMAT_ADPCM = 2
};

void UpdateFormat(int chanid, int mono_or_stereo, Format format);
void UpdateAdpcm(int chanid, s16 coeffs[16]);

void Play(int chanid, bool play);

void EnqueueBuffer(int chanid, u16 buffer_id, void* data, int sample_count, bool is_looping);

void Tick(int chanid);

// Return values:
// <1>: is_enabled
// <2>: prev buffer_id
// <3>: current sample position in current buffer
std::tuple<bool, u16, u32> GetStatus(int chanid);

};
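A minimal sketch of how a caller is expected to drive this interface (channel id, buffer id and data are illustrative; in this commit the real caller is the DSP service's AudioTick):

// Hypothetical driver for the Audio interface declared above; values are
// illustrative only and the helper function name is made up.
#include "core/audio/audio.h"

void ExampleChannelSetup(s16 (&coeffs)[16], u8* adpcm_data, int sample_count) {
    Audio::Init();

    const int chanid = 0;                                  // example channel
    Audio::UpdateFormat(chanid, 1, Audio::FORMAT_ADPCM);   // mono ADPCM
    Audio::UpdateAdpcm(chanid, coeffs);                    // per-channel coefficient table
    Audio::EnqueueBuffer(chanid, /*buffer_id=*/1, adpcm_data, sample_count, /*is_looping=*/false);
    Audio::Play(chanid, true);

    Audio::Tick(chanid);                                   // called once per audio frame

    bool enabled; u16 prev_id; u32 pos;
    std::tie(enabled, prev_id, pos) = Audio::GetStatus(chanid);

    Audio::Shutdown();
}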
src/core/audio/stream.cpp (deleted, 288 lines)
@@ -1,288 +0,0 @@
#include "AL/al.h"
|
|
||||||
#include "AL/alc.h"
|
|
||||||
#include "AL/alext.h"
|
|
||||||
|
|
||||||
#include "common/logging/log.h"
|
|
||||||
|
|
||||||
#include "core/audio/stream.h"
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
#include <array>
|
|
||||||
#include <queue>
|
|
||||||
|
|
||||||
namespace Audio {
|
|
||||||
std::vector<s16> DecodeADPCM(u8* data, size_t sample_count, bool has_adpcm, u16 adpcm_ps, s16* adpcm_yn, const std::array<s16, 16>& adpcm_coeff);
|
|
||||||
|
|
||||||
static const int BASE_SAMPLE_RATE = 22050;
|
|
||||||
|
|
||||||
struct Buffer {
|
|
||||||
u16 id;
|
|
||||||
ALuint buffer;
|
|
||||||
bool is_looping;
|
|
||||||
|
|
||||||
bool operator < (const Buffer& other) const {
|
|
||||||
if ((other.id - id) > 1000) return true;
|
|
||||||
if ((id - other.id) > 1000) return false;
|
|
||||||
return id > other.id;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct AdpcmState {
|
|
||||||
u16 ps;
|
|
||||||
s16 yn0;
|
|
||||||
s16 yn1;
|
|
||||||
};
|
|
||||||
|
|
||||||
std::vector<s16> DecodeADPCM(u8* data, size_t sample_count, bool has_adpcm, u16 adpcm_ps, s16 adpcm_yn[2], const std::array<s16, 16>& adpcm_coeff, AdpcmState& state) {
|
|
||||||
std::vector<s16> ret(sample_count);
|
|
||||||
|
|
||||||
int yn0 = state.yn0, yn1 = state.yn1;
|
|
||||||
|
|
||||||
if (sample_count % 14 != 0) {
|
|
||||||
LOG_ERROR(Audio, "Audio stream has incomplete frames");
|
|
||||||
}
|
|
||||||
|
|
||||||
const static int signed_nybbles[16] = { 0,1,2,3,4,5,6,7,-8,-7,-6,-5,-4,-3,-2,-1 };
|
|
||||||
|
|
||||||
const int num_frames = sample_count / 14;
|
|
||||||
for (int frameno = 0; frameno < num_frames; frameno++) {
|
|
||||||
int frame_header = data[frameno * 8];
|
|
||||||
|
|
||||||
int scale = 1 << (frame_header & 0xF);
|
|
||||||
int idx = (frame_header >> 4) & 0x7;
|
|
||||||
|
|
||||||
int coef0 = (s16)adpcm_coeff[idx * 2 + 0];
|
|
||||||
int coef1 = (s16)adpcm_coeff[idx * 2 + 1];
|
|
||||||
|
|
||||||
auto next_nybble = [&](int nybble) -> s16 {
|
|
||||||
int val = (((nybble * scale) << 11) + 0x400 + coef0 * yn0 + coef1 * yn1) >> 11;
|
|
||||||
if (val >= 32767) val = 32767;
|
|
||||||
if (val <= -32768) val = -32768;
|
|
||||||
yn1 = yn0;
|
|
||||||
yn0 = val;
|
|
||||||
return (s16)val;
|
|
||||||
};
|
|
||||||
|
|
||||||
for (int i = frameno * 14, datai = frameno * 8 + 1, samplecount = 0; samplecount < 14; i += 2, datai++, samplecount += 2) {
|
|
||||||
ret[i + 0] = next_nybble(signed_nybbles[data[datai] & 0xF]);
|
|
||||||
ret[i + 1] = next_nybble(signed_nybbles[data[datai] >> 4]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
state.yn0 = yn0;
|
|
||||||
state.yn1 = yn1;
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct OutputChannel {
|
|
||||||
ALuint source;
|
|
||||||
|
|
||||||
int mono_or_stereo;
|
|
||||||
Format format;
|
|
||||||
int format_rest;
|
|
||||||
|
|
||||||
std::priority_queue<Buffer> queue;
|
|
||||||
std::queue<Buffer> playing;
|
|
||||||
u16 last_bufid;
|
|
||||||
|
|
||||||
bool enabled;
|
|
||||||
|
|
||||||
std::array<s16, 16> adpcm_coeffs;
|
|
||||||
AdpcmState adpcm_state;
|
|
||||||
};
|
|
||||||
|
|
||||||
OutputChannel chans[24];
|
|
||||||
|
|
||||||
int InitAL(void)
|
|
||||||
{
|
|
||||||
ALCdevice *device;
|
|
||||||
ALCcontext *ctx;
|
|
||||||
|
|
||||||
/* Open and initialize a device with default settings */
|
|
||||||
device = alcOpenDevice(NULL);
|
|
||||||
if (!device)
|
|
||||||
{
|
|
||||||
LOG_CRITICAL(Audio, "Could not open a device!");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx = alcCreateContext(device, NULL);
|
|
||||||
if (ctx == NULL || alcMakeContextCurrent(ctx) == ALC_FALSE)
|
|
||||||
{
|
|
||||||
if (ctx != NULL)
|
|
||||||
alcDestroyContext(ctx);
|
|
||||||
alcCloseDevice(device);
|
|
||||||
LOG_CRITICAL(Audio, "Could not set a context!");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
LOG_INFO(Audio, "Opened \"%s\"", alcGetString(device, ALC_DEVICE_SPECIFIER));
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
ALuint silencebuffer;
|
|
||||||
ALCint dev_rate;
|
|
||||||
std::array<u8, 10000> silence;
|
|
||||||
|
|
||||||
void Init() {
|
|
||||||
InitAL();
|
|
||||||
|
|
||||||
{
|
|
||||||
ALCdevice *device = alcGetContextsDevice(alcGetCurrentContext());
|
|
||||||
alcGetIntegerv(device, ALC_FREQUENCY, 1, &dev_rate);
|
|
||||||
if (alcGetError(device) != ALC_NO_ERROR) LOG_CRITICAL(Audio, "Failed to get device sample rate");
|
|
||||||
LOG_INFO(Audio, "Device Frequency: %i", dev_rate);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int i = 0; i < 24; i++) {
|
|
||||||
alGenSources(1, &chans[i].source);
|
|
||||||
if (alGetError() != AL_NO_ERROR) LOG_CRITICAL(Audio, "Failed to setup sound source");
|
|
||||||
}
|
|
||||||
|
|
||||||
silence.fill(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
void Shutdown() {}
|
|
||||||
|
|
||||||
void UpdateFormat(int chanid, int mono_or_stereo, Format format, int rest) {
|
|
||||||
chans[chanid].mono_or_stereo = mono_or_stereo;
|
|
||||||
chans[chanid].format = format;
|
|
||||||
chans[chanid].format_rest = rest;
|
|
||||||
}
|
|
||||||
|
|
||||||
void UpdateAdpcm(int chanid, s16 coeffs[16]) {
|
|
||||||
LOG_INFO(Audio, "ADPCM Coeffs updated for channel %i", chanid);
|
|
||||||
std::copy(coeffs, coeffs+16, std::begin(chans[chanid].adpcm_coeffs));
|
|
||||||
}
|
|
||||||
|
|
||||||
void EnqueueBuffer(int chanid, u16 buffer_id,
|
|
||||||
void* data, int sample_count,
|
|
||||||
bool has_adpcm, u16 adpcm_ps, s16 adpcm_yn[2],
|
|
||||||
bool is_looping) {
|
|
||||||
LOG_INFO(Audio, "enqueu for %i", chanid);
|
|
||||||
|
|
||||||
if (is_looping) {
|
|
||||||
LOG_WARNING(Audio, "Looped buffers are unimplemented");
|
|
||||||
}
|
|
||||||
|
|
||||||
ALuint b;
|
|
||||||
alGenBuffers(1, &b);
|
|
||||||
|
|
||||||
if (chans[chanid].format == FORMAT_PCM16) {
|
|
||||||
switch (chans[chanid].mono_or_stereo) {
|
|
||||||
case 2:
|
|
||||||
alBufferData(b, AL_FORMAT_STEREO16, data, sample_count * 4, BASE_SAMPLE_RATE);
|
|
||||||
break;
|
|
||||||
case 1:
|
|
||||||
default:
|
|
||||||
alBufferData(b, AL_FORMAT_MONO16, data, sample_count * 2, BASE_SAMPLE_RATE);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (alGetError() != AL_NO_ERROR) LOG_CRITICAL(Audio, "Failed to init buffer");
|
|
||||||
} else if (chans[chanid].format == FORMAT_PCM8) {
|
|
||||||
switch (chans[chanid].mono_or_stereo) {
|
|
||||||
case 2:
|
|
||||||
alBufferData(b, AL_FORMAT_STEREO8, data, sample_count * 2, BASE_SAMPLE_RATE);
|
|
||||||
break;
|
|
||||||
case 1:
|
|
||||||
default:
|
|
||||||
alBufferData(b, AL_FORMAT_MONO8, data, sample_count * 1, BASE_SAMPLE_RATE);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (alGetError() != AL_NO_ERROR) LOG_CRITICAL(Audio, "Failed to init buffer");
|
|
||||||
} else if (chans[chanid].format == FORMAT_ADPCM) {
|
|
||||||
if (chans[chanid].mono_or_stereo != 1) {
|
|
||||||
LOG_ERROR(Audio, "Being fed non-mono ADPCM");
|
|
||||||
}
|
|
||||||
std::vector<s16> decoded = DecodeADPCM((u8*)data, sample_count, has_adpcm, adpcm_ps, adpcm_yn, chans[chanid].adpcm_coeffs, chans[chanid].adpcm_state);
|
|
||||||
alBufferData(b, AL_FORMAT_STEREO16, decoded.data(), decoded.size()*2, BASE_SAMPLE_RATE);
|
|
||||||
if (alGetError() != AL_NO_ERROR) LOG_CRITICAL(Audio, "Failed to init buffer");
|
|
||||||
} else {
|
|
||||||
LOG_ERROR(Audio, "Unrecognised audio format in buffer 0x%04x (size: %i samples)", buffer_id, sample_count);
|
|
||||||
alBufferData(b, AL_FORMAT_MONO8, silence.data(), silence.size(), BASE_SAMPLE_RATE);
|
|
||||||
if (alGetError() != AL_NO_ERROR) LOG_CRITICAL(Audio, "Failed to init buffer");
|
|
||||||
}
|
|
||||||
|
|
||||||
chans[chanid].queue.emplace( Buffer { buffer_id, b, is_looping });
|
|
||||||
}
|
|
||||||
|
|
||||||
void Play(int chanid, bool play) {
|
|
||||||
LOG_INFO(Audio, "Play(%i,%i)", chanid, play);
|
|
||||||
chans[chanid].enabled = play;
|
|
||||||
}
|
|
||||||
|
|
||||||
void Tick(int chanid) {
|
|
||||||
auto& c = chans[chanid];
|
|
||||||
|
|
||||||
if (!c.queue.empty()) {
|
|
||||||
while (!c.queue.empty()) {
|
|
||||||
alSourceQueueBuffers(c.source, 1, &c.queue.top().buffer);
|
|
||||||
if (alGetError() != AL_NO_ERROR) {
|
|
||||||
LOG_CRITICAL(Audio, "Failed to enqueue buffer");
|
|
||||||
c.queue.pop();
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
c.playing.emplace(c.queue.top());
|
|
||||||
LOG_DEBUG(Audio, "Enqueued buffer id 0x%04x", c.queue.top().id);
|
|
||||||
c.queue.pop();
|
|
||||||
}
|
|
||||||
if (c.enabled) {
|
|
||||||
ALint state;
|
|
||||||
alGetSourcei(c.source, AL_SOURCE_STATE, &state);
|
|
||||||
if (state != AL_PLAYING) {
|
|
||||||
alSourcePlay(c.source);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!c.playing.empty()) {
|
|
||||||
c.last_bufid = c.playing.front().id;
|
|
||||||
}
|
|
||||||
|
|
||||||
ALint processed;
|
|
||||||
alGetSourcei(c.source, AL_BUFFERS_PROCESSED, &processed);
|
|
||||||
while (processed > 0) {
|
|
||||||
ALuint buf;
|
|
||||||
alSourceUnqueueBuffers(c.source, 1, &buf);
|
|
||||||
processed--;
|
|
||||||
|
|
||||||
LOG_DEBUG(Audio, "Finished buffer id 0x%04x", c.playing.front().id);
|
|
||||||
|
|
||||||
if (!c.playing.empty()) {
|
|
||||||
if (c.playing.front().buffer != buf) LOG_CRITICAL(Audio, "Audio is extremely funky. Should abort. (Desynced queue.)");
|
|
||||||
|
|
||||||
c.last_bufid = c.playing.front().id;
|
|
||||||
c.playing.pop();
|
|
||||||
} else {
|
|
||||||
LOG_CRITICAL(Audio, "Audio is extremely funky. Should abort. (Empty queue.)");
|
|
||||||
}
|
|
||||||
|
|
||||||
alDeleteBuffers(1, &buf);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!c.playing.empty()) {
|
|
||||||
c.last_bufid = c.playing.front().id;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
std::tuple<bool, u16, u32> GetStatus(int chanid) {
|
|
||||||
auto& c = chans[chanid];
|
|
||||||
|
|
||||||
bool isplaying = c.enabled;
|
|
||||||
u16 bufid = 0;
|
|
||||||
u32 pos = 0;
|
|
||||||
|
|
||||||
ALint state, samples;
|
|
||||||
alGetSourcei(c.source, AL_SOURCE_STATE, &state);
|
|
||||||
alGetSourcei(c.source, AL_SAMPLE_OFFSET, &samples);
|
|
||||||
|
|
||||||
bufid = c.last_bufid;
|
|
||||||
|
|
||||||
pos = samples;
|
|
||||||
|
|
||||||
return std::make_tuple(isplaying, bufid, pos);
|
|
||||||
}
|
|
||||||
|
|
||||||
};
|
|
src/core/audio/stream.h (deleted, 34 lines)
@@ -1,34 +0,0 @@
#pragma once

#include "AL/al.h"
#include "AL/alc.h"
#include "AL/alext.h"

#include "common/common_types.h"

#include <tuple>

namespace Audio {
void Init();
void Shutdown();

enum Format : u16 {
    FORMAT_PCM8 = 0,
    FORMAT_PCM16 = 1,
    FORMAT_ADPCM = 2
};

void UpdateFormat(int chanid, int mono_or_stereo, Format format, int rest);
void UpdateAdpcm(int chanid, s16 coeffs[16]);

void Play(int chanid, bool play);

void EnqueueBuffer(int chanid, u16 buffer_id,
        void* data, int sample_count,
        bool has_adpcm, u16 adpcm_ps, s16 adpcm_yn[2],
        bool is_looping);

void Tick(int chanid);

std::tuple<bool, u16, u32> GetStatus(int chanid);
};
src/core/hle/service/dsp_dsp.cpp
@@ -5,7 +5,7 @@
 #include "common/bit_field.h"
 #include "common/logging/log.h"
 
-#include "core/audio/stream.h"
+#include "core/audio/audio.h"
 #include "core/core_timing.h"
 #include "core/hle/hle.h"
 #include "core/hle/kernel/event.h"
@@ -36,51 +36,51 @@ static std::unordered_map<std::pair<u32, u32>, Kernel::SharedPtr<Kernel::Event>,
 static const u64 frame_tick = 1310252ull;
 static int tick_event;
 
-// Addresses of various things
-static const VAddr BASE_ADDR_0 = Memory::DSP_RAM_VADDR + 0x40000;
-static const VAddr BASE_ADDR_1 = Memory::DSP_RAM_VADDR + 0x60000;
-static constexpr VAddr DspAddrToVAddr(VAddr base, u32 dsp_addr) {
-    return (VAddr(dsp_addr) << 1) + base;
-}
-static const u32 DSPADDR0 = 0xBFFF; // Frame Counter
-static const u32 DSPADDR1 = 0x9E92; // Channel Context (x24)
-static const u32 DSPADDR2 = 0x8680; // Channel Status (x24)
-static const u32 DSPADDR3 = 0xA792; // ADPCM Coefficients (x24)
-static const u32 DSPADDR4 = 0x9430; // Context
-static const u32 DSPADDR5 = 0x8400; // Status
-static const u32 DSPADDR6 = 0x8540; // Loopback Samples
-static const u32 DSPADDR7 = 0x9494;
-static const u32 DSPADDR8 = 0x8710;
-static const u32 DSPADDR9 = 0x8410; // ???
-static const u32 DSPADDR10 = 0xA912;
-static const u32 DSPADDR11 = 0xAA12;
-static const u32 DSPADDR12 = 0xAAD2;
-static const u32 DSPADDR13 = 0xAC52;
-static const u32 DSPADDR14 = 0xAC5C;
-static const u32 DSPADDR_frame_counter = DSPADDR0;
-
 static const int NUM_CHANNELS = 24;
 
+// DSP Addresses
+
+static const VAddr BASE_ADDR_0 = Memory::DSP_RAM_VADDR + 0x40000;
+static const VAddr BASE_ADDR_1 = Memory::DSP_RAM_VADDR + 0x60000;
+
+enum DspRegion {
+    DSPADDR0 = 0xBFFF,  // Frame Counter
+    DSPADDR1 = 0x9E92,  // Channel Context (x24)
+    DSPADDR2 = 0x8680,  // Channel Status (x24)
+    DSPADDR3 = 0xA792,  // ADPCM Coefficients (x24)
+    DSPADDR4 = 0x9430,  // Context
+    DSPADDR5 = 0x8400,  // Status
+    DSPADDR6 = 0x8540,  // Loopback Samples
+    DSPADDR7 = 0x9494,
+    DSPADDR8 = 0x8710,
+    DSPADDR9 = 0x8410,  // ???
+    DSPADDR10 = 0xA912,
+    DSPADDR11 = 0xAA12,
+    DSPADDR12 = 0xAAD2,
+    DSPADDR13 = 0xAC52,
+    DSPADDR14 = 0xAC5C
+};
+
+static constexpr VAddr DspAddrToVAddr(VAddr base, DspRegion dsp_addr) {
+    return (VAddr(dsp_addr) << 1) + base;
+}
 
 /**
- * DSP_DSP::DspEndian
- * Care must be taken when reading/writing 32-bit values. The DSP has a 16-bit wordsize and is big-endian.
- * The bytes in each word when viewed from the ARM11, however, are in little-endian.
- * Thus we have what appears to be a middle-endian encoding.
- *
- * The below function is its own inverse.
+ * dsp_u32:
+ * Care must be taken when reading/writing 32-bit values as the words are not in the expected order.
 */
 struct dsp_u32 {
+    operator u32() {
+        return Convert(storage);
+    }
+    void operator=(u32 newvalue) {
+        storage = Convert(newvalue);
+    }
+private:
     static constexpr u32 Convert(u32 value) {
         return ((value & 0x0000FFFF) << 16) | ((value & 0xFFFF0000) >> 16);
     }
-    operator u32() {
-        return Convert(value);
-    }
-    void operator=(u32 newvalue) {
-        value = Convert(newvalue);
-    }
-private:
-    u32 value;
+    u32 storage;
 };
 
 #define INSERT_PADDING_DSPWORDS(num_words) u16 CONCAT2(pad, __LINE__)[(num_words)]
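The dsp_u32 word swap is easy to sanity-check; a standalone sketch (illustrative only, not part of the diff) of the conversion and of the fact that it is its own inverse:

// Illustrative only: dsp_u32 stores the two 16-bit halves of a u32 swapped,
// so applying Convert twice restores the original value.
#include <cstdint>

static constexpr std::uint32_t Convert(std::uint32_t value) {
    return ((value & 0x0000FFFF) << 16) | ((value & 0xFFFF0000) >> 16);
}

int main() {
    static_assert(Convert(0x11223344) == 0x33441122, "16-bit words are swapped");
    static_assert(Convert(Convert(0xDEADBEEF)) == 0xDEADBEEF, "Convert is its own inverse");
}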
@@ -88,21 +88,16 @@ private:
 static_assert(std::is_standard_layout<name>::value, "Structure doesn't use standard layout"); \
 static_assert(sizeof(name) == (size), "Unexpected struct size")
 
-/*
- * ADPCM seems to be the usual Nintendo format.
- *  ps = predictor / scaler
- *  yn[0,1] = sample history
- *  Coefficients are found at DSPADDR3
- */
-
 struct Buffer {
     dsp_u32 physical_address;
     dsp_u32 sample_count;
-    u16 adpcm_ps;
-    s16 adpcm_yn[2];
-    u8 has_adpcm;
+    INSERT_PADDING_DSPWORDS(3);
+    INSERT_PADDING_BYTES(1);
 
     u8 is_looping;
     u16 buffer_id;
 
     INSERT_PADDING_DSPWORDS(1);
 };
 
@@ -114,36 +109,39 @@ struct ChannelContext {
     float mix[12];
     float rate;
     u8 rim[2];
-    u16 iirFilterType;
-    u16 iirFilter_mono[2];
-    u16 iirFilter_biquad[5];
+    u16 iirfilter_type;
+    u16 iirfilter_mono[2];
+    u16 iirfilter_biquad[5];
 
     // Buffer Queue
     u16 buffers_dirty; //< Which of those queued buffers is dirty (bit i == buffers[i])
     Buffer buffers[4]; //< Queued Buffers
 
     INSERT_PADDING_DSPWORDS(2);
 
     u16 is_active; //< Lower 8 bits == 0x01 if true.
     u16 sync;
 
     INSERT_PADDING_DSPWORDS(4);
 
-    // Current Buffer
+    // Embedded Buffer
     dsp_u32 physical_address;
     dsp_u32 sample_count;
 
     union {
         u16 flags1_raw;
         BitField<0, 2, u16> mono_or_stereo;
         BitField<2, 2, Audio::Format> format;
-        BitField<4, 12, u16> rest;
     };
-    u16 adpcm_ps;
-    s16 adpcm_yn[2];
+    INSERT_PADDING_DSPWORDS(3);
 
     union {
         u16 flags2_raw;
         BitField<0, 1, u16> has_adpcm;
         BitField<1, 1, u16> is_looping;
-        BitField<2, 14, u16> rest2;
     };
 
     u16 buffer_id;
 };
 ASSERT_STRUCT(ChannelContext, 192);
@@ -154,7 +152,7 @@ struct ChannelStatus {
     u16 sync;
     dsp_u32 buffer_position;
     u16 current_buffer_id;
-    u16 previous_buffer_id;
+    INSERT_PADDING_DSPWORDS(1);
 };
 ASSERT_STRUCT(ChannelStatus, 12);
 
@@ -175,8 +173,9 @@ static void AudioTick(u64, int cycles_late) {
     VAddr current_base;
 
     {
-        int id0 = (int)Memory::Read16(DspAddrToVAddr(BASE_ADDR_0, DSPADDR_frame_counter));
-        int id1 = (int)Memory::Read16(DspAddrToVAddr(BASE_ADDR_1, DSPADDR_frame_counter));
+        // Frame IDs.
+        int id0 = (int)Memory::Read16(DspAddrToVAddr(BASE_ADDR_0, DSPADDR0));
+        int id1 = (int)Memory::Read16(DspAddrToVAddr(BASE_ADDR_1, DSPADDR0));
 
         // The frame id increments once per audio frame, with wraparound at 65,535.
         // I am uncertain whether the real DSP actually does something like this,
@@ -207,7 +206,7 @@ static void AudioTick(u64, int cycles_late) {
         if (ctx.dirty) {
             if (TestAndUnsetBit(ctx.dirty, 29)) {
                 // First time init
-                LOG_WARNING(Service_DSP, "Unimplemented dirty bit 29");
+                LOG_DEBUG(Service_DSP, "Channel %i: First Time Init", chanid);
             }
 
             if (TestAndUnsetBit(ctx.dirty, 2)) {
@@ -217,50 +216,42 @@ static void AudioTick(u64, int cycles_late) {
 
             if (TestAndUnsetBit(ctx.dirty, 17)) {
                 // Interpolation type
-                LOG_WARNING(Service_DSP, "Unimplemented dirty bit 17");
+                LOG_WARNING(Service_DSP, "Channel %i: Unimplemented dirty bit 17", chanid);
             }
 
             if (TestAndUnsetBit(ctx.dirty, 18)) {
                 // Rate
-                LOG_WARNING(Service_DSP, "Unimplemented dirty bit 18");
-                LOG_INFO(Service_DSP, "Rate %f", ctx.rate);
+                LOG_WARNING(Service_DSP, "Channel %i: Unimplemented Rate %f", chanid, ctx.rate);
             }
 
             if (TestAndUnsetBit(ctx.dirty, 22)) {
                 // IIR
-                LOG_WARNING(Service_DSP, "Unimplemented dirty bit 22");
-                LOG_INFO(Service_DSP, "IIR %x", ctx.iirFilterType);
+                LOG_WARNING(Service_DSP, "Channel %i: Unimplemented IIR %x", chanid, ctx.iirfilter_type);
             }
 
             if (TestAndUnsetBit(ctx.dirty, 28)) {
                 // Sync count
-                LOG_DEBUG(Service_DSP, "Update Sync Count");
+                LOG_DEBUG(Service_DSP, "Channel %i: Update Sync Count", chanid);
 
                 status0.sync = ctx.sync;
                 status1.sync = ctx.sync;
             }
 
             if (TestAndUnsetBit(ctx.dirty, 25) | TestAndUnsetBit(ctx.dirty, 26) | TestAndUnsetBit(ctx.dirty, 27)) {
                 // Mix
-                LOG_WARNING(Service_DSP, "Unimplemented dirty bit 25/26/27");
                 for (int i = 0; i < 12; i++)
-                    LOG_INFO(Service_DSP, "mix[%i] %f", i, ctx.mix[i]);
+                    LOG_DEBUG(Service_DSP, "Channel %i: mix[%i] %f", chanid, i, ctx.mix[i]);
             }
 
             if (TestAndUnsetBit(ctx.dirty, 4) | TestAndUnsetBit(ctx.dirty, 21) | TestAndUnsetBit(ctx.dirty, 30)) {
                 // TODO(merry): One of these bits might merely signify an update to the format. Verify this.
-                // Embedded Buffer Changed
-                Audio::UpdateFormat(chanid, ctx.mono_or_stereo, ctx.format, ctx.rest);
+                // Format updated
+                Audio::UpdateFormat(chanid, ctx.mono_or_stereo, ctx.format);
                 channel_contex0[chanid].flags1_raw = channel_contex1[chanid].flags1_raw = ctx.flags1_raw;
                 channel_contex0[chanid].flags2_raw = channel_contex1[chanid].flags2_raw = ctx.flags2_raw;
-                if (ctx.rest || ctx.rest2) {
-                    LOG_ERROR(Service_DSP, "chan %i rest %04x rest2 %04x", chanid, ctx.rest, ctx.rest2);
-                }
-                Audio::UpdateAdpcm(chanid, channel_adpcm_coeffs[chanid].coeff);
-                Audio::EnqueueBuffer(chanid, ctx.buffer_id,
-                    Memory::GetPhysicalPointer(ctx.physical_address), ctx.sample_count,
-                    ctx.has_adpcm, ctx.adpcm_ps, ctx.adpcm_yn,
-                    ctx.is_looping);
+
+                // Embedded Buffer Changed
+                Audio::EnqueueBuffer(chanid, ctx.buffer_id, Memory::GetPhysicalPointer(ctx.physical_address), ctx.sample_count, ctx.is_looping);
 
                 status0.is_playing |= 0x100; // TODO: This is supposed to flicker on then turn off.
             }
@@ -270,15 +261,12 @@ static void AudioTick(u64, int cycles_late) {
             for (int i = 0; i < 4; i++) {
                 if (TestAndUnsetBit(ctx.buffers_dirty, i)) {
                     auto& b = ctx.buffers[i];
-                    Audio::EnqueueBuffer(chanid, b.buffer_id,
-                        Memory::GetPhysicalPointer(b.physical_address), b.sample_count,
-                        b.has_adpcm, b.adpcm_ps, b.adpcm_yn,
-                        b.is_looping);
+                    Audio::EnqueueBuffer(chanid, b.buffer_id, Memory::GetPhysicalPointer(b.physical_address), b.sample_count, b.is_looping);
                 }
             }
 
             if (ctx.buffers_dirty) {
-                LOG_ERROR(Service_DSP, "Unknown channel buffer dirty bits: 0x%04x", ctx.buffers_dirty);
+                LOG_ERROR(Service_DSP, "Channel %i: Unknown channel buffer dirty bits: 0x%04x", chanid, ctx.buffers_dirty);
             }
 
             ctx.buffers_dirty = 0;
@@ -292,16 +280,14 @@ static void AudioTick(u64, int cycles_late) {
         }
 
         if (ctx.dirty) {
-            LOG_ERROR(Service_DSP, "Unknown channel dirty bits: 0x%08x", ctx.dirty);
-            LOG_ERROR(Service_DSP, "%i Rim %i %i", chanid, ctx.rim[0], ctx.rim[1]);
-            LOG_ERROR(Service_DSP, "%i IIR-type %i", chanid, ctx.iirFilterType);
-            LOG_ERROR(Service_DSP, "%i Mono %f %f", chanid, ctx.iirFilter_mono[0], ctx.iirFilter_mono[1]);
-            LOG_ERROR(Service_DSP, "%i Biquad %f %f %f %f %f", chanid, ctx.iirFilter_biquad[0], ctx.iirFilter_biquad[1], ctx.iirFilter_biquad[2], ctx.iirFilter_biquad[3], ctx.iirFilter_biquad[4]);
+            LOG_ERROR(Service_DSP, "Channel %i: Unknown channel dirty bits: 0x%08x", chanid, ctx.dirty);
         }
 
         ctx.dirty = 0;
     }
 
+    // TODO: Detect any change to the structures without a dirty flag update to identify what the other bits do.
+
     Audio::Tick(chanid);
 
     // Update channel status
@@ -335,7 +321,7 @@ static void ConvertProcessAddressFromDspDram(Service::Interface* self) {
     u32 addr = cmd_buff[1];
 
     cmd_buff[1] = 0; // No error
-    cmd_buff[2] = DspAddrToVAddr(BASE_ADDR_0, addr);
+    cmd_buff[2] = DspAddrToVAddr(BASE_ADDR_0, (DspRegion)addr);
 }
 
 /**
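For reference, DspAddrToVAddr maps a 16-bit DSP word address to an ARM11 virtual address by doubling it and adding the region base; a standalone sketch of the arithmetic using the constants from the hunks above (the DSP_RAM_VADDR value is assumed here purely for the example):

// Illustrative only: DSP addresses are word offsets, so vaddr = base + 2 * dsp_addr.
#include <cassert>
#include <cstdint>

int main() {
    const std::uint32_t DSP_RAM_VADDR = 0x1FF00000;       // assumed value, for the arithmetic only
    const std::uint32_t BASE_ADDR_0   = DSP_RAM_VADDR + 0x40000;
    const std::uint32_t DSPADDR0      = 0xBFFF;           // frame counter, in DSP words

    std::uint32_t vaddr = (DSPADDR0 << 1) + BASE_ADDR_0;  // same formula as DspAddrToVAddr
    assert(vaddr == DSP_RAM_VADDR + 0x57FFE);
}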
@@ -2,7 +2,7 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#include "core/audio/stream.h"
+#include "core/audio/audio.h"
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/system.h"