Audio: Implement decoding of ADPCM

MerryMage 2016-01-24 19:30:55 +00:00
parent b481a5b701
commit 15d73b1259
3 changed files with 132 additions and 37 deletions
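For context on what the new decoder implements: DSP ADPCM audio is laid out in 8-byte frames, each with one header byte (low nybble: scale exponent, high nybble: index of a coefficient pair) followed by 7 data bytes carrying 14 4-bit samples, and every output sample combines the scaled nybble with a linear prediction from the two previous outputs. Below is a minimal standalone sketch of one frame's worth of that arithmetic, assuming the usual high-nybble-first sample ordering; the function and variable names are illustrative and not part of this commit.

#include <algorithm>
#include <array>
#include <cstdint>
#include <vector>

// Decode one 8-byte DSP-ADPCM frame (14 samples). yn0/yn1 hold the two most
// recently decoded samples and are updated so the caller can chain frames.
std::vector<int16_t> DecodeOneFrame(const uint8_t frame[8],
                                    const std::array<int16_t, 16>& coeff,
                                    int& yn0, int& yn1) {
    static const int signed_nybbles[16] = { 0,1,2,3,4,5,6,7,-8,-7,-6,-5,-4,-3,-2,-1 };
    const int scale = 1 << (frame[0] & 0xF); // low nybble of header: scale exponent
    const int idx = (frame[0] >> 4) & 0x7;   // high nybble of header: coefficient-pair index
    const int coef0 = coeff[idx * 2 + 0];
    const int coef1 = coeff[idx * 2 + 1];

    auto decode_nybble = [&](int nybble) -> int16_t {
        // Scaled 4-bit delta plus a prediction from the previous two samples, rounded and clamped.
        int val = (((nybble * scale) << 11) + 0x400 + coef0 * yn0 + coef1 * yn1) >> 11;
        val = std::min(32767, std::max(-32768, val));
        yn1 = yn0;
        yn0 = val;
        return static_cast<int16_t>(val);
    };

    std::vector<int16_t> out;
    for (int i = 1; i < 8; i++) {
        out.push_back(decode_nybble(signed_nybbles[frame[i] >> 4]));  // earlier sample: high nybble
        out.push_back(decode_nybble(signed_nybbles[frame[i] & 0xF])); // later sample: low nybble
    }
    return out;
}

The predictor history (yn0, yn1) has to survive across frames and across queued buffers, which is what the per-channel AdpcmState introduced below is for.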

View File

@@ -7,11 +7,12 @@
#include "core/audio/stream.h"
#include <algorithm>
#include <array>
#include <queue>
namespace Audio {
std::vector<u16> DecodeADPCM(u8* data, size_t sample_count, u16 adpcm_ps, s16 adpcm_yn[2], std::array<u8, 16> adpcm_coeff);
std::vector<s16> DecodeADPCM(u8* data, size_t sample_count, bool has_adpcm, u16 adpcm_ps, s16* adpcm_yn, const std::array<s16, 16>& adpcm_coeff);
static const int BASE_SAMPLE_RATE = 22050;
@@ -27,13 +28,69 @@ namespace Audio {
}
};
struct AdpcmState {
u16 ps;
s16 yn0;
s16 yn1;
};
std::vector<s16> DecodeADPCM(u8* data, size_t sample_count, bool has_adpcm, u16 adpcm_ps, s16 adpcm_yn[2], const std::array<s16, 16>& adpcm_coeff, AdpcmState& state) {
std::vector<s16> ret(sample_count);
int yn0 = state.yn0, yn1 = state.yn1;
if (sample_count % 14 != 0) {
LOG_ERROR(Audio, "Audio stream has incomplete frames");
}
// Sign-extend a 4-bit sample value.
const static int signed_nybbles[16] = { 0,1,2,3,4,5,6,7,-8,-7,-6,-5,-4,-3,-2,-1 };
// Each 8-byte frame holds one header byte plus 7 data bytes (two 4-bit samples each), i.e. 14 samples.
const int num_frames = sample_count / 14;
for (int frameno = 0; frameno < num_frames; frameno++) {
int frame_header = data[frameno * 8];
int scale = 1 << (frame_header & 0xF); // low nybble of the header: scale exponent
int idx = (frame_header >> 4) & 0x7; // high nybble of the header: coefficient-pair index
int coef0 = (s16)adpcm_coeff[idx * 2 + 0];
int coef1 = (s16)adpcm_coeff[idx * 2 + 1];
// Scaled 4-bit delta plus a linear prediction from the two previous samples, rounded and clamped.
auto next_nybble = [&](int nybble) -> s16 {
int val = (((nybble * scale) << 11) + 0x400 + coef0 * yn0 + coef1 * yn1) >> 11;
if (val >= 32767) val = 32767;
if (val <= -32768) val = -32768;
yn1 = yn0;
yn0 = val;
return (s16)val;
};
// Two samples per data byte; the earlier sample is in the high nybble.
for (int i = frameno * 14, datai = frameno * 8 + 1, samplecount = 0; samplecount < 14; i += 2, datai++, samplecount += 2) {
ret[i + 0] = next_nybble(signed_nybbles[data[datai] >> 4]);
ret[i + 1] = next_nybble(signed_nybbles[data[datai] & 0xF]);
}
}
state.yn0 = yn0;
state.yn1 = yn1;
return ret;
}
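// A minimal usage sketch of the decoder above (illustrative; the buffer pointer, sample count
// and coefficient values here are assumptions, not taken from this commit):
//     AdpcmState state {};                  // per-channel predictor history
//     std::array<s16, 16> coeffs {};        // filled in by UpdateAdpcm()
//     s16 yn[2] = { 0, 0 };
//     std::vector<s16> pcm = DecodeADPCM(adpcm_data, num_samples, true, 0, yn, coeffs, state);
//     // pcm holds num_samples mono s16 samples; state.yn0/state.yn1 carry over to the next buffer.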
struct OutputChannel {
ALuint source;
int mono_or_stereo;
Format format;
int format_rest;
std::priority_queue<Buffer> queue;
std::queue<Buffer> playing;
u16 last_bufid;
bool enabled;
std::array<s16, 16> adpcm_coeffs;
AdpcmState adpcm_state;
};
OutputChannel chans[24];
@@ -83,21 +140,28 @@ namespace Audio {
alGenSources(1, &chans[i].source);
if (alGetError() != AL_NO_ERROR) LOG_CRITICAL(Audio, "Failed to setup sound source");
}
silence.fill(0);
}
void Shutdown() {}
void UpdateFormat(int chanid, int mono_or_stereo, Format format) {
void UpdateFormat(int chanid, int mono_or_stereo, Format format, int rest) {
chans[chanid].mono_or_stereo = mono_or_stereo;
chans[chanid].format = format;
chans[chanid].format_rest = rest;
}
LOG_WARNING(Audio, "(STUB)");
void UpdateAdpcm(int chanid, s16 coeffs[16]) {
LOG_INFO(Audio, "ADPCM Coeffs updated for channel %i", chanid);
std::copy(coeffs, coeffs+16, std::begin(chans[chanid].adpcm_coeffs));
}
void EnqueueBuffer(int chanid, u16 buffer_id,
void* data, int sample_count,
bool has_adpcm, u16 adpcm_ps, s16 adpcm_yn[2],
bool is_looping) {
LOG_INFO(Audio, "enqueu for %i", chanid);
if (is_looping) {
LOG_WARNING(Audio, "Looped buffers are unimplemented");
@@ -128,14 +192,14 @@ namespace Audio {
break;
}
if (alGetError() != AL_NO_ERROR) LOG_CRITICAL(Audio, "Failed to init buffer");
} /*else if (chans[chanid].format == FORMAT_ADPCM) {
} else if (chans[chanid].format == FORMAT_ADPCM) {
if (chans[chanid].mono_or_stereo != 1) {
LOG_ERROR(Audio, "Being fed non-mono ADPCM");
}
std::vector<u16> decoded = DecodeADPCM(data, sample_count, adpcm_ps, adpcm_yn, chans[chanid].adpcm_coeff);
alBufferData(b, AL_FORMAT_MONO16, decoded.data(), decoded.size() * 2, BASE_SAMPLE_RATE);
std::vector<s16> decoded = DecodeADPCM((u8*)data, sample_count, has_adpcm, adpcm_ps, adpcm_yn, chans[chanid].adpcm_coeffs, chans[chanid].adpcm_state);
alBufferData(b, AL_FORMAT_MONO16, decoded.data(), decoded.size() * 2, BASE_SAMPLE_RATE);
if (alGetError() != AL_NO_ERROR) LOG_CRITICAL(Audio, "Failed to init buffer");
}*/ else {
} else {
LOG_ERROR(Audio, "Unrecognised audio format in buffer 0x%04x (size: %i samples)", buffer_id, sample_count);
alBufferData(b, AL_FORMAT_MONO8, silence.data(), silence.size(), BASE_SAMPLE_RATE);
if (alGetError() != AL_NO_ERROR) LOG_CRITICAL(Audio, "Failed to init buffer");
@@ -144,22 +208,32 @@ namespace Audio {
chans[chanid].queue.emplace( Buffer { buffer_id, b, is_looping });
}
void Play(int chanid, bool play) {
LOG_INFO(Audio, "Play(%i,%i)", chanid, play);
chans[chanid].enabled = play;
}
void Tick(int chanid) {
auto& c = chans[chanid];
if (!c.queue.empty()) {
while (!c.queue.empty()) {
alSourceQueueBuffers(c.source, 1, &c.queue.top().buffer);
if (alGetError() != AL_NO_ERROR) LOG_CRITICAL(Audio, "Failed to enqueue buffer");
if (alGetError() != AL_NO_ERROR) {
LOG_CRITICAL(Audio, "Failed to enqueue buffer");
c.queue.pop();
continue;
}
c.playing.emplace(c.queue.top());
LOG_INFO(Audio, "Enqueued buffer id 0x%04x", c.queue.top().id);
LOG_DEBUG(Audio, "Enqueued buffer id 0x%04x", c.queue.top().id);
c.queue.pop();
}
ALint state;
alGetSourcei(c.source, AL_SOURCE_STATE, &state);
if (state != AL_PLAYING) {
alSourcePlay(c.source);
if (c.enabled) {
ALint state;
alGetSourcei(c.source, AL_SOURCE_STATE, &state);
if (state != AL_PLAYING) {
alSourcePlay(c.source);
}
}
}
@@ -174,18 +248,15 @@ namespace Audio {
alSourceUnqueueBuffers(c.source, 1, &buf);
processed--;
LOG_INFO(Audio, "Finished buffer id 0x%04x", c.playing.front().id);
while (!c.playing.empty() && c.playing.front().buffer != buf) {
c.playing.pop();
LOG_ERROR(Audio, "Audio is extremely funky. Should abort. (Desynced queue.)");
}
LOG_DEBUG(Audio, "Finished buffer id 0x%04x", c.playing.front().id);
if (!c.playing.empty()) {
if (c.playing.front().buffer != buf) LOG_CRITICAL(Audio, "Audio is extremely funky. Should abort. (Desynced queue.)");
c.last_bufid = c.playing.front().id;
c.playing.pop();
} else {
LOG_ERROR(Audio, "Audio is extremely funky. Should abort. (Empty queue.)");
LOG_CRITICAL(Audio, "Audio is extremely funky. Should abort. (Empty queue.)");
}
alDeleteBuffers(1, &buf);
@@ -199,7 +270,7 @@ namespace Audio {
std::tuple<bool, u16, u32> GetStatus(int chanid) {
auto& c = chans[chanid];
bool isplaying = false;
bool isplaying = c.enabled;
u16 bufid = 0;
u32 pos = 0;
@@ -207,8 +278,6 @@ namespace Audio {
alGetSourcei(c.source, AL_SOURCE_STATE, &state);
alGetSourcei(c.source, AL_SAMPLE_OFFSET, &samples);
if (state == AL_PLAYING) isplaying = true;
bufid = c.last_bufid;
pos = samples;

View File

@@ -10,7 +10,6 @@
namespace Audio {
void Init();
void Play(void* buf, size_t size);
void Shutdown();
enum Format : u16 {
@@ -19,7 +18,10 @@ namespace Audio {
FORMAT_ADPCM = 2
};
void UpdateFormat(int chanid, int mono_or_stereo, Format format);
void UpdateFormat(int chanid, int mono_or_stereo, Format format, int rest);
void UpdateAdpcm(int chanid, s16 coeffs[16]);
void Play(int chanid, bool play);
void EnqueueBuffer(int chanid, u16 buffer_id,
void* data, int sample_count,

View File

@@ -111,7 +111,12 @@ struct ChannelContext {
u32 dirty;
// Effects
INSERT_PADDING_DSPWORDS(35);
float mix[12];
float rate;
u8 rim[2];
u16 iirFilterType;
u16 iirFilter_mono[2];
u16 iirFilter_biquad[5];
// Buffer Queue
u16 buffers_dirty; //< Which of the queued buffers are dirty (bit i corresponds to buffers[i])
@@ -126,14 +131,18 @@ struct ChannelContext {
dsp_u32 physical_address;
dsp_u32 sample_count;
union {
u16 flags1_raw;
BitField<0, 2, u16> mono_or_stereo;
BitField<2, 2, Audio::Format> format;
BitField<4, 12, u16> rest;
};
u16 adpcm_ps;
s16 adpcm_yn[2];
union {
u16 flags2_raw;
BitField<0, 1, u16> has_adpcm;
BitField<1, 1, u16> is_looping;
BitField<2, 14, u16> rest2;
};
u16 buffer_id;
};
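// flags1_raw layout, per the BitFields above: bits 0-1 mono_or_stereo, bits 2-3 format,
// bits 4-15 rest; flags2_raw: bit 0 has_adpcm, bit 1 is_looping, bits 2-15 rest2.
// Roughly equivalent manual extraction (an illustrative sketch only):
//     int mono_or_stereo = flags1_raw & 0x3;
//     Audio::Format format = static_cast<Audio::Format>((flags1_raw >> 2) & 0x3);
//     u16 rest = (flags1_raw >> 4) & 0xFFF;
//     bool has_adpcm = (flags2_raw & 0x1) != 0;
//     bool is_looping = (flags2_raw & 0x2) != 0;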
@@ -150,7 +159,7 @@ struct ChannelStatus {
ASSERT_STRUCT(ChannelStatus, 12);
struct AdpcmCoefficients {
u16 coeff[16];
s16 coeff[16];
};
ASSERT_STRUCT(AdpcmCoefficients, 32);
@@ -184,6 +193,8 @@ static void AudioTick(u64, int cycles_late) {
}
auto channel_contexes = (ChannelContext*) Memory::GetPointer(DspAddrToVAddr(current_base, DSPADDR1));
auto channel_contex0 = (ChannelContext*)Memory::GetPointer(DspAddrToVAddr(BASE_ADDR_0, DSPADDR1));
auto channel_contex1 = (ChannelContext*)Memory::GetPointer(DspAddrToVAddr(BASE_ADDR_1, DSPADDR1));
auto channel_status0 = (ChannelStatus*)Memory::GetPointer(DspAddrToVAddr(BASE_ADDR_0, DSPADDR2));
auto channel_status1 = (ChannelStatus*)Memory::GetPointer(DspAddrToVAddr(BASE_ADDR_1, DSPADDR2));
auto channel_adpcm_coeffs = (AdpcmCoefficients*) Memory::GetPointer(DspAddrToVAddr(current_base, DSPADDR3));
@@ -199,15 +210,9 @@ static void AudioTick(u64, int cycles_late) {
LOG_WARNING(Service_DSP, "Unimplemented dirty bit 29");
}
if (TestAndUnsetBit(ctx.dirty, 16)) {
// Is Active?
//LOG_WARNING(Service_DSP, "Unimplemented dirty bit 16");
}
if (TestAndUnsetBit(ctx.dirty, 2)) {
// Update ADPCM coefficients
LOG_WARNING(Service_DSP, "Unimplemented dirty bit 2");
AdpcmCoefficients& coeff = channel_adpcm_coeffs[chanid];
Audio::UpdateAdpcm(chanid, coeff.coeff);
}
if (TestAndUnsetBit(ctx.dirty, 17)) {
@@ -218,16 +223,18 @@ static void AudioTick(u64, int cycles_late) {
if (TestAndUnsetBit(ctx.dirty, 18)) {
// Rate
LOG_WARNING(Service_DSP, "Unimplemented dirty bit 18");
LOG_INFO(Service_DSP, "Rate %f", ctx.rate);
}
if (TestAndUnsetBit(ctx.dirty, 22)) {
// IIR
LOG_WARNING(Service_DSP, "Unimplemented dirty bit 22");
LOG_INFO(Service_DSP, "IIR %x", ctx.iirFilterType);
}
if (TestAndUnsetBit(ctx.dirty, 28)) {
// Sync count
LOG_WARNING(Service_DSP, "(STUB) Update Sync Count");
LOG_DEBUG(Service_DSP, "Update Sync Count");
status0.sync = ctx.sync;
status1.sync = ctx.sync;
@@ -236,12 +243,20 @@ static void AudioTick(u64, int cycles_late) {
if (TestAndUnsetBit(ctx.dirty, 25) | TestAndUnsetBit(ctx.dirty, 26) | TestAndUnsetBit(ctx.dirty, 27)) {
// Mix
LOG_WARNING(Service_DSP, "Unimplemented dirty bit 25/26/27");
for (int i = 0; i < 12; i++)
LOG_INFO(Service_DSP, "mix[%i] %f", i, ctx.mix[i]);
}
if (TestAndUnsetBit(ctx.dirty, 4) | TestAndUnsetBit(ctx.dirty, 21) | TestAndUnsetBit(ctx.dirty, 30)) {
// TODO(merry): One of these bits might merely signify an update to the format. Verify this.
// Embedded Buffer Changed
Audio::UpdateFormat(chanid, ctx.mono_or_stereo, ctx.format);
Audio::UpdateFormat(chanid, ctx.mono_or_stereo, ctx.format, ctx.rest);
channel_contex0[chanid].flags1_raw = channel_contex1[chanid].flags1_raw = ctx.flags1_raw;
channel_contex0[chanid].flags2_raw = channel_contex1[chanid].flags2_raw = ctx.flags2_raw;
if (ctx.rest || ctx.rest2) {
LOG_ERROR(Service_DSP, "chan %i rest %04x rest2 %04x", chanid, ctx.rest, ctx.rest2);
}
Audio::UpdateAdpcm(chanid, channel_adpcm_coeffs[chanid].coeff);
Audio::EnqueueBuffer(chanid, ctx.buffer_id,
Memory::GetPhysicalPointer(ctx.physical_address), ctx.sample_count,
ctx.has_adpcm, ctx.adpcm_ps, ctx.adpcm_yn,
@@ -271,8 +286,17 @@ static void AudioTick(u64, int cycles_late) {
status0.is_playing |= 0x100; // TODO: This is supposed to flicker on then turn off.
}
if (TestAndUnsetBit(ctx.dirty, 16)) {
// Is Active?
Audio::Play(chanid, (ctx.is_active & 0xFF) != 0);
}
if (ctx.dirty) {
LOG_ERROR(Service_DSP, "Unknown channel dirty bits: 0x%08x", ctx.dirty);
LOG_ERROR(Service_DSP, "%i Rim %i %i", chanid, ctx.rim[0], ctx.rim[1]);
LOG_ERROR(Service_DSP, "%i IIR-type %i", chanid, ctx.iirFilterType);
LOG_ERROR(Service_DSP, "%i Mono %f %f", chanid, ctx.iirFilter_mono[0], ctx.iirFilter_mono[1]);
LOG_ERROR(Service_DSP, "%i Biquad %f %f %f %f %f", chanid, ctx.iirFilter_biquad[0], ctx.iirFilter_biquad[1], ctx.iirFilter_biquad[2], ctx.iirFilter_biquad[3], ctx.iirFilter_biquad[4]);
}
ctx.dirty = 0;