Audio Renderer improvements (#210)
* Partial voice implementation on audio renderer * Implemented audren resampler (based on original impl) * Fix BiquadFilter struct * Pause audio playback on last stream buffer * Split audren/audout files into separate folders, some minor cleanup * Use AudioRendererParameter on GetWorkBufferSize as well * Bump audren version to REV4, name a few things, increase sample buffer size * Remove useless new lines
This commit is contained in:
91
Ryujinx.Audio/Adpcm/AdpcmDecoder.cs
Normal file
91
Ryujinx.Audio/Adpcm/AdpcmDecoder.cs
Normal file
@ -0,0 +1,91 @@
|
||||
namespace Ryujinx.Audio.Adpcm
{
    /// <summary>
    /// Decodes 4-bit-nibble ADPCM frames into saturated 16-bit PCM,
    /// duplicating each decoded sample into both stereo channels.
    /// </summary>
    public static class AdpcmDecoder
    {
        // Each 8-byte frame is one header byte plus 7 data bytes (2 nibbles each).
        private const int SamplesPerFrame = 14;
        private const int BytesPerFrame   = 8;

        /// <summary>
        /// Decodes the given ADPCM buffer into interleaved stereo PCM.
        /// </summary>
        /// <param name="Buffer">Raw ADPCM data; a partial trailing frame is ignored.</param>
        /// <param name="Context">Decoder state (coefficients and history), updated in place.</param>
        /// <returns>Decoded PCM; every source sample is written twice (left/right).</returns>
        public static int[] Decode(byte[] Buffer, AdpcmDecoderContext Context)
        {
            int Samples = GetSamplesCountFromSize(Buffer.Length);

            int[] Pcm = new int[Samples * 2];

            short History0 = Context.History0;
            short History1 = Context.History1;

            int InputOffset  = 0;
            int OutputOffset = 0;

            while (InputOffset < Buffer.Length)
            {
                // All whole frames are decoded; any remaining bytes belong to a
                // partial frame and carry no decodable samples, so stop instead
                // of consuming them as bogus headers.
                if (Samples <= 0)
                {
                    break;
                }

                byte Header = Buffer[InputOffset++];

                // Low nibble selects the scale, high nibble the coefficient pair.
                int Scale = 0x800 << (Header & 0xf);

                int CoeffIndex = (Header >> 4) & 7;

                short Coeff0 = Context.Coefficients[CoeffIndex * 2 + 0];
                short Coeff1 = Context.Coefficients[CoeffIndex * 2 + 1];

                int FrameSamples = SamplesPerFrame;

                // The last frame may carry fewer samples than a full frame.
                if (FrameSamples > Samples)
                {
                    FrameSamples = Samples;
                }

                int Value = 0;

                for (int SampleIndex = 0; SampleIndex < FrameSamples; SampleIndex++)
                {
                    int Sample;

                    if ((SampleIndex & 1) == 0)
                    {
                        // Even samples consume a new byte and use its high
                        // nibble, sign-extended by the shift pair.
                        Value = Buffer[InputOffset++];

                        Sample = (Value << 24) >> 28;
                    }
                    else
                    {
                        // Odd samples reuse the low nibble of the same byte.
                        Sample = (Value << 28) >> 28;
                    }

                    int Prediction = Coeff0 * History0 + Coeff1 * History1;

                    // 0x400 rounds the 11-bit fixed point result to nearest.
                    Sample = (Sample * Scale + Prediction + 0x400) >> 11;

                    short SaturatedSample = DspUtils.Saturate(Sample);

                    History1 = History0;
                    History0 = SaturatedSample;

                    // Duplicate the mono sample into both stereo channels.
                    Pcm[OutputOffset++] = SaturatedSample;
                    Pcm[OutputOffset++] = SaturatedSample;
                }

                Samples -= FrameSamples;
            }

            // Persist history so the next buffer continues seamlessly.
            Context.History0 = History0;
            Context.History1 = History1;

            return Pcm;
        }

        /// <summary>
        /// Returns the ADPCM byte size of the whole frames needed for SamplesCount samples.
        /// </summary>
        public static long GetSizeFromSamplesCount(int SamplesCount)
        {
            int Frames = SamplesCount / SamplesPerFrame;

            // Widen before multiplying so large sample counts cannot overflow
            // the int intermediate (the declared return type is long).
            return (long)Frames * BytesPerFrame;
        }

        /// <summary>
        /// Returns how many samples Size bytes hold, ignoring any partial trailing frame.
        /// </summary>
        public static int GetSamplesCountFromSize(long Size)
        {
            int Frames = (int)(Size / BytesPerFrame);

            return Frames * SamplesPerFrame;
        }
    }
}
|
10
Ryujinx.Audio/Adpcm/AdpcmDecoderContext.cs
Normal file
10
Ryujinx.Audio/Adpcm/AdpcmDecoderContext.cs
Normal file
@ -0,0 +1,10 @@
|
||||
namespace Ryujinx.Audio.Adpcm
{
    /// <summary>
    /// Mutable ADPCM decoder state carried across AdpcmDecoder.Decode calls
    /// so decoding continues seamlessly from one buffer to the next.
    /// </summary>
    public class AdpcmDecoderContext
    {
        // Prediction coefficient pairs. The decoder indexes this as
        // [CoeffIndex * 2 + 0] and [CoeffIndex * 2 + 1] with CoeffIndex in
        // 0..7, so it must hold at least 16 entries (8 pairs).
        public short[] Coefficients;

        // Last two decoded (saturated) samples, used by the predictor;
        // History0 is the most recent. Updated in place by Decode.
        public short History0;
        public short History1;
    }
}
|
@ -1,13 +0,0 @@
|
||||
namespace Ryujinx.Audio
{
    /// <summary>
    /// Sample formats supported by the audio output interface.
    /// </summary>
    public enum AudioFormat
    {
        Invalid  = 0,
        PcmInt8  = 1,
        PcmInt16 = 2,
        PcmInt24 = 3,
        PcmInt32 = 4,
        PcmFloat = 5,
        Adpcm    = 6,

        // Misspelled members ("Imt") kept as aliases of the corrected names
        // for backward compatibility with existing callers.
        [System.Obsolete("Use PcmInt24 instead.")]
        PcmImt24 = PcmInt24,
        [System.Obsolete("Use PcmInt32 instead.")]
        PcmImt32 = PcmInt32
    }
}
|
16
Ryujinx.Audio/DspUtils.cs
Normal file
16
Ryujinx.Audio/DspUtils.cs
Normal file
@ -0,0 +1,16 @@
|
||||
namespace Ryujinx.Audio.Adpcm
{
    /// <summary>
    /// Small DSP helper routines shared by the audio decoders.
    /// </summary>
    public static class DspUtils
    {
        /// <summary>
        /// Clamps a 32-bit value to the signed 16-bit sample range
        /// [short.MinValue, short.MaxValue] and narrows it to short.
        /// </summary>
        public static short Saturate(int Value)
        {
            return Value > short.MaxValue ? short.MaxValue
                 : Value < short.MinValue ? short.MinValue
                 : (short)Value;
        }
    }
}
|
@ -2,11 +2,7 @@ namespace Ryujinx.Audio
|
||||
{
|
||||
public interface IAalOutput
|
||||
{
|
||||
int OpenTrack(
|
||||
int SampleRate,
|
||||
int Channels,
|
||||
ReleaseCallback Callback,
|
||||
out AudioFormat Format);
|
||||
int OpenTrack(int SampleRate, int Channels, ReleaseCallback Callback);
|
||||
|
||||
void CloseTrack(int Track);
|
||||
|
||||
@ -14,7 +10,7 @@ namespace Ryujinx.Audio
|
||||
|
||||
long[] GetReleasedBuffers(int Track, int MaxCount);
|
||||
|
||||
void AppendBuffer(int Track, long Tag, byte[] Buffer);
|
||||
void AppendBuffer<T>(int Track, long Tag, T[] Buffer) where T : struct;
|
||||
|
||||
void Start(int Track);
|
||||
void Stop(int Track);
|
||||
|
@ -3,6 +3,7 @@ using OpenTK.Audio.OpenAL;
|
||||
using System;
|
||||
using System.Collections.Concurrent;
|
||||
using System.Collections.Generic;
|
||||
using System.Runtime.InteropServices;
|
||||
using System.Threading;
|
||||
|
||||
namespace Ryujinx.Audio.OpenAL
|
||||
@ -226,15 +227,9 @@ namespace Ryujinx.Audio.OpenAL
|
||||
while (KeepPolling);
|
||||
}
|
||||
|
||||
public int OpenTrack(
|
||||
int SampleRate,
|
||||
int Channels,
|
||||
ReleaseCallback Callback,
|
||||
out AudioFormat Format)
|
||||
public int OpenTrack(int SampleRate, int Channels, ReleaseCallback Callback)
|
||||
{
|
||||
Format = AudioFormat.PcmInt16;
|
||||
|
||||
Track Td = new Track(SampleRate, GetALFormat(Channels, Format), Callback);
|
||||
Track Td = new Track(SampleRate, GetALFormat(Channels), Callback);
|
||||
|
||||
for (int Id = 0; Id < MaxTracks; Id++)
|
||||
{
|
||||
@ -247,38 +242,16 @@ namespace Ryujinx.Audio.OpenAL
|
||||
return -1;
|
||||
}
|
||||
|
||||
private ALFormat GetALFormat(int Channels, AudioFormat Format)
|
||||
private ALFormat GetALFormat(int Channels)
|
||||
{
|
||||
if (Channels == 1)
|
||||
switch (Channels)
|
||||
{
|
||||
switch (Format)
|
||||
{
|
||||
case AudioFormat.PcmInt8: return ALFormat.Mono8;
|
||||
case AudioFormat.PcmInt16: return ALFormat.Mono16;
|
||||
}
|
||||
}
|
||||
else if (Channels == 2)
|
||||
{
|
||||
switch (Format)
|
||||
{
|
||||
case AudioFormat.PcmInt8: return ALFormat.Stereo8;
|
||||
case AudioFormat.PcmInt16: return ALFormat.Stereo16;
|
||||
}
|
||||
}
|
||||
else if (Channels == 6)
|
||||
{
|
||||
switch (Format)
|
||||
{
|
||||
case AudioFormat.PcmInt8: return ALFormat.Multi51Chn8Ext;
|
||||
case AudioFormat.PcmInt16: return ALFormat.Multi51Chn16Ext;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new ArgumentOutOfRangeException(nameof(Channels));
|
||||
case 1: return ALFormat.Mono16;
|
||||
case 2: return ALFormat.Stereo16;
|
||||
case 6: return ALFormat.Multi51Chn16Ext;
|
||||
}
|
||||
|
||||
throw new ArgumentException(nameof(Format));
|
||||
throw new ArgumentOutOfRangeException(nameof(Channels));
|
||||
}
|
||||
|
||||
public void CloseTrack(int Track)
|
||||
@ -309,13 +282,15 @@ namespace Ryujinx.Audio.OpenAL
|
||||
return null;
|
||||
}
|
||||
|
||||
public void AppendBuffer(int Track, long Tag, byte[] Buffer)
|
||||
public void AppendBuffer<T>(int Track, long Tag, T[] Buffer) where T : struct
|
||||
{
|
||||
if (Tracks.TryGetValue(Track, out Track Td))
|
||||
{
|
||||
int BufferId = Td.AppendBuffer(Tag);
|
||||
|
||||
AL.BufferData(BufferId, Td.Format, Buffer, Buffer.Length, Td.SampleRate);
|
||||
int Size = Buffer.Length * Marshal.SizeOf<T>();
|
||||
|
||||
AL.BufferData<T>(BufferId, Td.Format, Buffer, Size, Td.SampleRate);
|
||||
|
||||
AL.SourceQueueBuffer(Td.SourceId, BufferId);
|
||||
|
||||
@ -366,7 +341,5 @@ namespace Ryujinx.Audio.OpenAL
|
||||
|
||||
return PlaybackState.Stopped;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
}
|
Reference in New Issue
Block a user