mirror of https://github.com/bjornbytes/lovr.git
Put all the audio code in one file.
If there were ever a hypothetical switch to a different audio library, this would make such a switch easier.
This commit is contained in:
parent
b0289de9a6
commit
b9a2bddd25
|
@ -361,8 +361,6 @@ if(LOVR_ENABLE_AUDIO)
|
|||
add_definitions(-DLOVR_ENABLE_AUDIO)
|
||||
target_sources(lovr PRIVATE
|
||||
src/modules/audio/audio.c
|
||||
src/modules/audio/source.c
|
||||
src/modules/audio/microphone.c
|
||||
src/api/l_audio.c
|
||||
src/api/l_audio_source.c
|
||||
src/api/l_audio_microphone.c
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
#include "api.h"
|
||||
#include "audio/audio.h"
|
||||
#include "audio/microphone.h"
|
||||
#include "audio/source.h"
|
||||
#include "data/blob.h"
|
||||
#include "data/audioStream.h"
|
||||
#include "data/soundData.h"
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
#include "api.h"
|
||||
#include "audio/audio.h"
|
||||
#include "audio/source.h"
|
||||
#include "core/maf.h"
|
||||
#include <stdbool.h>
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
#include "audio/audio.h"
|
||||
#include "audio/source.h"
|
||||
#include "data/audioStream.h"
|
||||
#include "data/soundData.h"
|
||||
#include "core/arr.h"
|
||||
#include "core/maf.h"
|
||||
#include "core/ref.h"
|
||||
|
@ -12,6 +12,26 @@
|
|||
#include <AL/alext.h>
|
||||
#endif
|
||||
|
||||
#define SOURCE_BUFFERS 4
|
||||
|
||||
struct Source {
|
||||
SourceType type;
|
||||
struct SoundData* soundData;
|
||||
struct AudioStream* stream;
|
||||
ALuint id;
|
||||
ALuint buffers[SOURCE_BUFFERS];
|
||||
bool isLooping;
|
||||
};
|
||||
|
||||
struct Microphone {
|
||||
ALCdevice* device;
|
||||
const char* name;
|
||||
bool isRecording;
|
||||
uint32_t sampleRate;
|
||||
uint32_t bitDepth;
|
||||
uint32_t channelCount;
|
||||
};
|
||||
|
||||
static struct {
|
||||
bool initialized;
|
||||
bool spatialized;
|
||||
|
@ -23,7 +43,7 @@ static struct {
|
|||
arr_t(Source*) sources;
|
||||
} state;
|
||||
|
||||
ALenum lovrAudioConvertFormat(uint32_t bitDepth, uint32_t channelCount) {
|
||||
static ALenum lovrAudioConvertFormat(uint32_t bitDepth, uint32_t channelCount) {
|
||||
if (bitDepth == 8 && channelCount == 1) {
|
||||
return AL_FORMAT_MONO8;
|
||||
} else if (bitDepth == 8 && channelCount == 2) {
|
||||
|
@ -33,7 +53,6 @@ ALenum lovrAudioConvertFormat(uint32_t bitDepth, uint32_t channelCount) {
|
|||
} else if (bitDepth == 16 && channelCount == 2) {
|
||||
return AL_FORMAT_STEREO16;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -205,3 +224,381 @@ void lovrAudioStop() {
|
|||
lovrSourceStop(state.sources.data[i]);
|
||||
}
|
||||
}
|
||||
|
||||
// Source
|
||||
|
||||
Source* lovrSourceCreateStatic(SoundData* soundData) {
|
||||
Source* source = lovrAlloc(Source);
|
||||
ALenum format = lovrAudioConvertFormat(soundData->bitDepth, soundData->channelCount);
|
||||
source->type = SOURCE_STATIC;
|
||||
source->soundData = soundData;
|
||||
alGenSources(1, &source->id);
|
||||
alGenBuffers(1, source->buffers);
|
||||
alBufferData(source->buffers[0], format, soundData->blob->data, (ALsizei) soundData->blob->size, soundData->sampleRate);
|
||||
alSourcei(source->id, AL_BUFFER, source->buffers[0]);
|
||||
lovrRetain(soundData);
|
||||
return source;
|
||||
}
|
||||
|
||||
Source* lovrSourceCreateStream(AudioStream* stream) {
|
||||
Source* source = lovrAlloc(Source);
|
||||
source->type = SOURCE_STREAM;
|
||||
source->stream = stream;
|
||||
alGenSources(1, &source->id);
|
||||
alGenBuffers(SOURCE_BUFFERS, source->buffers);
|
||||
lovrRetain(stream);
|
||||
return source;
|
||||
}
|
||||
|
||||
void lovrSourceDestroy(void* ref) {
|
||||
Source* source = ref;
|
||||
alDeleteSources(1, &source->id);
|
||||
alDeleteBuffers(source->type == SOURCE_STATIC ? 1 : SOURCE_BUFFERS, source->buffers);
|
||||
lovrRelease(SoundData, source->soundData);
|
||||
lovrRelease(AudioStream, source->stream);
|
||||
}
|
||||
|
||||
SourceType lovrSourceGetType(Source* source) {
|
||||
return source->type;
|
||||
}
|
||||
|
||||
uint32_t lovrSourceGetId(Source* source) {
|
||||
return source->id;
|
||||
}
|
||||
|
||||
AudioStream* lovrSourceGetStream(Source* source) {
|
||||
return source->stream;
|
||||
}
|
||||
|
||||
uint32_t lovrSourceGetBitDepth(Source* source) {
|
||||
return source->type == SOURCE_STATIC ? source->soundData->bitDepth : source->stream->bitDepth;
|
||||
}
|
||||
|
||||
void lovrSourceGetCone(Source* source, float* innerAngle, float* outerAngle, float* outerGain) {
|
||||
alGetSourcef(source->id, AL_CONE_INNER_ANGLE, innerAngle);
|
||||
alGetSourcef(source->id, AL_CONE_OUTER_ANGLE, outerAngle);
|
||||
alGetSourcef(source->id, AL_CONE_OUTER_GAIN, outerGain);
|
||||
*innerAngle *= (float) M_PI / 180.f;
|
||||
*outerAngle *= (float) M_PI / 180.f;
|
||||
}
|
||||
|
||||
uint32_t lovrSourceGetChannelCount(Source* source) {
|
||||
return source->type == SOURCE_STATIC ? source->soundData->channelCount : source->stream->channelCount;
|
||||
}
|
||||
|
||||
void lovrSourceGetOrientation(Source* source, quat orientation) {
|
||||
float v[4], forward[4] = { 0.f, 0.f, -1.f };
|
||||
alGetSourcefv(source->id, AL_DIRECTION, v);
|
||||
quat_between(orientation, forward, v);
|
||||
}
|
||||
|
||||
size_t lovrSourceGetDuration(Source* source) {
|
||||
return source->type == SOURCE_STATIC ? source->soundData->samples : source->stream->samples;
|
||||
}
|
||||
|
||||
void lovrSourceGetFalloff(Source* source, float* reference, float* max, float* rolloff) {
|
||||
alGetSourcef(source->id, AL_REFERENCE_DISTANCE, reference);
|
||||
alGetSourcef(source->id, AL_MAX_DISTANCE, max);
|
||||
alGetSourcef(source->id, AL_ROLLOFF_FACTOR, rolloff);
|
||||
}
|
||||
|
||||
float lovrSourceGetPitch(Source* source) {
|
||||
float pitch;
|
||||
alGetSourcef(source->id, AL_PITCH, &pitch);
|
||||
return pitch;
|
||||
}
|
||||
|
||||
void lovrSourceGetPosition(Source* source, vec3 position) {
|
||||
alGetSourcefv(source->id, AL_POSITION, position);
|
||||
}
|
||||
|
||||
uint32_t lovrSourceGetSampleRate(Source* source) {
|
||||
return source->type == SOURCE_STATIC ? source->soundData->sampleRate : source->stream->sampleRate;
|
||||
}
|
||||
|
||||
void lovrSourceGetVelocity(Source* source, vec3 velocity) {
|
||||
alGetSourcefv(source->id, AL_VELOCITY, velocity);
|
||||
}
|
||||
|
||||
float lovrSourceGetVolume(Source* source) {
|
||||
float volume;
|
||||
alGetSourcef(source->id, AL_GAIN, &volume);
|
||||
return volume;
|
||||
}
|
||||
|
||||
void lovrSourceGetVolumeLimits(Source* source, float* min, float* max) {
|
||||
alGetSourcef(source->id, AL_MIN_GAIN, min);
|
||||
alGetSourcef(source->id, AL_MAX_GAIN, max);
|
||||
}
|
||||
|
||||
bool lovrSourceIsLooping(Source* source) {
  return source->isLooping;
}

bool lovrSourceIsPlaying(Source* source) {
  ALenum state;
  alGetSourcei(source->id, AL_SOURCE_STATE, &state);
  return state == AL_PLAYING;
}

bool lovrSourceIsRelative(Source* source) {
  int isRelative;
  alGetSourcei(source->id, AL_SOURCE_RELATIVE, &isRelative);
  return isRelative == AL_TRUE;
}

void lovrSourcePause(Source* source) {
  alSourcePause(source->id);
}
|
||||
|
||||
// Starts (or resumes) playback.  Static sources just play; streamed sources
// must first have their buffer queue reset and primed when starting from the
// initial or stopped state.
void lovrSourcePlay(Source* source) {
  ALenum state;
  alGetSourcei(source->id, AL_SOURCE_STATE, &state);

  if (source->type == SOURCE_STATIC) {
    if (state != AL_PLAYING) {
      alSourcePlay(source->id);
    }
  } else {
    switch (state) {
      case AL_INITIAL:
      case AL_STOPPED:
        // Detach any stale buffers, refill the queue from the stream, and go
        alSourcei(source->id, AL_BUFFER, AL_NONE);
        lovrSourceStream(source, source->buffers, SOURCE_BUFFERS);
        alSourcePlay(source->id);
        break;
      case AL_PAUSED:
        alSourcePlay(source->id);
        break;
      case AL_PLAYING:
        break;
    }
  }
}
|
||||
|
||||
// Seeks to an absolute sample offset.  For streamed sources the AL source is
// stopped, the decoder repositioned, and playback restarted so the buffer
// queue is rebuilt from the new offset; the paused state is preserved.
void lovrSourceSeek(Source* source, size_t sample) {
  if (source->type == SOURCE_STATIC) {
    alSourcef(source->id, AL_SAMPLE_OFFSET, sample);
  } else {
    ALenum state;
    alGetSourcei(source->id, AL_SOURCE_STATE, &state);
    bool wasPaused = state == AL_PAUSED;
    alSourceStop(source->id);
    lovrAudioStreamSeek(source->stream, sample);
    lovrSourcePlay(source);
    if (wasPaused) {
      lovrSourcePause(source);
    }
  }
}
|
||||
|
||||
void lovrSourceSetCone(Source* source, float innerAngle, float outerAngle, float outerGain) {
|
||||
alSourcef(source->id, AL_CONE_INNER_ANGLE, innerAngle * 180.f / (float) M_PI);
|
||||
alSourcef(source->id, AL_CONE_OUTER_ANGLE, outerAngle * 180.f / (float) M_PI);
|
||||
alSourcef(source->id, AL_CONE_OUTER_GAIN, outerGain);
|
||||
}
|
||||
|
||||
void lovrSourceSetOrientation(Source* source, quat orientation) {
|
||||
float v[4] = { 0.f, 0.f, -1.f };
|
||||
quat_rotate(orientation, v);
|
||||
alSource3f(source->id, AL_DIRECTION, v[0], v[1], v[2]);
|
||||
}
|
||||
|
||||
void lovrSourceSetFalloff(Source* source, float reference, float max, float rolloff) {
|
||||
lovrAssert(lovrSourceGetChannelCount(source) == 1, "Positional audio is only supported for mono sources");
|
||||
alSourcef(source->id, AL_REFERENCE_DISTANCE, reference);
|
||||
alSourcef(source->id, AL_MAX_DISTANCE, max);
|
||||
alSourcef(source->id, AL_ROLLOFF_FACTOR, rolloff);
|
||||
}
|
||||
|
||||
void lovrSourceSetLooping(Source* source, bool isLooping) {
|
||||
lovrAssert(!source->stream || !lovrAudioStreamIsRaw(source->stream), "Can't loop a raw stream");
|
||||
source->isLooping = isLooping;
|
||||
if (source->type == SOURCE_STATIC) {
|
||||
alSourcei(source->id, AL_LOOPING, isLooping ? AL_TRUE : AL_FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
void lovrSourceSetPitch(Source* source, float pitch) {
|
||||
alSourcef(source->id, AL_PITCH, pitch);
|
||||
}
|
||||
|
||||
void lovrSourceSetPosition(Source* source, vec3 position) {
|
||||
lovrAssert(lovrSourceGetChannelCount(source) == 1, "Positional audio is only supported for mono sources");
|
||||
alSource3f(source->id, AL_POSITION, position[0], position[1], position[2]);
|
||||
}
|
||||
|
||||
void lovrSourceSetRelative(Source* source, bool isRelative) {
|
||||
alSourcei(source->id, AL_SOURCE_RELATIVE, isRelative ? AL_TRUE : AL_FALSE);
|
||||
}
|
||||
|
||||
void lovrSourceSetVelocity(Source* source, vec3 velocity) {
|
||||
alSource3f(source->id, AL_VELOCITY, velocity[0], velocity[1], velocity[2]);
|
||||
}
|
||||
|
||||
void lovrSourceSetVolume(Source* source, float volume) {
|
||||
alSourcef(source->id, AL_GAIN, volume);
|
||||
}
|
||||
|
||||
void lovrSourceSetVolumeLimits(Source* source, float min, float max) {
|
||||
alSourcef(source->id, AL_MIN_GAIN, min);
|
||||
alSourcef(source->id, AL_MAX_GAIN, max);
|
||||
}
|
||||
|
||||
// Stops playback.  Streamed sources additionally clear their buffer queue and
// rewind the decoder so the next play starts from the beginning.
void lovrSourceStop(Source* source) {
  if (source->type == SOURCE_STATIC) {
    alSourceStop(source->id);
  } else {
    alSourceStop(source->id);
    alSourcei(source->id, AL_BUFFER, AL_NONE);
    lovrAudioStreamRewind(source->stream);
  }
}
|
||||
|
||||
// Fills buffers with data and queues them, called once initially and over time to stream more data
|
||||
void lovrSourceStream(Source* source, ALuint* buffers, size_t count) {
|
||||
if (source->type == SOURCE_STATIC) {
|
||||
return;
|
||||
}
|
||||
|
||||
AudioStream* stream = source->stream;
|
||||
ALenum format = lovrAudioConvertFormat(stream->bitDepth, stream->channelCount);
|
||||
uint32_t frequency = stream->sampleRate;
|
||||
size_t samples = 0;
|
||||
size_t n = 0;
|
||||
|
||||
// Keep decoding until there is nothing left to decode or all the buffers are filled
|
||||
while (n < count && (samples = lovrAudioStreamDecode(stream, NULL, 0)) != 0) {
|
||||
alBufferData(buffers[n++], format, stream->buffer, (ALsizei) (samples * sizeof(ALshort)), frequency);
|
||||
}
|
||||
|
||||
alSourceQueueBuffers(source->id, (ALsizei) n, buffers);
|
||||
|
||||
if (samples == 0 && source->isLooping && n < count) {
|
||||
lovrAudioStreamRewind(stream);
|
||||
lovrSourceStream(source, buffers + n, count - n);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the current playback position, in samples.
size_t lovrSourceTell(Source* source) {
  switch (source->type) {
    case SOURCE_STATIC: {
      float offset;
      alGetSourcef(source->id, AL_SAMPLE_OFFSET, &offset);
      return offset;
    }

    case SOURCE_STREAM: {
      // The decoder runs ahead of playback by the amount of queued data, so
      // the playback position is the decoder position adjusted by the sample
      // offset inside the currently playing buffer; if more data is queued
      // than the decoder offset accounts for, the decoder has wrapped past
      // the end (looping), so compensate by one full stream length.
      size_t decoderOffset = lovrAudioStreamTell(source->stream);
      size_t samplesPerBuffer = source->stream->bufferSize / source->stream->channelCount / sizeof(ALshort);
      ALsizei queuedBuffers, sampleOffset;
      alGetSourcei(source->id, AL_BUFFERS_QUEUED, &queuedBuffers);
      alGetSourcei(source->id, AL_SAMPLE_OFFSET, &sampleOffset);

      size_t offset = decoderOffset + sampleOffset;

      if (queuedBuffers * samplesPerBuffer > offset) {
        return offset + source->stream->samples;
      } else {
        return offset;
      }
      break;
    }

    default: lovrThrow("Unreachable"); break;
  }
}
|
||||
|
||||
// Microphone
|
||||
|
||||
Microphone* lovrMicrophoneCreate(const char* name, size_t samples, uint32_t sampleRate, uint32_t bitDepth, uint32_t channelCount) {
|
||||
Microphone* microphone = lovrAlloc(Microphone);
|
||||
ALCdevice* device = alcCaptureOpenDevice(name, sampleRate, lovrAudioConvertFormat(bitDepth, channelCount), (ALCsizei) samples);
|
||||
lovrAssert(device, "Error opening capture device for microphone '%s'", name);
|
||||
microphone->device = device;
|
||||
microphone->name = name ? name : alcGetString(device, ALC_CAPTURE_DEVICE_SPECIFIER);
|
||||
microphone->sampleRate = sampleRate;
|
||||
microphone->bitDepth = bitDepth;
|
||||
microphone->channelCount = channelCount;
|
||||
return microphone;
|
||||
}
|
||||
|
||||
void lovrMicrophoneDestroy(void* ref) {
|
||||
Microphone* microphone = ref;
|
||||
lovrMicrophoneStopRecording(microphone);
|
||||
alcCaptureCloseDevice(microphone->device);
|
||||
}
|
||||
|
||||
uint32_t lovrMicrophoneGetBitDepth(Microphone* microphone) {
  return microphone->bitDepth;
}

uint32_t lovrMicrophoneGetChannelCount(Microphone* microphone) {
  return microphone->channelCount;
}
|
||||
|
||||
// Pulls captured samples into a SoundData at the given sample offset.  If
// `soundData` is NULL a new one is allocated (caller owns it); otherwise its
// format must match the microphone's.  `samples == 0` means "as many as are
// available".  Returns NULL when not recording or nothing is buffered.
SoundData* lovrMicrophoneGetData(Microphone* microphone, size_t samples, SoundData* soundData, size_t offset) {
  size_t availableSamples = lovrMicrophoneGetSampleCount(microphone);

  if (!microphone->isRecording || availableSamples == 0) {
    return NULL;
  }

  if (soundData == NULL) {
    soundData = lovrSoundDataCreate(samples, microphone->sampleRate, microphone->bitDepth, microphone->channelCount);
  } else {
    lovrAssert(soundData->channelCount == microphone->channelCount, "Microphone and SoundData channel counts must match");
    lovrAssert(soundData->sampleRate == microphone->sampleRate, "Microphone and SoundData sample rates must match");
    lovrAssert(soundData->bitDepth == microphone->bitDepth, "Microphone and SoundData bit depths must match");
    lovrAssert(offset + samples <= soundData->samples, "Tried to write samples past the end of a SoundData buffer");
  }

  // Clamp to what the capture device actually has buffered
  if (samples == 0 || samples > availableSamples) {
    samples = availableSamples;
  }

  uint8_t* data = (uint8_t*) soundData->blob->data + offset * (microphone->bitDepth / 8) * microphone->channelCount;
  alcCaptureSamples(microphone->device, data, (ALCsizei) samples);
  return soundData;
}
|
||||
|
||||
const char* lovrMicrophoneGetName(Microphone* microphone) {
|
||||
return microphone->name;
|
||||
}
|
||||
|
||||
size_t lovrMicrophoneGetSampleCount(Microphone* microphone) {
|
||||
if (!microphone->isRecording) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
ALCint samples;
|
||||
alcGetIntegerv(microphone->device, ALC_CAPTURE_SAMPLES, sizeof(ALCint), &samples);
|
||||
return (size_t) samples;
|
||||
}
|
||||
|
||||
uint32_t lovrMicrophoneGetSampleRate(Microphone* microphone) {
|
||||
return microphone->sampleRate;
|
||||
}
|
||||
|
||||
bool lovrMicrophoneIsRecording(Microphone* microphone) {
  return microphone->isRecording;
}

// Starts capturing; no-op if already recording.
void lovrMicrophoneStartRecording(Microphone* microphone) {
  if (microphone->isRecording) {
    return;
  }

  alcCaptureStart(microphone->device);
  microphone->isRecording = true;
}

// Stops capturing; no-op if not recording.
void lovrMicrophoneStopRecording(Microphone* microphone) {
  if (!microphone->isRecording) {
    return;
  }

  alcCaptureStop(microphone->device);
  microphone->isRecording = false;
}
|
||||
|
|
|
@ -1,13 +1,26 @@
|
|||
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

#pragma once

#define MAX_MICROPHONES 8

struct Source;
struct AudioStream;
struct SoundData;

// NOTE: lovrAudioConvertFormat is intentionally NOT declared here: this
// commit made it `static` inside audio.c, and the stale extern prototype
// (which also had the wrong return type, `int` vs `ALenum`) would conflict
// with that static definition.
typedef struct Source Source;
typedef struct Microphone Microphone;

typedef enum {
  SOURCE_STATIC,
  SOURCE_STREAM
} SourceType;

typedef enum {
  UNIT_SECONDS,
  UNIT_SAMPLES
} TimeUnit;

bool lovrAudioInit(void);
void lovrAudioDestroy(void);
|
||||
|
@ -28,3 +41,57 @@ void lovrAudioSetPosition(float* position);
|
|||
void lovrAudioSetVelocity(float* velocity);
|
||||
void lovrAudioSetVolume(float volume);
|
||||
void lovrAudioStop(void);
|
||||
|
||||
Source* lovrSourceCreateStatic(struct SoundData* soundData);
|
||||
Source* lovrSourceCreateStream(struct AudioStream* stream);
|
||||
void lovrSourceDestroy(void* ref);
|
||||
SourceType lovrSourceGetType(Source* source);
|
||||
uint32_t lovrSourceGetId(Source* source);
|
||||
struct AudioStream* lovrSourceGetStream(Source* source);
|
||||
uint32_t lovrSourceGetBitDepth(Source* source);
|
||||
uint32_t lovrSourceGetChannelCount(Source* source);
|
||||
void lovrSourceGetCone(Source* source, float* innerAngle, float* outerAngle, float* outerGain);
|
||||
void lovrSourceGetOrientation(Source* source, float* orientation);
|
||||
size_t lovrSourceGetDuration(Source* source);
|
||||
void lovrSourceGetFalloff(Source* source, float* reference, float* max, float* rolloff);
|
||||
float lovrSourceGetPitch(Source* source);
|
||||
void lovrSourceGetPosition(Source* source, float* position);
|
||||
void lovrSourceGetVelocity(Source* source, float* velocity);
|
||||
uint32_t lovrSourceGetSampleRate(Source* source);
|
||||
float lovrSourceGetVolume(Source* source);
|
||||
void lovrSourceGetVolumeLimits(Source* source, float* min, float* max);
|
||||
bool lovrSourceIsLooping(Source* source);
|
||||
bool lovrSourceIsPaused(Source* source);
|
||||
bool lovrSourceIsPlaying(Source* source);
|
||||
bool lovrSourceIsRelative(Source* source);
|
||||
bool lovrSourceIsStopped(Source* source);
|
||||
void lovrSourcePause(Source* source);
|
||||
void lovrSourcePlay(Source* source);
|
||||
void lovrSourceResume(Source* source);
|
||||
void lovrSourceRewind(Source* source);
|
||||
void lovrSourceSeek(Source* source, size_t sample);
|
||||
void lovrSourceSetCone(Source* source, float inner, float outer, float outerGain);
|
||||
void lovrSourceSetOrientation(Source* source, float* orientation);
|
||||
void lovrSourceSetFalloff(Source* source, float reference, float max, float rolloff);
|
||||
void lovrSourceSetLooping(Source* source, bool isLooping);
|
||||
void lovrSourceSetPitch(Source* source, float pitch);
|
||||
void lovrSourceSetPosition(Source* source, float* position);
|
||||
void lovrSourceSetRelative(Source* source, bool isRelative);
|
||||
void lovrSourceSetVelocity(Source* source, float* velocity);
|
||||
void lovrSourceSetVolume(Source* source, float volume);
|
||||
void lovrSourceSetVolumeLimits(Source* source, float min, float max);
|
||||
void lovrSourceStop(Source* source);
|
||||
void lovrSourceStream(Source* source, uint32_t* buffers, size_t count);
|
||||
size_t lovrSourceTell(Source* source);
|
||||
|
||||
Microphone* lovrMicrophoneCreate(const char* name, size_t samples, uint32_t sampleRate, uint32_t bitDepth, uint32_t channelCount);
|
||||
void lovrMicrophoneDestroy(void* ref);
|
||||
uint32_t lovrMicrophoneGetBitDepth(Microphone* microphone);
|
||||
uint32_t lovrMicrophoneGetChannelCount(Microphone* microphone);
|
||||
struct SoundData* lovrMicrophoneGetData(Microphone* microphone, size_t samples, struct SoundData* soundData, size_t offset);
|
||||
const char* lovrMicrophoneGetName(Microphone* microphone);
|
||||
size_t lovrMicrophoneGetSampleCount(Microphone* microphone);
|
||||
uint32_t lovrMicrophoneGetSampleRate(Microphone* microphone);
|
||||
bool lovrMicrophoneIsRecording(Microphone* microphone);
|
||||
void lovrMicrophoneStartRecording(Microphone* microphone);
|
||||
void lovrMicrophoneStopRecording(Microphone* microphone);
|
||||
|
|
|
@ -1,107 +0,0 @@
|
|||
#include "audio/microphone.h"
|
||||
#include "audio/audio.h"
|
||||
#include "data/soundData.h"
|
||||
#include "core/ref.h"
|
||||
#include "core/util.h"
|
||||
#include <AL/al.h>
|
||||
#include <AL/alc.h>
|
||||
|
||||
struct Microphone {
|
||||
ALCdevice* device;
|
||||
const char* name;
|
||||
bool isRecording;
|
||||
uint32_t sampleRate;
|
||||
uint32_t bitDepth;
|
||||
uint32_t channelCount;
|
||||
};
|
||||
|
||||
Microphone* lovrMicrophoneCreate(const char* name, size_t samples, uint32_t sampleRate, uint32_t bitDepth, uint32_t channelCount) {
|
||||
Microphone* microphone = lovrAlloc(Microphone);
|
||||
ALCdevice* device = alcCaptureOpenDevice(name, sampleRate, lovrAudioConvertFormat(bitDepth, channelCount), (ALCsizei) samples);
|
||||
lovrAssert(device, "Error opening capture device for microphone '%s'", name);
|
||||
microphone->device = device;
|
||||
microphone->name = name ? name : alcGetString(device, ALC_CAPTURE_DEVICE_SPECIFIER);
|
||||
microphone->sampleRate = sampleRate;
|
||||
microphone->bitDepth = bitDepth;
|
||||
microphone->channelCount = channelCount;
|
||||
return microphone;
|
||||
}
|
||||
|
||||
void lovrMicrophoneDestroy(void* ref) {
|
||||
Microphone* microphone = ref;
|
||||
lovrMicrophoneStopRecording(microphone);
|
||||
alcCaptureCloseDevice(microphone->device);
|
||||
}
|
||||
|
||||
uint32_t lovrMicrophoneGetBitDepth(Microphone* microphone) {
|
||||
return microphone->bitDepth;
|
||||
}
|
||||
|
||||
uint32_t lovrMicrophoneGetChannelCount(Microphone* microphone) {
|
||||
return microphone->channelCount;
|
||||
}
|
||||
|
||||
SoundData* lovrMicrophoneGetData(Microphone* microphone, size_t samples, SoundData* soundData, size_t offset) {
|
||||
size_t availableSamples = lovrMicrophoneGetSampleCount(microphone);
|
||||
|
||||
if (!microphone->isRecording || availableSamples == 0) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (soundData == NULL) {
|
||||
soundData = lovrSoundDataCreate(samples, microphone->sampleRate, microphone->bitDepth, microphone->channelCount);
|
||||
} else {
|
||||
lovrAssert(soundData->channelCount == microphone->channelCount, "Microphone and SoundData channel counts must match");
|
||||
lovrAssert(soundData->sampleRate == microphone->sampleRate, "Microphone and SoundData sample rates must match");
|
||||
lovrAssert(soundData->bitDepth == microphone->bitDepth, "Microphone and SoundData bit depths must match");
|
||||
lovrAssert(offset + samples <= soundData->samples, "Tried to write samples past the end of a SoundData buffer");
|
||||
}
|
||||
|
||||
if (samples == 0 || samples > availableSamples) {
|
||||
samples = availableSamples;
|
||||
}
|
||||
|
||||
uint8_t* data = (uint8_t*) soundData->blob->data + offset * (microphone->bitDepth / 8) * microphone->channelCount;
|
||||
alcCaptureSamples(microphone->device, data, (ALCsizei) samples);
|
||||
return soundData;
|
||||
}
|
||||
|
||||
const char* lovrMicrophoneGetName(Microphone* microphone) {
|
||||
return microphone->name;
|
||||
}
|
||||
|
||||
size_t lovrMicrophoneGetSampleCount(Microphone* microphone) {
|
||||
if (!microphone->isRecording) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
ALCint samples;
|
||||
alcGetIntegerv(microphone->device, ALC_CAPTURE_SAMPLES, sizeof(ALCint), &samples);
|
||||
return (size_t) samples;
|
||||
}
|
||||
|
||||
uint32_t lovrMicrophoneGetSampleRate(Microphone* microphone) {
|
||||
return microphone->sampleRate;
|
||||
}
|
||||
|
||||
bool lovrMicrophoneIsRecording(Microphone* microphone) {
|
||||
return microphone->isRecording;
|
||||
}
|
||||
|
||||
void lovrMicrophoneStartRecording(Microphone* microphone) {
|
||||
if (microphone->isRecording) {
|
||||
return;
|
||||
}
|
||||
|
||||
alcCaptureStart(microphone->device);
|
||||
microphone->isRecording = true;
|
||||
}
|
||||
|
||||
void lovrMicrophoneStopRecording(Microphone* microphone) {
|
||||
if (!microphone->isRecording) {
|
||||
return;
|
||||
}
|
||||
|
||||
alcCaptureStop(microphone->device);
|
||||
microphone->isRecording = false;
|
||||
}
|
|
@ -1,304 +0,0 @@
|
|||
#include "audio/source.h"
|
||||
#include "audio/audio.h"
|
||||
#include "data/audioStream.h"
|
||||
#include "data/soundData.h"
|
||||
#include "core/maf.h"
|
||||
#include "core/ref.h"
|
||||
#include "core/util.h"
|
||||
#include <math.h>
|
||||
#include <stdlib.h>
|
||||
#include <AL/al.h>
|
||||
#include <AL/alc.h>
|
||||
|
||||
#define SOURCE_BUFFERS 4
|
||||
|
||||
struct Source {
|
||||
SourceType type;
|
||||
struct SoundData* soundData;
|
||||
struct AudioStream* stream;
|
||||
ALuint id;
|
||||
ALuint buffers[SOURCE_BUFFERS];
|
||||
bool isLooping;
|
||||
};
|
||||
|
||||
static ALenum getState(Source* source) {
|
||||
ALenum state;
|
||||
alGetSourcei(source->id, AL_SOURCE_STATE, &state);
|
||||
return state;
|
||||
}
|
||||
|
||||
Source* lovrSourceCreateStatic(SoundData* soundData) {
|
||||
Source* source = lovrAlloc(Source);
|
||||
ALenum format = lovrAudioConvertFormat(soundData->bitDepth, soundData->channelCount);
|
||||
source->type = SOURCE_STATIC;
|
||||
source->soundData = soundData;
|
||||
alGenSources(1, &source->id);
|
||||
alGenBuffers(1, source->buffers);
|
||||
alBufferData(source->buffers[0], format, soundData->blob->data, (ALsizei) soundData->blob->size, soundData->sampleRate);
|
||||
alSourcei(source->id, AL_BUFFER, source->buffers[0]);
|
||||
lovrRetain(soundData);
|
||||
return source;
|
||||
}
|
||||
|
||||
Source* lovrSourceCreateStream(AudioStream* stream) {
|
||||
Source* source = lovrAlloc(Source);
|
||||
source->type = SOURCE_STREAM;
|
||||
source->stream = stream;
|
||||
alGenSources(1, &source->id);
|
||||
alGenBuffers(SOURCE_BUFFERS, source->buffers);
|
||||
lovrRetain(stream);
|
||||
return source;
|
||||
}
|
||||
|
||||
void lovrSourceDestroy(void* ref) {
|
||||
Source* source = ref;
|
||||
alDeleteSources(1, &source->id);
|
||||
alDeleteBuffers(source->type == SOURCE_STATIC ? 1 : SOURCE_BUFFERS, source->buffers);
|
||||
lovrRelease(SoundData, source->soundData);
|
||||
lovrRelease(AudioStream, source->stream);
|
||||
}
|
||||
|
||||
SourceType lovrSourceGetType(Source* source) {
|
||||
return source->type;
|
||||
}
|
||||
|
||||
uint32_t lovrSourceGetId(Source* source) {
|
||||
return source->id;
|
||||
}
|
||||
|
||||
AudioStream* lovrSourceGetStream(Source* source) {
|
||||
return source->stream;
|
||||
}
|
||||
|
||||
uint32_t lovrSourceGetBitDepth(Source* source) {
|
||||
return source->type == SOURCE_STATIC ? source->soundData->bitDepth : source->stream->bitDepth;
|
||||
}
|
||||
|
||||
void lovrSourceGetCone(Source* source, float* innerAngle, float* outerAngle, float* outerGain) {
|
||||
alGetSourcef(source->id, AL_CONE_INNER_ANGLE, innerAngle);
|
||||
alGetSourcef(source->id, AL_CONE_OUTER_ANGLE, outerAngle);
|
||||
alGetSourcef(source->id, AL_CONE_OUTER_GAIN, outerGain);
|
||||
*innerAngle *= (float) M_PI / 180.f;
|
||||
*outerAngle *= (float) M_PI / 180.f;
|
||||
}
|
||||
|
||||
uint32_t lovrSourceGetChannelCount(Source* source) {
|
||||
return source->type == SOURCE_STATIC ? source->soundData->channelCount : source->stream->channelCount;
|
||||
}
|
||||
|
||||
void lovrSourceGetOrientation(Source* source, quat orientation) {
|
||||
float v[4], forward[4] = { 0.f, 0.f, -1.f };
|
||||
alGetSourcefv(source->id, AL_DIRECTION, v);
|
||||
quat_between(orientation, forward, v);
|
||||
}
|
||||
|
||||
size_t lovrSourceGetDuration(Source* source) {
|
||||
return source->type == SOURCE_STATIC ? source->soundData->samples : source->stream->samples;
|
||||
}
|
||||
|
||||
void lovrSourceGetFalloff(Source* source, float* reference, float* max, float* rolloff) {
|
||||
alGetSourcef(source->id, AL_REFERENCE_DISTANCE, reference);
|
||||
alGetSourcef(source->id, AL_MAX_DISTANCE, max);
|
||||
alGetSourcef(source->id, AL_ROLLOFF_FACTOR, rolloff);
|
||||
}
|
||||
|
||||
float lovrSourceGetPitch(Source* source) {
|
||||
float pitch;
|
||||
alGetSourcef(source->id, AL_PITCH, &pitch);
|
||||
return pitch;
|
||||
}
|
||||
|
||||
void lovrSourceGetPosition(Source* source, vec3 position) {
|
||||
alGetSourcefv(source->id, AL_POSITION, position);
|
||||
}
|
||||
|
||||
uint32_t lovrSourceGetSampleRate(Source* source) {
|
||||
return source->type == SOURCE_STATIC ? source->soundData->sampleRate : source->stream->sampleRate;
|
||||
}
|
||||
|
||||
void lovrSourceGetVelocity(Source* source, vec3 velocity) {
|
||||
alGetSourcefv(source->id, AL_VELOCITY, velocity);
|
||||
}
|
||||
|
||||
float lovrSourceGetVolume(Source* source) {
|
||||
float volume;
|
||||
alGetSourcef(source->id, AL_GAIN, &volume);
|
||||
return volume;
|
||||
}
|
||||
|
||||
void lovrSourceGetVolumeLimits(Source* source, float* min, float* max) {
|
||||
alGetSourcef(source->id, AL_MIN_GAIN, min);
|
||||
alGetSourcef(source->id, AL_MAX_GAIN, max);
|
||||
}
|
||||
|
||||
bool lovrSourceIsLooping(Source* source) {
|
||||
return source->isLooping;
|
||||
}
|
||||
|
||||
bool lovrSourceIsPlaying(Source* source) {
|
||||
return getState(source) == AL_PLAYING;
|
||||
}
|
||||
|
||||
bool lovrSourceIsRelative(Source* source) {
|
||||
int isRelative;
|
||||
alGetSourcei(source->id, AL_SOURCE_RELATIVE, &isRelative);
|
||||
return isRelative == AL_TRUE;
|
||||
}
|
||||
|
||||
void lovrSourcePause(Source* source) {
|
||||
alSourcePause(source->id);
|
||||
}
|
||||
|
||||
void lovrSourcePlay(Source* source) {
|
||||
if (source->type == SOURCE_STATIC) {
|
||||
if (getState(source) != AL_PLAYING) {
|
||||
alSourcePlay(source->id);
|
||||
}
|
||||
} else {
|
||||
switch (getState(source)) {
|
||||
case AL_INITIAL:
|
||||
case AL_STOPPED:
|
||||
alSourcei(source->id, AL_BUFFER, AL_NONE);
|
||||
lovrSourceStream(source, source->buffers, SOURCE_BUFFERS);
|
||||
alSourcePlay(source->id);
|
||||
break;
|
||||
case AL_PAUSED:
|
||||
alSourcePlay(source->id);
|
||||
break;
|
||||
case AL_PLAYING:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void lovrSourceSeek(Source* source, size_t sample) {
|
||||
if (source->type == SOURCE_STATIC) {
|
||||
alSourcef(source->id, AL_SAMPLE_OFFSET, sample);
|
||||
} else {
|
||||
bool wasPaused = getState(source) == AL_PAUSED;
|
||||
alSourceStop(source->id);
|
||||
lovrAudioStreamSeek(source->stream, sample);
|
||||
lovrSourcePlay(source);
|
||||
if (wasPaused) {
|
||||
lovrSourcePause(source);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void lovrSourceSetCone(Source* source, float innerAngle, float outerAngle, float outerGain) {
|
||||
alSourcef(source->id, AL_CONE_INNER_ANGLE, innerAngle * 180.f / (float) M_PI);
|
||||
alSourcef(source->id, AL_CONE_OUTER_ANGLE, outerAngle * 180.f / (float) M_PI);
|
||||
alSourcef(source->id, AL_CONE_OUTER_GAIN, outerGain);
|
||||
}
|
||||
|
||||
void lovrSourceSetOrientation(Source* source, quat orientation) {
|
||||
float v[4] = { 0.f, 0.f, -1.f };
|
||||
quat_rotate(orientation, v);
|
||||
alSource3f(source->id, AL_DIRECTION, v[0], v[1], v[2]);
|
||||
}
|
||||
|
||||
void lovrSourceSetFalloff(Source* source, float reference, float max, float rolloff) {
|
||||
lovrAssert(lovrSourceGetChannelCount(source) == 1, "Positional audio is only supported for mono sources");
|
||||
alSourcef(source->id, AL_REFERENCE_DISTANCE, reference);
|
||||
alSourcef(source->id, AL_MAX_DISTANCE, max);
|
||||
alSourcef(source->id, AL_ROLLOFF_FACTOR, rolloff);
|
||||
}
|
||||
|
||||
void lovrSourceSetLooping(Source* source, bool isLooping) {
|
||||
lovrAssert(!source->stream || !lovrAudioStreamIsRaw(source->stream), "Can't loop a raw stream");
|
||||
source->isLooping = isLooping;
|
||||
if (source->type == SOURCE_STATIC) {
|
||||
alSourcei(source->id, AL_LOOPING, isLooping ? AL_TRUE : AL_FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
void lovrSourceSetPitch(Source* source, float pitch) {
|
||||
alSourcef(source->id, AL_PITCH, pitch);
|
||||
}
|
||||
|
||||
void lovrSourceSetPosition(Source* source, vec3 position) {
|
||||
lovrAssert(lovrSourceGetChannelCount(source) == 1, "Positional audio is only supported for mono sources");
|
||||
alSource3f(source->id, AL_POSITION, position[0], position[1], position[2]);
|
||||
}
|
||||
|
||||
void lovrSourceSetRelative(Source* source, bool isRelative) {
|
||||
alSourcei(source->id, AL_SOURCE_RELATIVE, isRelative ? AL_TRUE : AL_FALSE);
|
||||
}
|
||||
|
||||
void lovrSourceSetVelocity(Source* source, vec3 velocity) {
|
||||
alSource3f(source->id, AL_VELOCITY, velocity[0], velocity[1], velocity[2]);
|
||||
}
|
||||
|
||||
void lovrSourceSetVolume(Source* source, float volume) {
|
||||
alSourcef(source->id, AL_GAIN, volume);
|
||||
}
|
||||
|
||||
void lovrSourceSetVolumeLimits(Source* source, float min, float max) {
|
||||
alSourcef(source->id, AL_MIN_GAIN, min);
|
||||
alSourcef(source->id, AL_MAX_GAIN, max);
|
||||
}
|
||||
|
||||
void lovrSourceStop(Source* source) {
|
||||
if (source->type == SOURCE_STATIC) {
|
||||
alSourceStop(source->id);
|
||||
} else {
|
||||
alSourceStop(source->id);
|
||||
alSourcei(source->id, AL_BUFFER, AL_NONE);
|
||||
lovrAudioStreamRewind(source->stream);
|
||||
}
|
||||
}
|
||||
|
||||
// Fills buffers with data and queues them, called once initially and over time to stream more data
|
||||
void lovrSourceStream(Source* source, ALuint* buffers, size_t count) {
|
||||
if (source->type == SOURCE_STATIC) {
|
||||
return;
|
||||
}
|
||||
|
||||
AudioStream* stream = source->stream;
|
||||
ALenum format = lovrAudioConvertFormat(stream->bitDepth, stream->channelCount);
|
||||
uint32_t frequency = stream->sampleRate;
|
||||
size_t samples = 0;
|
||||
size_t n = 0;
|
||||
|
||||
// Keep decoding until there is nothing left to decode or all the buffers are filled
|
||||
while (n < count && (samples = lovrAudioStreamDecode(stream, NULL, 0)) != 0) {
|
||||
alBufferData(buffers[n++], format, stream->buffer, (ALsizei) (samples * sizeof(ALshort)), frequency);
|
||||
}
|
||||
|
||||
alSourceQueueBuffers(source->id, (ALsizei) n, buffers);
|
||||
|
||||
if (samples == 0 && source->isLooping && n < count) {
|
||||
lovrAudioStreamRewind(stream);
|
||||
lovrSourceStream(source, buffers + n, count - n);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
size_t lovrSourceTell(Source* source) {
|
||||
switch (source->type) {
|
||||
case SOURCE_STATIC: {
|
||||
float offset;
|
||||
alGetSourcef(source->id, AL_SAMPLE_OFFSET, &offset);
|
||||
return offset;
|
||||
}
|
||||
|
||||
case SOURCE_STREAM: {
|
||||
size_t decoderOffset = lovrAudioStreamTell(source->stream);
|
||||
size_t samplesPerBuffer = source->stream->bufferSize / source->stream->channelCount / sizeof(ALshort);
|
||||
ALsizei queuedBuffers, sampleOffset;
|
||||
alGetSourcei(source->id, AL_BUFFERS_QUEUED, &queuedBuffers);
|
||||
alGetSourcei(source->id, AL_SAMPLE_OFFSET, &sampleOffset);
|
||||
|
||||
size_t offset = decoderOffset + sampleOffset;
|
||||
|
||||
if (queuedBuffers * samplesPerBuffer > offset) {
|
||||
return offset + source->stream->samples;
|
||||
} else {
|
||||
return offset;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
default: lovrThrow("Unreachable"); break;
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue