diff --git a/Tupfile b/Tupfile index 72bb6be5..f8345335 100644 --- a/Tupfile +++ b/Tupfile @@ -16,7 +16,7 @@ SRC += src/core/util.c SRC += src/core/zip.c # modules -SRC_@(AUDIO) += src/modules/audio/audio.c src/modules/audio/spatializers/dummy_spatializer.c +SRC_@(AUDIO) += src/modules/audio/audio.c src/modules/audio/spatializers/simple_spatializer.c SRC_@(DATA) += src/modules/data/*.c SRC_@(EVENT) += src/modules/event/*.c SRC_@(FILESYSTEM) += src/modules/filesystem/*.c diff --git a/Tuprules.tup b/Tuprules.tup index 773d1714..258bafe6 100644 --- a/Tuprules.tup +++ b/Tuprules.tup @@ -122,20 +122,6 @@ ifneq (@(CMAKE_DEPS),) LIBS_macos += $(BUILD)/glfw/src/libglfw.*dylib LIBS_linux += $(BUILD)/glfw/src/libglfw.*so* - # OpenAL - CFLAGS_@(AUDIO)_win32 += -I$(DEPS)/openal-soft/include - CFLAGS_@(AUDIO)_macos += -I$(DEPS)/openal-soft/include - CFLAGS_@(AUDIO)_linux += -I$(DEPS)/openal-soft/include - CFLAGS_@(AUDIO)_android += -I$(DEPS)/openal-soft/include - LDFLAGS_@(AUDIO)_win32 += -L$(BUILD)/openal/$(CONFIG) -lOpenAL32 - LDFLAGS_@(AUDIO)_macos += -L$(BUILD)/openal -lopenal - LDFLAGS_@(AUDIO)_linux += -L$(BUILD)/openal -lopenal - LDFLAGS_@(AUDIO)_android += -L$(BUILD)/lib/arm64-v8a -lopenal - LIBS_@(AUDIO)_win32 += $(BUILD)/openal/$(CONFIG)/OpenAL32.dll - LIBS_@(AUDIO)_macos += $(BUILD)/openal/libopenal.*dylib - LIBS_@(AUDIO)_linux += $(BUILD)/openal/libopenal.*so* - LIBS_@(AUDIO)_android += $(BUILD)/lib/arm64-v8a/libopenal.*so* - # msdfgen CFLAGS_@(DATA) += -I$(DEPS)/msdfgen LDFLAGS_@(DATA)_win32 += -L$(BUILD)/lib_msdfgen/$(CONFIG) -lmsdfgen diff --git a/src/api/l_audio.c b/src/api/l_audio.c index 2d50c583..ad3bda75 100644 --- a/src/api/l_audio.c +++ b/src/api/l_audio.c @@ -7,14 +7,49 @@ #include "core/util.h" #include -#define AUDIO_SPATIALIZER_MAX_SOURCES_HINT 16 - StringEntry lovrAudioType[] = { [AUDIO_PLAYBACK] = ENTRY("playback"), [AUDIO_CAPTURE] = ENTRY("capture"), { 0 } }; +StringEntry lovrTimeUnit[] = { + [UNIT_SECONDS] = ENTRY("seconds"), + [UNIT_FRAMES] = ENTRY("frames"), + { 0 } +}; + +static void onDevice(AudioDevice* device, void* userdata) { + lua_State* L = userdata; + lua_createtable(L, 0, 3); + void* id = lua_newuserdata(L, device->idSize); + memcpy(id, device->id, device->idSize); + lua_setfield(L, -2, "id"); + lua_pushstring(L, device->name); + lua_setfield(L, -2, "name"); + lua_pushboolean(L, device->isDefault); + lua_setfield(L, -2, "default"); + lua_rawseti(L, -2, luax_len(L, -2) + 1); +} + +static int l_lovrAudioGetDevices(lua_State *L) { + AudioType type = luax_checkenum(L, 1, AudioType, "playback"); + lua_newtable(L); + lovrAudioEnumerateDevices(type, onDevice, L); + return 1; +} + +static int l_lovrAudioSetDevice(lua_State *L) { + AudioType type = luax_checkenum(L, 1, AudioType, "playback"); + void* id = lua_touserdata(L, 2); + size_t size = luax_len(L, 2); + uint32_t sampleRate = lua_tointeger(L, 3); + SampleFormat format = luax_checkenum(L, 4, SampleFormat, "f32"); + bool success = lovrAudioSetDevice(type, id, size, sampleRate, format); + lua_pushboolean(L, success); + return 1; +} + static int l_lovrAudioStart(lua_State* L) { AudioType type = luax_checkenum(L, 1, AudioType, "playback"); bool started = lovrAudioStart(type); @@ -61,8 +96,8 @@ static int l_lovrAudioGetPose(lua_State *L) { } static int l_lovrAudioSetPose(lua_State *L) { - float position[4], orientation[4]; int index = 1; + float position[4], orientation[4]; index = luax_readvec3(L, index, position, NULL); index = luax_readquat(L, index, orientation, NULL); lovrAudioSetPose(position, orientation); @@
-75,42 +110,6 @@ static int l_lovrAudioGetCaptureStream(lua_State* L) { return 1; } -static int l_lovrAudioGetDevices(lua_State *L) { - AudioType type = luax_checkenum(L, 1, AudioType, "playback"); - AudioDeviceArr *devices = lovrAudioGetDevices(type); - - lua_newtable(L); - int top = lua_gettop(L); - for (size_t i = 0; i < devices->length; i++) { - AudioDevice *device = &devices->data[i]; - lua_newtable(L); - luax_pushenum(L, AudioType, device->type); - lua_setfield(L, -2, "type"); - lua_pushstring(L, device->name); - lua_setfield(L, -2, "name"); - lua_pushboolean(L, device->isDefault); - lua_setfield(L, -2, "isDefault"); - lua_rawseti(L, top, i + 1); - } - - lovrAudioFreeDevices(devices); - return 1; -} - -static int l_lovrAudioUseDevice(lua_State *L) { - AudioType type = luax_checkenum(L, 1, AudioType, "playback"); - const char *name = lua_tostring(L, 2); - lovrAudioUseDevice(type, name); - return 0; -} - -static int l_lovrAudioSetCaptureFormat(lua_State *L) { - SampleFormat format = luax_checkenum(L, 1, SampleFormat, "invalid"); - int sampleRate = lua_tointeger(L, 2); - lovrAudioSetCaptureFormat(format, sampleRate); - return 0; -} - static int l_lovrAudioGetSpatializer(lua_State *L) { lua_pushstring(L, lovrAudioGetSpatializer()); return 1; @@ -144,6 +143,8 @@ static int l_lovrAudioNewSource(lua_State* L) { } static const luaL_Reg lovrAudio[] = { + { "getDevices", l_lovrAudioGetDevices }, + { "setDevice", l_lovrAudioSetDevice }, { "start", l_lovrAudioStart }, { "stop", l_lovrAudioStop }, { "isStarted", l_lovrAudioIsStarted }, @@ -152,9 +153,6 @@ static const luaL_Reg lovrAudio[] = { { "getPose", l_lovrAudioGetPose }, { "setPose", l_lovrAudioSetPose }, { "getCaptureStream", l_lovrAudioGetCaptureStream }, - { "getDevices", l_lovrAudioGetDevices }, - { "useDevice", l_lovrAudioUseDevice }, - { "setCaptureFormat", l_lovrAudioSetCaptureFormat }, { "getSpatializer", l_lovrAudioGetSpatializer }, { "newSource", l_lovrAudioNewSource }, { NULL, NULL } @@ -166,30 +164,17 @@ int luaopen_lovr_audio(lua_State* L) { luax_registertype(L, Source); const char *spatializer = NULL; - int spatializerMaxSourcesHint = AUDIO_SPATIALIZER_MAX_SOURCES_HINT; luax_pushconf(L); lua_getfield(L, -1, "audio"); if (lua_istable(L, -1)) { - lua_getfield(L, -1, "spatializerMaxSourcesHint"); - if (lua_type(L, -1) == LUA_TNUMBER) { - spatializerMaxSourcesHint = lua_tointeger(L, -1); - } - lua_pop(L, 1); - lua_getfield(L, -1, "spatializer"); - if (lua_type(L, -1) == LUA_TSTRING) { - spatializer = lua_tostring(L, -1); - } + spatializer = lua_tostring(L, -1); lua_pop(L, 1); } lua_pop(L, 2); - SpatializerConfig config = { - .spatializer = spatializer, - .spatializerMaxSourcesHint=spatializerMaxSourcesHint - }; - - if (lovrAudioInit(config)) { + if (lovrAudioInit(spatializer)) { + lovrAudioSetDevice(AUDIO_PLAYBACK, NULL, 0, 44100, SAMPLE_F32); lovrAudioStart(AUDIO_PLAYBACK); luax_atexit(L, lovrAudioDestroy); } diff --git a/src/api/l_audio_source.c b/src/api/l_audio_source.c index ae89b7f8..e00e4f9a 100644 --- a/src/api/l_audio_source.c +++ b/src/api/l_audio_source.c @@ -6,8 +6,12 @@ static int l_lovrSourcePlay(lua_State* L) { Source* source = luax_checktype(L, 1, Source); - lovrSourcePlay(source); - return 0; + if (lua_isboolean(L, -1)) { + lovrSourceSetLooping(source, lua_toboolean(L, -1)); + } + bool played = lovrSourcePlay(source); + lua_pushboolean(L, played); + return 1; } static int l_lovrSourcePause(lua_State* L) { diff --git a/src/api/l_data.c b/src/api/l_data.c index d290f1f8..38196bc7 100644 --- a/src/api/l_data.c +++ 
b/src/api/l_data.c @@ -11,7 +11,6 @@ StringEntry lovrSampleFormat[] = { [SAMPLE_F32] = ENTRY("f32"), [SAMPLE_I16] = ENTRY("i16"), - [SAMPLE_INVALID] = ENTRY("invalid"), { 0 } }; diff --git a/src/api/l_data_soundData.c b/src/api/l_data_soundData.c index cecc5ad7..6d6c114e 100644 --- a/src/api/l_data_soundData.c +++ b/src/api/l_data_soundData.c @@ -5,12 +5,6 @@ #include "core/util.h" #include -StringEntry lovrTimeUnit[] = { - [UNIT_SECONDS] = ENTRY("seconds"), - [UNIT_SAMPLES] = ENTRY("samples"), - { 0 } -}; - static int l_lovrSoundDataGetBlob(lua_State* L) { SoundData* soundData = luax_checktype(L, 1, SoundData); Blob* blob = soundData->blob; @@ -18,16 +12,10 @@ static int l_lovrSoundDataGetBlob(lua_State* L) { return 1; } -static int l_lovrSoundDataGetDuration(lua_State* L) { +static int l_lovrSoundDataGetFrameCount(lua_State* L) { SoundData* soundData = luax_checktype(L, 1, SoundData); - TimeUnit units = luax_checkenum(L, 2, TimeUnit, "seconds"); - uint32_t frames = lovrSoundDataGetDuration(soundData); - if (units == UNIT_SECONDS) { - lua_pushnumber(L, (double) frames / soundData->sampleRate); - } else { - lua_pushinteger(L, frames); - } - + uint32_t frames = lovrSoundDataGetFrameCount(soundData); + lua_pushinteger(L, frames); return 1; } @@ -41,7 +29,7 @@ static int l_lovrSoundDataRead(lua_State* L) { int index = 2; SoundData* dest = luax_totype(L, index, SoundData); if (dest) index++; - size_t frameCount = lua_type(L, index) == LUA_TNUMBER ? lua_tointeger(L, index++) : lovrSoundDataGetDuration(source); + size_t frameCount = lua_type(L, index) == LUA_TNUMBER ? lua_tointeger(L, index++) : lovrSoundDataGetFrameCount(source); size_t offset = dest ? luaL_optinteger(L, index, 0) : 0; bool shouldRelease = false; if (dest == NULL) { @@ -90,7 +78,7 @@ static int l_lovrSoundDataSetSample(lua_State* L) { const luaL_Reg lovrSoundData[] = { { "getBlob", l_lovrSoundDataGetBlob }, - { "getDuration", l_lovrSoundDataGetDuration }, + { "getFrameCount", l_lovrSoundDataGetFrameCount }, { "read", l_lovrSoundDataRead }, { "append", l_lovrSoundDataAppend }, { "setSample", l_lovrSoundDataSetSample }, diff --git a/src/lib/miniaudio/miniaudio.c b/src/lib/miniaudio/miniaudio.c index ff7bf4f4..2b35d779 100644 --- a/src/lib/miniaudio/miniaudio.c +++ b/src/lib/miniaudio/miniaudio.c @@ -1,6 +1,11 @@ #define MINIAUDIO_IMPLEMENTATION #define MA_NO_DECODING +#define MA_NO_ENCODING +#define MA_NO_GENERATION #ifdef ANDROID #define MA_NO_RUNTIME_LINKING #endif +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-function" #include "miniaudio.h" +#pragma clang diagnostic pop diff --git a/src/modules/audio/audio.c b/src/modules/audio/audio.c index 6d5d2723..8f912881 100644 --- a/src/modules/audio/audio.c +++ b/src/modules/audio/audio.c @@ -1,7 +1,6 @@ #include "audio/audio.h" #include "audio/spatializer.h" #include "data/soundData.h" -#include "data/blob.h" #include "core/arr.h" #include "core/ref.h" #include "core/os.h" @@ -9,7 +8,6 @@ #include "lib/miniaudio/miniaudio.h" #include #include -#include static const ma_format miniaudioFormats[] = { [SAMPLE_I16] = ma_format_s16, @@ -19,17 +17,8 @@ static const ma_format miniaudioFormats[] = { #define OUTPUT_FORMAT SAMPLE_F32 #define OUTPUT_CHANNELS 2 #define CAPTURE_CHANNELS 1 -#define CALLBACK_PERIODS 3 -#define PERIOD_LENGTH 128 -#define CALLBACK_LENGTH (PERIOD_LENGTH*CALLBACK_PERIODS) - -//#define LOVR_DEBUG_AUDIOTAP -#ifdef LOVR_DEBUG_AUDIOTAP -// To get a record of what the audio callback is playing, define LOVR_DEBUG_AUDIOTAP, -// after running look 
in the lovr save directory for lovrDebugAudio.raw, -// and open as raw 32-bit stereo floats (Audacity can do this, or Amadeus on Mac) -#include "filesystem/filesystem.h" -#endif +#define MAX_SOURCES 64 +#define BUFFER_SIZE 256 struct Source { Source* next; @@ -46,195 +35,129 @@ struct Source { bool spatial; }; -typedef struct { - char *deviceName; - uint32_t sampleRate; - SampleFormat format; -} AudioConfig; - static uint32_t outputChannelCountForSource(Source *source) { return source->spatial ? 1 : OUTPUT_CHANNELS; } static struct { bool initialized; ma_context context; - AudioConfig config[2]; ma_device devices[2]; ma_mutex lock; Source* sources; - SoundData *captureStream; + SoundData* captureStream; arr_t(ma_data_converter*) converters; float position[4]; float orientation[4]; Spatializer* spatializer; - bool fixedBuffer; - uint32_t bufferSize; - float *scratchBuffer1, *scratchBuffer2; // Used internally by mix(). Contains bufferSize stereo frames. - float *persistBuffer; // If fixedBuffer, preserves excess audio between frames. - float *persistBufferContent; // Pointer into persistBuffer - uint32_t persistBufferRemaining; // In fixedBuffer mode, how much of the previous frame's mixBuffer was consumed? -#ifdef LOVR_DEBUG_AUDIOTAP - bool audiotapWriting; -#endif + uint32_t leftoverOffset; + uint32_t leftoverFrames; + float leftovers[BUFFER_SIZE * 2]; } state; // Device callbacks -// Return value is number of stereo frames in buffer -// Note: output is always equal to scratchBuffer1. This saves a little memory but is ugly. -// count is always less than or equal to the size of scratchBuffer1 -static int generateSource(Source* source, float* output, uint32_t count) { - // Scratch buffers: Raw generated audio from source; converted to float by converter - char* raw; - float* aux; +static void onPlayback(ma_device* device, void* out, const void* in, uint32_t count) { + float* output = out; - if (source->spatial) { // In spatial mode, raw and aux are mono and only output is stereo - raw = (char*) state.scratchBuffer1; - aux = state.scratchBuffer2; - } else { // Otherwise, the data converter will produce stereo and aux=output is stereo - raw = (char*) state.scratchBuffer2; - aux = output; + // Consume any leftovers from the previous callback + if (state.leftoverFrames > 0) { + uint32_t leftoverFrames = MIN(count, state.leftoverFrames); + memcpy(output, state.leftovers + state.leftoverOffset * OUTPUT_CHANNELS, leftoverFrames * OUTPUT_CHANNELS * sizeof(float)); + state.leftoverOffset += leftoverFrames; + state.leftoverFrames -= leftoverFrames; + output += leftoverFrames * OUTPUT_CHANNELS; + count -= leftoverFrames; } - bool sourceFinished = false; - ma_uint64 framesIn = 0; - while (framesIn < count) { // Read from source until raw buffer filled - ma_uint64 framesRequested = count - framesIn; - // FIXME: Buffer size math will break (crash) if channels > 2 - ma_uint64 framesRead = source->sound->read(source->sound, source->offset, framesRequested, - raw + framesIn * SampleFormatBytesPerFrame(source->sound->channels, source->sound->format)); - framesIn += framesRead; - if (framesRead < framesRequested) { - source->offset = 0; - if (!source->looping) { // Source has reached its final end - sourceFinished = true; - break; - } - } else { - source->offset += framesRead; - } - } - // 1 channel for spatial, 2 otherwise - ma_uint64 framesConverted = framesIn * outputChannelCountForSource(source); - - // We assume framesConverted is not changed by calling this and discard its value - 
ma_data_converter_process_pcm_frames(source->converter, raw, &framesIn, aux, &framesConverted); - - ma_uint64 framesOut = framesIn; - - if (source->spatial) { - // Fixed buffer mode we have to pad buffer with silence if it underran - if (state.fixedBuffer && sourceFinished) { - memset(aux + framesIn, 0, (count - framesIn) * sizeof(float)); // Note always mono - framesOut = count; - } - framesOut = state.spatializer->apply(source, aux, output, framesIn, framesOut); + if (count == 0) { + return; } - if (sourceFinished) { - lovrSourcePause(source); - } + ma_mutex_lock(&state.lock); - return framesOut; -} + do { + float raw[BUFFER_SIZE * 2]; + float aux[BUFFER_SIZE * 2]; + float mix[BUFFER_SIZE * 2]; -static void onPlayback(ma_device* device, void* outputUntyped, const void* _, uint32_t count) { - float* output = outputUntyped; + float* dst = count >= BUFFER_SIZE ? output : state.leftovers; -#ifdef LOVR_DEBUG_AUDIOTAP - int originalCount = count; -#endif - - // This case means we are in fixedBuffer mode and there was excess data generated last frame - if (state.persistBufferRemaining > 0) { - uint32_t persistConsumed = MIN(count, state.persistBufferRemaining); - memcpy(output, state.persistBufferContent, persistConsumed * OUTPUT_CHANNELS * sizeof(float)); // Stereo frames - // Move forward both the persistBufferContent and output pointers so the right thing happens regardless of which is larger - // persistBufferRemaining being larger than count is deeply unlikely, but it is not impossible - state.persistBufferContent += persistConsumed*OUTPUT_CHANNELS; - state.persistBufferRemaining -= persistConsumed; - output += persistConsumed * OUTPUT_CHANNELS; - count -= persistConsumed; - } - - while (count > 0) { // Mixing will be done in a series of passes - ma_mutex_lock(&state.lock); - - // Usually we mix directly into the output buffer. - // But if we're in fixed buffer mode and the fixed buffer size is bigger than output, - // we mix into persistBuffer and save the excess until next onPlayback() call. 
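/* Illustrative sketch (not part of the patch): the replacement onPlayback in this hunk
 * always mixes in fixed BUFFER_SIZE blocks and parks any surplus beyond what the device
 * asked for in state.leftovers, draining it first on the next callback. The standalone
 * helper below shows the same carry arithmetic with hypothetical names (Carry, mixBlock). */
#include <stdint.h>
#include <string.h>

#define BLOCK 256
#define CHANNELS 2

typedef struct {
  float data[BLOCK * CHANNELS]; // surplus stereo frames from the previous callback
  uint32_t offset;              // first unconsumed frame in data
  uint32_t frames;              // number of unconsumed frames in data
} Carry;

// Stand-in for the real per-source mixing loop: always produces exactly BLOCK frames.
static void mixBlock(float* out) {
  memset(out, 0, BLOCK * CHANNELS * sizeof(float));
}

static void callback(Carry* carry, float* output, uint32_t count) {
  // 1. Drain whatever the previous callback left behind.
  uint32_t reuse = count < carry->frames ? count : carry->frames;
  memcpy(output, carry->data + carry->offset * CHANNELS, reuse * CHANNELS * sizeof(float));
  carry->offset += reuse;
  carry->frames -= reuse;
  output += reuse * CHANNELS;
  count -= reuse;

  // 2. Mix whole blocks; the final partial block goes through the carry buffer.
  while (count > 0) {
    if (count >= BLOCK) {
      mixBlock(output); // mix straight into the device buffer
      output += BLOCK * CHANNELS;
      count -= BLOCK;
    } else {
      mixBlock(carry->data); // mix a full block, hand over only `count` frames now
      memcpy(output, carry->data, count * CHANNELS * sizeof(float));
      carry->offset = count;
      carry->frames = BLOCK - count;
      count = 0;
    }
  }
}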
- uint32_t passSize; - float* mixBuffer; - bool usingPersistBuffer; - if (state.fixedBuffer) { - usingPersistBuffer = state.bufferSize > count; - passSize = state.bufferSize; - if (usingPersistBuffer) { - mixBuffer = state.persistBuffer; - state.persistBufferRemaining = state.bufferSize; - memset(mixBuffer, 0, state.bufferSize * sizeof(float) * OUTPUT_CHANNELS); - } else { - mixBuffer = output; - state.persistBufferRemaining = 0; - } - } else { - usingPersistBuffer = false; - mixBuffer = output; - passSize = MIN(count, state.bufferSize); // In non-fixedBuffer mode we can use a buffer smaller than bufferSize, but not larger + if (dst == state.leftovers) { + memset(dst, 0, sizeof(state.leftovers)); } - // For each Source, remove it if it isn't playing or process it and remove it if it stops for (Source** list = &state.sources, *source = *list; source != NULL; source = *list) { - bool playing = source->playing; - if (playing) { - // Generate audio - uint32_t generated = generateSource(source, state.scratchBuffer1, passSize); - playing = source->playing; // Can change during generateSource - - // Mix and apply volume - for (uint32_t i = 0; i < generated * OUTPUT_CHANNELS; i++) { - mixBuffer[i] += state.scratchBuffer1[i] * source->volume; - } - } - - // Iterate/manage list - if (playing) { - list = &source->next; - } else { + if (!source->playing) { *list = source->next; source->tracked = false; lovrRelease(Source, source); + continue; } - } - { - uint32_t tailGenerated = state.spatializer->tail(state.scratchBuffer2, state.scratchBuffer1, passSize); - // Mix tail - for (uint32_t i = 0; i < tailGenerated * OUTPUT_CHANNELS; i++) { - mixBuffer[i] += state.scratchBuffer1[i]; + + uint32_t channels = outputChannelCountForSource(source); + uint32_t framesToConvert = BUFFER_SIZE; + uint32_t framesConverted = 0; + while (framesToConvert > 0) { + uint64_t rawCapacity = sizeof(raw) / source->sound->channels / sizeof(float); + uint64_t framesToRead = MIN(ma_data_converter_get_required_input_frame_count(source->converter, framesToConvert), rawCapacity); + uint64_t framesRead = source->sound->read(source->sound, source->offset, framesToRead, raw); + + ma_uint64 framesIn = framesRead; + ma_uint64 framesOut = framesToConvert; + ma_data_converter_process_pcm_frames(source->converter, raw, &framesIn, aux + framesConverted * channels, &framesOut); + // assert(framesIn == framesRead); + // assert(framesOut <= framesToConvert); + + if (framesRead < framesToRead) { + source->offset = 0; + if (!source->looping) { + source->playing = false; + memset(aux + framesConverted * channels, 0, framesToConvert * channels * sizeof(float)); + break; + } + } + + framesToConvert -= framesOut; + framesConverted += framesOut; + source->offset += framesRead; } - } - ma_mutex_unlock(&state.lock); - if (usingPersistBuffer) { // Copy persist buffer into output (if needed) - // Remember, in this scenario state.persistBuffer is mixBuffer and we just overwrote it in full - memcpy(output, state.persistBuffer, count * OUTPUT_CHANNELS * sizeof(float)); - state.persistBufferContent = state.persistBuffer + count * OUTPUT_CHANNELS; - state.persistBufferRemaining -= count; - count = 0; - } else { - output += passSize * OUTPUT_CHANNELS; - count -= passSize; - } - } + if (source->spatial) { + state.spatializer->apply(source, aux, mix, BUFFER_SIZE, BUFFER_SIZE); + } else { + memcpy(mix, aux, BUFFER_SIZE * OUTPUT_CHANNELS * sizeof(float)); + } -#ifdef LOVR_DEBUG_AUDIOTAP - if (state.audiotapWriting) { - lovrFilesystemWrite("lovrDebugAudio.raw", 
outputUntyped, originalCount * OUTPUT_CHANNELS * sizeof(float), true); - } -#endif + float volume = source->volume; + for (uint32_t i = 0; i < OUTPUT_CHANNELS * BUFFER_SIZE; i++) { + dst[i] += mix[i] * volume; + } + + list = &source->next; + } + + // Tail + uint32_t tailCount = state.spatializer->tail(aux, mix, BUFFER_SIZE); + for (uint32_t i = 0; i < tailCount * OUTPUT_CHANNELS; i++) { + dst[i] += mix[i]; + } + + // Copy leftovers to output + if (dst == state.leftovers) { + memcpy(output, state.leftovers, count * OUTPUT_CHANNELS * sizeof(float)); + state.leftoverFrames = BUFFER_SIZE - count; + state.leftoverOffset = count; + } + + // Scroll + output += BUFFER_SIZE * OUTPUT_CHANNELS; + count -= MIN(count, BUFFER_SIZE); + } while (count > 0); + + ma_mutex_unlock(&state.lock); } static void onCapture(ma_device* device, void* output, const void* input, uint32_t frames) { - size_t bytesPerFrame = SampleFormatBytesPerFrame(CAPTURE_CHANNELS, state.config[AUDIO_CAPTURE].format); + size_t bytesPerFrame = SampleFormatBytesPerFrame(CAPTURE_CHANNELS, state.captureStream->format); lovrSoundDataStreamAppendBuffer(state.captureStream, (float*) input, frames * bytesPerFrame); } @@ -244,17 +167,14 @@ static Spatializer* spatializers[] = { #ifdef LOVR_ENABLE_OCULUS_AUDIO &oculusSpatializer, #endif - &dummySpatializer + &simpleSpatializer }; // Entry -bool lovrAudioInit(SpatializerConfig config) { +bool lovrAudioInit(const char* spatializer) { if (state.initialized) return false; - state.config[AUDIO_PLAYBACK] = (AudioConfig) { .format = SAMPLE_F32, .sampleRate = 44100 }; - state.config[AUDIO_CAPTURE] = (AudioConfig) { .format = SAMPLE_F32, .sampleRate = 44100 }; - if (ma_context_init(NULL, 0, NULL, &state.context)) { return false; } @@ -262,42 +182,25 @@ bool lovrAudioInit(SpatializerConfig config) { int mutexStatus = ma_mutex_init(&state.lock); lovrAssert(mutexStatus == MA_SUCCESS, "Failed to create audio mutex"); - SpatializerConfigIn spatializerConfigIn = { - .maxSourcesHint = config.spatializerMaxSourcesHint, - .fixedBuffer = CALLBACK_LENGTH, - .sampleRate = state.config[AUDIO_PLAYBACK].sampleRate + SpatializerConfig spatializerConfig = { + .maxSourcesHint = MAX_SOURCES, + .fixedBuffer = BUFFER_SIZE, + .sampleRate = 44100 }; - SpatializerConfigOut spatializerConfigOut = { 0 }; - for (size_t i = 0; i < sizeof(spatializers) / sizeof(spatializers[0]); i++) { - if (config.spatializer && strcmp(config.spatializer, spatializers[i]->name)) { + if (spatializer && strcmp(spatializer, spatializers[i]->name)) { continue; } - if (spatializers[i]->init(spatializerConfigIn, &spatializerConfigOut)) { + if (spatializers[i]->init(spatializerConfig)) { state.spatializer = spatializers[i]; break; } } lovrAssert(state.spatializer, "Must have at least one spatializer"); - state.fixedBuffer = spatializerConfigOut.needFixedBuffer; - state.bufferSize = state.fixedBuffer ? 
CALLBACK_LENGTH : 1024; - state.scratchBuffer1 = malloc(state.bufferSize * sizeof(float) * OUTPUT_CHANNELS); - state.scratchBuffer2 = malloc(state.bufferSize * sizeof(float) * OUTPUT_CHANNELS); - if (state.fixedBuffer) { - state.persistBuffer = malloc(state.bufferSize * sizeof(float) * OUTPUT_CHANNELS); - } - state.persistBufferRemaining = 0; - arr_init(&state.converters); - -#ifdef LOVR_DEBUG_AUDIOTAP - lovrFilesystemWrite("lovrDebugAudio.raw", NULL, 0, false); // Erase file - state.audiotapWriting = true; -#endif - return state.initialized = true; } @@ -315,94 +218,77 @@ void lovrAudioDestroy() { free(state.converters.data[i]); } arr_free(&state.converters); - free(state.config[0].deviceName); - free(state.config[1].deviceName); - free(state.scratchBuffer1); - free(state.scratchBuffer2); - free(state.persistBuffer); memset(&state, 0, sizeof(state)); } -bool lovrAudioInitDevice(AudioType type) { - ma_device_info* playbackDevices; - ma_uint32 playbackDeviceCount; - ma_device_info* captureDevices; - ma_uint32 captureDeviceCount; - ma_result gettingStatus = ma_context_get_devices(&state.context, &playbackDevices, &playbackDeviceCount, &captureDevices, &captureDeviceCount); - lovrAssert(gettingStatus == MA_SUCCESS, "Failed to enumerate audio devices during initialization: %s (%d)", ma_result_description(gettingStatus), gettingStatus); +const char* lovrAudioGetSpatializer() { + return state.spatializer->name; +} + +static LOVR_THREAD_LOCAL struct { + AudioType type; + AudioDeviceCallback* callback; +} enumerateContext; + +static ma_bool32 enumerateCallback(ma_context* context, ma_device_type type, const ma_device_info* info, void* userdata) { + if (type == (enumerateContext.type == AUDIO_PLAYBACK ? ma_device_type_playback : ma_device_type_capture)) { + AudioDevice device = { + .id = &info->id, + .idSize = sizeof(info->id), + .name = info->name, + .isDefault = info->isDefault + }; + + enumerateContext.callback(&device, userdata); + } + + return MA_TRUE; +} + +void lovrAudioEnumerateDevices(AudioType type, AudioDeviceCallback* callback, void* userdata) { + enumerateContext.type = type; + enumerateContext.callback = callback; + ma_context_enumerate_devices(&state.context, enumerateCallback, userdata); +} + +bool lovrAudioSetDevice(AudioType type, void* id, size_t size, uint32_t sampleRate, SampleFormat format) { + if (id && size != sizeof(ma_device_id)) return false; + +#ifdef ANDROID + // XX miniaudio doesn't seem to be happy to set a specific device an android (fails with + // error -2 on device init). Since there is only one playback and one capture device in OpenSL, + // we can just set this to NULL and make this call a no-op. 
+ id = NULL; +#endif ma_device_config config; + if (type == AUDIO_PLAYBACK) { - ma_device_type deviceType = ma_device_type_playback; - config = ma_device_config_init(deviceType); - - lovrAssert(state.config[AUDIO_PLAYBACK].format == OUTPUT_FORMAT, "Only f32 playback format currently supported"); - config.playback.format = miniaudioFormats[state.config[AUDIO_PLAYBACK].format]; - for (uint32_t i = 0; i < playbackDeviceCount && state.config[AUDIO_PLAYBACK].deviceName; i++) { - if (strcmp(playbackDevices[i].name, state.config[AUDIO_PLAYBACK].deviceName) == 0) { - config.playback.pDeviceID = &playbackDevices[i].id; - } - } - - if (state.config[AUDIO_PLAYBACK].deviceName && config.playback.pDeviceID == NULL) { - lovrLog(LOG_WARN, "audio", "No audio playback device called '%s'; falling back to default.", state.config[AUDIO_PLAYBACK].deviceName); - } - + lovrAssert(sampleRate == 44100, "Only a 44100Hz playback sample rate is currently supported"); + lovrAssert(format == SAMPLE_F32, "Only f32 playback format currently supported"); + config = ma_device_config_init(ma_device_type_playback); + config.playback.pDeviceID = (ma_device_id*) id; + config.playback.format = miniaudioFormats[format]; config.playback.channels = OUTPUT_CHANNELS; } else { - ma_device_type deviceType = ma_device_type_capture; - config = ma_device_config_init(deviceType); - - config.capture.format = miniaudioFormats[state.config[AUDIO_CAPTURE].format]; - for (uint32_t i = 0; i < captureDeviceCount && state.config[AUDIO_CAPTURE].deviceName; i++) { - if (strcmp(captureDevices[i].name, state.config[AUDIO_CAPTURE].deviceName) == 0) { - config.capture.pDeviceID = &captureDevices[i].id; - } - } - - if (state.config[AUDIO_CAPTURE].deviceName && config.capture.pDeviceID == NULL) { - lovrLog(LOG_WARN, "audio", "No audio capture device called '%s'; falling back to default.", state.config[AUDIO_CAPTURE].deviceName); - } - + config = ma_device_config_init(ma_device_type_capture); + config.capture.pDeviceID = (ma_device_id*) id; + config.capture.format = miniaudioFormats[format]; config.capture.channels = CAPTURE_CHANNELS; + lovrRelease(SoundData, state.captureStream); + state.captureStream = lovrSoundDataCreateStream(sampleRate * 1., CAPTURE_CHANNELS, sampleRate, format); } - config.periodSizeInFrames = PERIOD_LENGTH; - config.periods = 3; + config.sampleRate = sampleRate; config.performanceProfile = ma_performance_profile_low_latency; config.dataCallback = callbacks[type]; - config.sampleRate = state.config[type].sampleRate; - ma_result err = ma_device_init(&state.context, &config, &state.devices[type]); - if (err != MA_SUCCESS) { - lovrLog(LOG_WARN, "audio", "Failed to enable %s audio device: %s (%d)\n", type == AUDIO_PLAYBACK ?
"playback" : "capture", ma_result_description(err), err); - return false; - } - - if (type == AUDIO_CAPTURE) { - lovrRelease(SoundData, state.captureStream); - state.captureStream = lovrSoundDataCreateStream(state.config[type].sampleRate * 1.0, CAPTURE_CHANNELS, state.config[type].sampleRate, state.config[type].format); - if (!state.captureStream) { - lovrLog(LOG_WARN, "audio", "Failed to init audio device %d\n", type); - lovrAudioDestroy(); - return false; - } - } - - return true; + ma_device_uninit(&state.devices[type]); + ma_result result = ma_device_init(&state.context, &config, &state.devices[type]); + return result == MA_SUCCESS; } bool lovrAudioStart(AudioType type) { - ma_uint32 deviceState = state.devices[type].state; - if (deviceState == MA_STATE_UNINITIALIZED) { - if (!lovrAudioInitDevice(type)) { - if (type == AUDIO_CAPTURE) { - lovrPlatformRequestPermission(AUDIO_CAPTURE_PERMISSION); - // by default, lovrAudioStart will be retried from boot.lua upon permission granted event - } - return false; - } - } return ma_device_start(&state.devices[type]) == MA_SUCCESS; } @@ -411,7 +297,7 @@ bool lovrAudioStop(AudioType type) { } bool lovrAudioIsStarted(AudioType type) { - return ma_device_get_state(&state.devices[type]) == MA_STATE_STARTED; + return ma_device_is_started(&state.devices[type]); } float lovrAudioGetVolume() { @@ -437,78 +323,6 @@ struct SoundData* lovrAudioGetCaptureStream() { return state.captureStream; } -AudioDeviceArr* lovrAudioGetDevices(AudioType type) { - ma_device_info* playbackDevices; - ma_uint32 playbackDeviceCount; - ma_device_info* captureDevices; - ma_uint32 captureDeviceCount; - ma_result gettingStatus = ma_context_get_devices(&state.context, &playbackDevices, &playbackDeviceCount, &captureDevices, &captureDeviceCount); - lovrAssert(gettingStatus == MA_SUCCESS, "Failed to enumerate audio devices: %s (%d)", ma_result_description(gettingStatus), gettingStatus); - - ma_uint32 count = type == AUDIO_PLAYBACK ? playbackDeviceCount : captureDeviceCount; - ma_device_info* madevices = type == AUDIO_PLAYBACK ? playbackDevices : captureDevices; - AudioDeviceArr* devices = calloc(1, sizeof(AudioDeviceArr)); - devices->capacity = devices->length = count; - devices->data = calloc(count, sizeof(AudioDevice)); - - for (uint32_t i = 0; i < count; i++) { - ma_device_info* mainfo = &madevices[i]; - AudioDevice* lovrInfo = &devices->data[i]; - lovrInfo->name = strdup(mainfo->name); - lovrInfo->type = type; - lovrInfo->isDefault = mainfo->isDefault; - } - - return devices; -} - -void lovrAudioFreeDevices(AudioDeviceArr *devices) { - for (size_t i = 0; i < devices->length; i++) { - free((void*) devices->data[i].name); - } - arr_free(devices); -} - -void lovrAudioSetCaptureFormat(SampleFormat format, uint32_t sampleRate) { - if (sampleRate) state.config[AUDIO_CAPTURE].sampleRate = sampleRate; - if (format != SAMPLE_INVALID) state.config[AUDIO_CAPTURE].format = format; - - // restart device if needed - ma_uint32 previousState = state.devices[AUDIO_CAPTURE].state; - if (previousState != MA_STATE_UNINITIALIZED) { - ma_device_uninit(&state.devices[AUDIO_CAPTURE]); - if (previousState == MA_STATE_STARTED) { - lovrAudioStart(AUDIO_CAPTURE); - } - } -} - -void lovrAudioUseDevice(AudioType type, const char* deviceName) { - free(state.config[type].deviceName); - -#ifdef ANDROID - // XX miniaudio doesn't seem to be happy to set a specific device an android (fails with - // error -2 on device init). 
Since there is only one playback and one capture device in OpenSL, - // we can just set this to NULL and make this call a no-op. - deviceName = NULL; -#endif - - state.config[type].deviceName = deviceName ? strdup(deviceName) : NULL; - - // restart device if needed - ma_uint32 previousState = state.devices[type].state; - if (previousState != MA_STATE_UNINITIALIZED) { - ma_device_uninit(&state.devices[type]); - if (previousState == MA_STATE_STARTED) { - lovrAudioStart(type); - } - } -} - -const char* lovrAudioGetSpatializer() { - return state.spatializer->name; -} - // Source Source* lovrSourceCreate(SoundData* sound, bool spatial) { @@ -536,7 +350,7 @@ Source* lovrSourceCreate(SoundData* sound, bool spatial) { config.channelsIn = sound->channels; config.channelsOut = outputChannelCountForSource(source); config.sampleRateIn = sound->sampleRate; - config.sampleRateOut = state.config[AUDIO_PLAYBACK].sampleRate; + config.sampleRateOut = 44100; ma_data_converter* converter = malloc(sizeof(ma_data_converter)); ma_result converterStatus = ma_data_converter_init(&config, converter); @@ -557,7 +371,7 @@ void lovrSourceDestroy(void* ref) { lovrRelease(SoundData, source->sound); } -void lovrSourcePlay(Source* source) { +bool lovrSourcePlay(Source* source) { ma_mutex_lock(&state.lock); source->playing = true; @@ -570,6 +384,7 @@ void lovrSourcePlay(Source* source) { } ma_mutex_unlock(&state.lock); + return true; } void lovrSourcePause(Source* source) { @@ -578,7 +393,7 @@ void lovrSourcePause(Source* source) { void lovrSourceStop(Source* source) { lovrSourcePause(source); - lovrSourceSetTime(source, 0, UNIT_SAMPLES); + lovrSourceSetTime(source, 0, UNIT_FRAMES); } bool lovrSourceIsPlaying(Source* source) { @@ -621,8 +436,7 @@ void lovrSourceSetPose(Source *source, float position[4], float orientation[4]) } double lovrSourceGetDuration(Source* source, TimeUnit units) { - uint32_t frames = lovrSoundDataGetDuration(source->sound); - return units == UNIT_SECONDS ? (double) frames / source->sound->sampleRate : frames; + return units == UNIT_SECONDS ? 
(double) source->sound->frames / source->sound->sampleRate : source->sound->frames; } double lovrSourceGetTime(Source* source, TimeUnit units) { diff --git a/src/modules/audio/audio.h b/src/modules/audio/audio.h index 8048f62a..674f9a15 100644 --- a/src/modules/audio/audio.h +++ b/src/modules/audio/audio.h @@ -1,8 +1,6 @@ #include #include #include -#include "data/soundData.h" -#include "core/arr.h" #pragma once @@ -15,21 +13,25 @@ typedef enum { AUDIO_CAPTURE } AudioType; -typedef struct { - const char *spatializer; - int spatializerMaxSourcesHint; -} SpatializerConfig; +typedef enum { + UNIT_SECONDS, + UNIT_FRAMES +} TimeUnit; typedef struct { - AudioType type; - const char *name; + size_t idSize; + const void* id; + const char* name; bool isDefault; } AudioDevice; -typedef arr_t(AudioDevice) AudioDeviceArr; +typedef void AudioDeviceCallback(AudioDevice* device, void* userdata); -bool lovrAudioInit(SpatializerConfig config); +bool lovrAudioInit(const char* spatializer); void lovrAudioDestroy(void); +const char* lovrAudioGetSpatializer(void); +void lovrAudioEnumerateDevices(AudioType type, AudioDeviceCallback* callback, void* userdata); +bool lovrAudioSetDevice(AudioType type, void* id, size_t size, uint32_t sampleRate, uint32_t format); bool lovrAudioStart(AudioType type); bool lovrAudioStop(AudioType type); bool lovrAudioIsStarted(AudioType type); @@ -38,17 +40,12 @@ void lovrAudioSetVolume(float volume); void lovrAudioGetPose(float position[4], float orientation[4]); void lovrAudioSetPose(float position[4], float orientation[4]); struct SoundData* lovrAudioGetCaptureStream(void); -AudioDeviceArr* lovrAudioGetDevices(AudioType type); -void lovrAudioFreeDevices(AudioDeviceArr* devices); -void lovrAudioUseDevice(AudioType type, const char* deviceName); -void lovrAudioSetCaptureFormat(SampleFormat format, uint32_t sampleRate); -const char* lovrAudioGetSpatializer(void); // Source Source* lovrSourceCreate(struct SoundData* soundData, bool spatial); void lovrSourceDestroy(void* ref); -void lovrSourcePlay(Source* source); +bool lovrSourcePlay(Source* source); void lovrSourcePause(Source* source); void lovrSourceStop(Source* source); bool lovrSourceIsPlaying(Source* source); diff --git a/src/modules/audio/spatializer.h b/src/modules/audio/spatializer.h index f5545e89..fa3c0700 100644 --- a/src/modules/audio/spatializer.h +++ b/src/modules/audio/spatializer.h @@ -5,15 +5,11 @@ typedef struct { int maxSourcesHint; int fixedBuffer; int sampleRate; -} SpatializerConfigIn; - -typedef struct { - bool needFixedBuffer; -} SpatializerConfigOut; +} SpatializerConfig; typedef struct { // return true on success - bool (*init)(SpatializerConfigIn configIn, SpatializerConfigOut *configOut); + bool (*init)(SpatializerConfig config); void (*destroy)(void); // input is mono, output is interleaved stereo, framesIn is mono frames, framesOut is stereo frames. 
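/* Illustrative sketch (not part of the patch): the smallest Spatializer that satisfies the
 * interface in this header. apply() receives one mono source buffer and must write
 * interleaved stereo; tail() may add trailing effects after all sources are mixed (return 0
 * when there are none); `buffered` tells the mixer whether the backend requires the fixed
 * block length. The "passthrough" name and functions below are hypothetical. */
#include <stdint.h>
#include "audio/spatializer.h"

static bool passthrough_init(SpatializerConfig config) { return true; }
static void passthrough_destroy(void) {}

static uint32_t passthrough_apply(Source* source, const float* input, float* output, uint32_t framesIn, uint32_t framesOut) {
  for (uint32_t i = 0; i < framesIn; i++) {
    output[i * 2 + 0] = input[i]; // left
    output[i * 2 + 1] = input[i]; // right
  }
  return framesIn;
}

static uint32_t passthrough_tail(float* scratch, float* output, uint32_t frames) { return 0; }
static void passthrough_setListenerPose(float position[4], float orientation[4]) {}
static void passthrough_sourceCreate(Source* source) {}
static void passthrough_sourceDestroy(Source* source) {}

Spatializer passthroughSpatializer = {
  .init = passthrough_init,
  .destroy = passthrough_destroy,
  .apply = passthrough_apply,
  .tail = passthrough_tail,
  .setListenerPose = passthrough_setListenerPose,
  .sourceCreate = passthrough_sourceCreate,
  .sourceDestroy = passthrough_sourceDestroy,
  .buffered = false,
  .name = "passthrough"
};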
@@ -30,10 +26,11 @@ typedef struct { void (*sourceCreate)(Source* source); void (*sourceDestroy)(Source* source); + bool buffered; const char* name; } Spatializer; -extern Spatializer dummySpatializer; +extern Spatializer simpleSpatializer; #ifdef LOVR_ENABLE_OCULUS_AUDIO extern Spatializer oculusSpatializer; #endif diff --git a/src/modules/audio/spatializers/oculus_spatializer.c b/src/modules/audio/spatializers/oculus_spatializer.c index acbba918..c7410196 100644 --- a/src/modules/audio/spatializers/oculus_spatializer.c +++ b/src/modules/audio/spatializers/oculus_spatializer.c @@ -28,11 +28,10 @@ struct { bool poseLockInited; } state; -static bool oculus_spatializer_init(SpatializerConfigIn configIn, SpatializerConfigOut* configOut) { +static bool oculus_spatializer_init(SpatializerConfig config) { // Initialize own state - state.sampleRate = configIn.sampleRate; - configOut->needFixedBuffer = true; - state.sourceMax = configIn.maxSourcesHint; + state.sampleRate = config.sampleRate; + state.sourceMax = config.maxSourcesHint; state.sources = calloc(state.sourceMax, sizeof(SourceRecord)); if (!state.poseLockInited) { @@ -42,14 +41,14 @@ static bool oculus_spatializer_init(SpatializerConfigIn configIn, SpatializerCon } // Initialize Oculus - ovrAudioContextConfiguration config = { 0 }; + ovrAudioContextConfiguration contextConfig = { 0 }; - config.acc_Size = sizeof( config ); - config.acc_MaxNumSources = state.sourceMax; - config.acc_SampleRate = state.sampleRate; - config.acc_BufferLength = configIn.fixedBuffer; // Stereo + contextConfig.acc_Size = sizeof(contextConfig); + contextConfig.acc_MaxNumSources = state.sourceMax; + contextConfig.acc_SampleRate = state.sampleRate; + contextConfig.acc_BufferLength = config.fixedBuffer; // Stereo - if (ovrAudio_CreateContext(&state.context, &config) != ovrSuccess) { + if (ovrAudio_CreateContext(&state.context, &contextConfig) != ovrSuccess) { return false; } @@ -226,12 +225,13 @@ static void oculus_spatializer_source_destroy(Source *source) { } Spatializer oculusSpatializer = { - oculus_spatializer_init, - oculus_spatializer_destroy, - oculus_spatializer_source_apply, - oculus_spatializer_tail, - oculus_spatializer_setListenerPose, - oculus_spatializer_source_create, - oculus_spatializer_source_destroy, // Need noop - "oculus" + .init = oculus_spatializer_init, + .destroy = oculus_spatializer_destroy, + .apply = oculus_spatializer_source_apply, + .tail = oculus_spatializer_tail, + .setListenerPose = oculus_spatializer_setListenerPose, + .sourceCreate = oculus_spatializer_source_create, + .sourceDestroy = oculus_spatializer_source_destroy, // Need noop + .buffered = true, + .name = "oculus" }; diff --git a/src/modules/audio/spatializers/dummy_spatializer.c b/src/modules/audio/spatializers/simple_spatializer.c similarity index 59% rename from src/modules/audio/spatializers/dummy_spatializer.c rename to src/modules/audio/spatializers/simple_spatializer.c index f1241260..7f652db3 100644 --- a/src/modules/audio/spatializers/dummy_spatializer.c +++ b/src/modules/audio/spatializers/simple_spatializer.c @@ -5,16 +5,16 @@ static struct { float listener[16]; } state; -bool dummy_spatializer_init(SpatializerConfigIn configIn, SpatializerConfigOut* configOut) { +bool simple_spatializer_init(SpatializerConfig config) { mat4_identity(state.listener); return true; } -void dummy_spatializer_destroy(void) { +void simple_spatializer_destroy(void) { // } -uint32_t dummy_spatializer_source_apply(Source* source, const float* input, float* output, uint32_t frames, 
uint32_t _frames) { +uint32_t simple_spatializer_source_apply(Source* source, const float* input, float* output, uint32_t frames, uint32_t _frames) { float sourcePos[4], sourceOrientation[4]; lovrSourceGetPose(source, sourcePos, sourceOrientation); @@ -40,31 +40,32 @@ uint32_t dummy_spatializer_source_apply(Source* source, const float* input, floa return frames; } -uint32_t dummy_spatializer_tail(float* scratch, float* output, uint32_t frames) { +uint32_t simple_spatializer_tail(float* scratch, float* output, uint32_t frames) { return 0; } -void dummy_spatializer_setListenerPose(float position[4], float orientation[4]) { +void simple_spatializer_setListenerPose(float position[4], float orientation[4]) { mat4_identity(state.listener); mat4_translate(state.listener, position[0], position[1], position[2]); mat4_rotateQuat(state.listener, orientation); } -void dummy_spatializer_source_create(Source* source) { +void simple_spatializer_source_create(Source* source) { // } -void dummy_spatializer_source_destroy(Source* source) { +void simple_spatializer_source_destroy(Source* source) { // } -Spatializer dummySpatializer = { - dummy_spatializer_init, - dummy_spatializer_destroy, - dummy_spatializer_source_apply, - dummy_spatializer_tail, - dummy_spatializer_setListenerPose, - dummy_spatializer_source_create, - dummy_spatializer_source_destroy, - "dummy" +Spatializer simpleSpatializer = { + .init = simple_spatializer_init, + .destroy = simple_spatializer_destroy, + .apply = simple_spatializer_source_apply, + .tail = simple_spatializer_tail, + .setListenerPose = simple_spatializer_setListenerPose, + .sourceCreate = simple_spatializer_source_create, + .sourceDestroy = simple_spatializer_source_destroy, + .buffered = false, + .name = "simple" }; diff --git a/src/modules/data/soundData.c b/src/modules/data/soundData.c index 03df46db..c4fa283b 100644 --- a/src/modules/data/soundData.c +++ b/src/modules/data/soundData.c @@ -13,7 +13,7 @@ static const ma_format miniAudioFormat[] = { [SAMPLE_F32] = ma_format_f32 }; -static const ma_format sampleSizes[] = { +static const size_t sampleSizes[] = { [SAMPLE_I16] = 2, [SAMPLE_F32] = 4 }; @@ -65,7 +65,7 @@ static uint32_t lovrSoundDataReadMp3(SoundData* soundData, uint32_t offset, uint */ static uint32_t lovrSoundDataReadRing(SoundData* soundData, uint32_t offset, uint32_t count, void* data) { - uint8_t* charData = (uint8_t*)data; + uint8_t* charData = (uint8_t*) data; size_t bytesPerFrame = SampleFormatBytesPerFrame(soundData->channels, soundData->format); size_t totalRead = 0; while (count > 0) { @@ -89,7 +89,6 @@ static uint32_t lovrSoundDataReadRing(SoundData* soundData, uint32_t offset, uin } SoundData* lovrSoundDataCreateRaw(uint32_t frameCount, uint32_t channelCount, uint32_t sampleRate, SampleFormat format, struct Blob* blob) { - lovrAssert(format != SAMPLE_INVALID, "Invalid format"); SoundData* soundData = lovrAlloc(SoundData); soundData->format = format; soundData->sampleRate = sampleRate; @@ -111,7 +110,6 @@ SoundData* lovrSoundDataCreateRaw(uint32_t frameCount, uint32_t channelCount, ui } SoundData* lovrSoundDataCreateStream(uint32_t bufferSizeInFrames, uint32_t channels, uint32_t sampleRate, SampleFormat format) { - lovrAssert(format != SAMPLE_INVALID, "Invalid format"); SoundData* soundData = lovrAlloc(SoundData); soundData->format = format; soundData->sampleRate = sampleRate; @@ -203,24 +201,24 @@ size_t lovrSoundDataStreamAppendSound(SoundData* soundData, SoundData* src) { return lovrSoundDataStreamAppendBlob(soundData, src->blob); } -void 
lovrSoundDataSetSample(SoundData* soundData, size_t index, float value) { +void lovrSoundDataSetSample(SoundData* soundData, uint32_t index, float value) { lovrAssert(soundData->blob && soundData->read == lovrSoundDataReadRaw, "Source SoundData must have static PCM data and not be a stream"); - lovrAssert(index < soundData->frames, "Sample index out of range"); + lovrAssert(index < soundData->frames * soundData->channels, "Sample index out of range"); switch (soundData->format) { case SAMPLE_I16: ((int16_t*) soundData->blob->data)[index] = value * SHRT_MAX; break; case SAMPLE_F32: ((float*) soundData->blob->data)[index] = value; break; - default: lovrThrow("Unsupported SoundData format %d\n", soundData->format); break; + default: lovrThrow("Unreachable"); break; } } -bool lovrSoundDataIsStream(SoundData *soundData) { - return soundData->read == lovrSoundDataReadRing; -} - -uint32_t lovrSoundDataGetDuration(SoundData *soundData) { +uint32_t lovrSoundDataGetFrameCount(SoundData *soundData) { if (lovrSoundDataIsStream(soundData)) { return ma_pcm_rb_available_read(soundData->ring); } else { return soundData->frames; } } + +bool lovrSoundDataIsStream(SoundData *soundData) { + return soundData->read == lovrSoundDataReadRing; +} diff --git a/src/modules/data/soundData.h b/src/modules/data/soundData.h index 948cbd6a..c7757717 100644 --- a/src/modules/data/soundData.h +++ b/src/modules/data/soundData.h @@ -11,15 +11,9 @@ typedef uint32_t SoundDataReader(struct SoundData* soundData, uint32_t offset, u typedef enum { SAMPLE_F32, - SAMPLE_I16, - SAMPLE_INVALID + SAMPLE_I16 } SampleFormat; -typedef enum { - UNIT_SECONDS, - UNIT_SAMPLES -} TimeUnit; - size_t SampleFormatBytesPerFrame(int channelCount, SampleFormat fmt); typedef struct SoundData { @@ -41,6 +35,6 @@ void lovrSoundDataDestroy(void* ref); size_t lovrSoundDataStreamAppendBuffer(SoundData* soundData, const void* buffer, size_t byteSize); size_t lovrSoundDataStreamAppendBlob(SoundData* soundData, struct Blob* blob); size_t lovrSoundDataStreamAppendSound(SoundData* soundData, SoundData *src); -void lovrSoundDataSetSample(SoundData* soundData, size_t index, float value); -uint32_t lovrSoundDataGetDuration(SoundData* soundData); +void lovrSoundDataSetSample(SoundData* soundData, uint32_t index, float value); +uint32_t lovrSoundDataGetFrameCount(SoundData* soundData); bool lovrSoundDataIsStream(SoundData* soundData); diff --git a/src/resources/boot.lua b/src/resources/boot.lua index 6193a8ce..ee6ac78a 100644 --- a/src/resources/boot.lua +++ b/src/resources/boot.lua @@ -173,8 +173,8 @@ function lovr.boot() end function lovr.permission(permission, granted) - if permission == "audiocapture" and granted then - lovr.audio.start("capture") + if permission == 'audiocapture' and granted then + lovr.audio.start('capture') end end @@ -197,7 +197,7 @@ function lovr.run() lovr.headset.update(dt) end if lovr.audio and lovr.headset then - lovr.audio.setListenerPose(lovr.headset.getPose()) + lovr.audio.setPose(lovr.headset.getPose()) end if lovr.update then lovr.update(dt) end if lovr.graphics then
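Taken together, the reworked C API above replaces the old name-based useDevice/setCaptureFormat pair with opaque device ids. The sketch below is illustrative only (the PickByName struct, onDeviceFound callback, and usePlaybackDevice helper are not part of the patch): enumerate devices of one type, copy the id of the one you want, hand it to lovrAudioSetDevice, then start the device. Passing a NULL id selects the system default, which is what luaopen_lovr_audio does at boot.

#include <stdlib.h>
#include <string.h>
#include "audio/audio.h"
#include "data/soundData.h" // SAMPLE_F32

typedef struct {
  const char* wanted; // substring to look for in the device name
  void* id;           // copy of the matching device's opaque id
  size_t idSize;
} PickByName;

static void onDeviceFound(AudioDevice* device, void* userdata) {
  PickByName* pick = userdata;
  if (!pick->id && strstr(device->name, pick->wanted)) {
    pick->id = malloc(device->idSize);
    memcpy(pick->id, device->id, device->idSize);
    pick->idSize = device->idSize;
  }
}

static bool usePlaybackDevice(const char* name) {
  PickByName pick = { .wanted = name };
  lovrAudioEnumerateDevices(AUDIO_PLAYBACK, onDeviceFound, &pick);
  // A NULL id (no match) falls back to the system default device.
  bool ok = lovrAudioSetDevice(AUDIO_PLAYBACK, pick.id, pick.idSize, 44100, SAMPLE_F32);
  free(pick.id);
  return ok && lovrAudioStart(AUDIO_PLAYBACK);
}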