From 7a3d74eb030f336f0d58152cb70602a2ab65a6d0 Mon Sep 17 00:00:00 2001
From: Florent Berthaut <florent.berthaut@univ-lille.fr>
Date: Sun, 19 Mar 2023 22:45:33 +0100
Subject: [PATCH] Switched to longer render buffers

---
 src/audio/AudioManager.cpp      | 144 ++++++++++++++++++--------
 src/audio/AudioManager.hpp      |  66 ++++++++-------
 src/modules/ProjectorModule.cpp |  28 +++----
 src/shaders/render43.fs         |  75 +++++++++--------
 4 files changed, 171 insertions(+), 142 deletions(-)

diff --git a/src/audio/AudioManager.cpp b/src/audio/AudioManager.cpp
index 3850042..90aec71 100644
--- a/src/audio/AudioManager.cpp
+++ b/src/audio/AudioManager.cpp
@@ -24,8 +24,7 @@ void* audioThreadFunction(void* pvParam) {
 
     AudioManager *pThis=(AudioManager*)pvParam;
-    RtAudio *audio = pThis->audio;
-    audio->startStream();
+    pThis->startStream();
     return 0;
 }
 
@@ -41,115 +40,134 @@ int AudioManager::getRate(){
 }
 
 int AudioManager::getBufferSize(){
-    return buf;
+    return m_bufSize;
 }
 
 int AudioManager::getMixSize(){
     return data.mixSize;
 }
 
-static int rtaudio_callback(void *outbuf, void *inbuf, unsigned int nFrames, double streamtime, RtAudioStreamStatus status, void *userdata){
+int AudioManager::rtaudio_callback(void *outbuf, void *inbuf, unsigned int nFrames,
+                                   double streamtime, RtAudioStreamStatus status, void *userdata) {
     (void)inbuf;
     float *buf = (float*)outbuf;
-    CallbackData *data = (CallbackData*) userdata;
-    unsigned int i = 0;
-    int j = 0;
-    int audioIndex = 0;
+    AudioManager::CallbackData *data = (AudioManager::CallbackData*) userdata;
 
-    //check if there is audio value to play
-    if(ringbuffer_read_space(data->audioBuf) > 2*(nFrames + data->mixSize)){
+    //if we've reached the first buffer again, retrieve values from the ring
+    if(data->bufCounter==0) {
+        if(ringbuffer_read_space(data->ringBuffer) >= data->bufSize) {
 
-        //read the audio values
-        ringbuffer_read(data->audioBuf, data->audio, nFrames + data->mixSize);
-
-        //put the values to play in the audio buffer
-        while(i < nFrames * data->nChannel) {
-            if(j < data->mixSize){
-                buf[i] = data->audio[j] + data->mix[j];
-                buf[i+1] = data->audio[j] + data->mix[j];
-
-            }
-            else{
-                buf[i] = data->audio[j];
-                buf[i+1] = data->audio[j];
+            //read all the audio values
+            ringbuffer_read(data->ringBuffer, data->buffer, data->bufSize);
+
+            //mix the first buffer with the last of the previous
+            float step = 1.0/float(nFrames);
+            float cnt = 0.0;
+            for(int i=0; i < nFrames; ++i) {
+                buf[i*2] = data->buffer[i]*cnt + data->mixBuffer[i]*(1.0-cnt);
+                //buf[i*2] = data->mixBuffer[i]*(1.0-cnt);
+                //buf[i*2] = data->buffer[i]*cnt;
+                buf[i*2+1] = buf[i*2];
+                cnt+=step;
             }
-            audioIndex += 1;
-            j += 1;
-            i += 2;
+
+            //save the last buffer for crossfading
+            memcpy(&data->mixBuffer[0],
+                   data->buffer+(data->bufSize-data->mixSize),
+                   data->mixSize*sizeof(float));
+        }
+        else {
+            memset(buf, 0, nFrames*data->nChannel*sizeof(float));
+            cout<<"AudioManager : Could not read buffer"<<endl;
         }
-        //save the value for the next crossfading
-        memcpy(&data->mix[0], data->audio + nFrames, data->mixSize*sizeof(float));
-    }
+    }
     else {
-        memset(buf, 0, nFrames*data->nChannel*sizeof(float));
-        cout<<"AudioManager : Could not read buffer"<<endl;
+        //read the next buffer
+        for(int i=0; i < nFrames; ++i) {
+            buf[i*2] = data->buffer[data->bufCounter*nFrames+i];
+            buf[i*2+1] = buf[i*2];
+        }
     }
+
+    //move to the next buffer
+    data->bufCounter=(data->bufCounter+1)%(data->nbBufs-1);
+
     return 0;
 }
 
-AudioManager::AudioManager(){
-    buf = 4096;
-    audio = new RtAudio(RtAudio::LINUX_PULSE);
+AudioManager::AudioManager() {
+
+    //audio = new RtAudio(RtAudio::LINUX_PULSE);
+    m_rtAudio = new RtAudio(RtAudio::UNIX_JACK);
     param = new RtAudio::StreamParameters();
-    param->deviceId = audio->getDefaultOutputDevice();
+    param->deviceId = m_rtAudio->getDefaultOutputDevice();
     param->nChannels = 2;
     options = new RtAudio::StreamOptions();
     options->streamName = "Rivill";
+    data.nRate = m_rtAudio->getDeviceInfo(param->deviceId).preferredSampleRate;
 
-    audio->openStream(param, NULL, RTAUDIO_FLOAT32, 44100, &buf,
-                      rtaudio_callback, &data, options);
+    m_rtBufSize = 1024;
+
+    m_rtAudio->openStream(param, NULL, RTAUDIO_FLOAT32, data.nRate,
+                          &m_rtBufSize,
+                          AudioManager::rtaudio_callback, &data, options);
+
+    int nbBufs = 5;
+    m_bufSize = m_rtBufSize*nbBufs;
+
+    data.ringBuffer = ringbuffer_create(m_bufSize*3);
 
-    data.mixSize = 1000;
-    data.nRate = audio->getDeviceInfo(param->deviceId).preferredSampleRate;
     data.nChannel = param->nChannels;
-    data.ratioSampleFreq = data.nRate/440.0;
-    data.ratioFreqSample = 1.0/data.ratioSampleFreq;
-    size_t size = 10 * buf;
-    data.audioBuf = ringbuffer_create(size);
-
-    data.audio = new float[buf + data.mixSize];
-    data.mix = new float[data.mixSize];
-    memset(&data.mix[0], 0, data.mixSize*sizeof(float));
-    audioValue = new float[buf + data.mixSize];
-    m_thread = new std::thread(audioThreadFunction, this);
+    data.bufSize = m_bufSize;
+    data.buffer = new float[m_bufSize];
+    data.nbBufs = nbBufs;
+    data.bufCounter = 0;
+    memset(&data.buffer[0], 0, data.bufSize*sizeof(float));
+    data.mixSize = m_rtBufSize;
+    data.mixBuffer = new float[data.mixSize];
+    memset(&data.mixBuffer[0], 0, data.mixSize*sizeof(float));
+    m_thread = new std::thread(audioThreadFunction, this);
 }
 
-void AudioManager::changeBuf(float* outputBuf, float maxSinValue){
+void AudioManager::changeBuf(float* outputBuf, float maxSinValue) {
     //check if there is space to write the new values
-    //if not, move write_ptr of both ringbuffer size index back
-    size_t write_space = ringbuffer_write_space(data.audioBuf);
-    if(write_space >= (buf+data.mixSize)){//!= 0){
+    size_t write_space = ringbuffer_write_space(data.ringBuffer);
+    if(write_space >= m_bufSize) {
+
+
+        //write all data to the ringbuffer
+        ringbuffer_write(data.ringBuffer, outputBuf, data.bufSize);
+
+        /*
     float phase = 0;
     float pas = 1.0/float(data.mixSize);
     //fill the audioValue array with the audio value
-    for(int i=0; i < int(buf); i++){
-        if(i < data.mixSize){
+    for(int i=0; i < int(data.nFrames); i++) {
+        if(i < data.mixSize) {
            audioValue[i] = outputBuf[i]*phase;
            phase += pas;
        }
-        else{
+        else {
            audioValue[i] = outputBuf[i];
        }
-    }
-    //pas = 1/data.mixSize;
     //fill the audioValue array with the values for the next crossfading
-    for(int i=0; i < data.mixSize; i++){
-        audioValue[buf + i] = outputBuf[buf + i]*phase;
-        phase -= pas;
+    for(int i=0; i<data.mixSize; i++) {
+        audioValue[data.nFrames+i] = outputBuf[data.nFrames+i]*phase;
+        phase-=pas;
     }
     constAudioValue = const_cast<const float *>(audioValue);
-    ringbuffer_write(data.audioBuf, constAudioValue, buf + data.mixSize);
+    ringbuffer_write(data.audioBuf, constAudioValue, data.nFrames + data.mixSize);
+        */
     }
     else {
-        cout<<"AudioManager : Could not write buffer"<<endl;
+        //cout<<"AudioManager : Could not write buffer"<<endl;
     }
 }
diff --git a/src/audio/AudioManager.hpp b/src/audio/AudioManager.hpp
index 00a5790..b25e8e9 100644
--- a/src/audio/AudioManager.hpp
+++ b/src/audio/AudioManager.hpp
@@ -20,6 +20,9 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#ifndef AudioManager_h
+#define AudioManager_h
+
 #include <rtaudio/RtAudio.h>
 #include <cstring>
 #include <math.h>
@@ -30,43 +33,37 @@
 #include <iostream>
 #include <fstream>
-using namespace std;
 
 #include <sys/time.h>
 
 #include "Ringbuffer.hpp"
 
+using namespace std;
 
-#ifndef AudioManager_h
-#define AudioManager_h
 
 //#define POWER32 pow(2,31)
 
-static std::mutex mtx_audio;
-
-typedef struct {
-    unsigned int nRate;       /* Sampling Rate (sample/sec) */
-    unsigned int nChannel;    /* Channel Number */
-    float ratioSampleFreq;    /* Ratio between the sample frequence and the signal frequency */
-    float ratioFreqSample;    /* Ratio between the signal frequency and the sample frequence */
-    int mixSize;              /* the number of values to crossfade */
-    ringbuffer *mixBuf;       /* ringbuffer with the value for the crossfading */
-    ringbuffer *audioBuf;     /* ringbuffer with the value for audio render */
-    float *audio;             /* buffer to save the current audio value */
-    float *mix;               /* buffer to save the mix value */
-} CallbackData;
-
-static int rtaudio_callback(void *outbuf, void *inbuf, unsigned int nFrames, double streamtime, RtAudioStreamStatus status, void *userdata);
 
 class AudioManager {
     public:
-        RtAudio *audio;
-        RtAudio::StreamParameters *param;
-        unsigned int buf;   //the size of the audio buffer
-        CallbackData data;  //the audio data structure
-        pthread_t t;
-
-    public:
+        struct CallbackData {
+            unsigned int nRate;
+            unsigned int nFrames;
+            unsigned int nChannel;
+            float ratioSampleFreq;
+            float ratioFreqSample;
+            ringbuffer *ringBuffer;
+
+            float *buffer;
+            int bufSize;
+
+            float *mixBuffer;
+            int mixSize;
+
+            int bufCounter;
+            int nbBufs;
+        };
+
         static AudioManager* getInstance();
         ~AudioManager();
         void changeBuf(float* outputBuf, float maxSinValue);
@@ -74,14 +71,27 @@ class AudioManager {
        int getBufferSize();
        int getMixSize();
 
+       inline void startStream(){m_rtAudio->startStream();}
+
    private:
        std::thread* m_thread;
        AudioManager();
-       float * audioValue;            //array to save the audio value before writing
-       const float *constAudioValue;  //array to write the audio value in the ringbuffer
+       float * audioValue;
+       const float *constAudioValue;
        RtAudio::StreamOptions *options;
-
+       RtAudio *m_rtAudio;
+       RtAudio::StreamParameters *param;
+       unsigned int m_rtBufSize;
+       unsigned int m_bufSize;
+       CallbackData data;
+
+       std::mutex mtx_audio;
+
+
+       static int rtaudio_callback(void *outbuf, void *inbuf,
+                                   unsigned int nFrames, double streamtime,
+                                   RtAudioStreamStatus status, void *userdata);
 };
diff --git a/src/modules/ProjectorModule.cpp b/src/modules/ProjectorModule.cpp
index 610155c..0d92042 100644
--- a/src/modules/ProjectorModule.cpp
+++ b/src/modules/ProjectorModule.cpp
@@ -191,11 +191,11 @@ ProjectorModule::ProjectorModule(): Module() {
 
     //audio
-    m_nbTracks=10;
+    m_nbTracks=50;
     m_maxAudioValue = (std::numeric_limits<int>::max())/(m_width * m_height);
     m_audioMixSize = AudioManager::getInstance()->getMixSize();
-    m_audioBufSize = AudioManager::getInstance()->getBufferSize()
-        + m_audioMixSize;
+    m_audioBufSize = AudioManager::getInstance()->getBufferSize();
+    //    + m_audioMixSize;
     m_imgBufSize = m_audioBufSize+3;
     m_audioBuffer = new float[m_audioBufSize];
     memset(&m_audioBuffer[0], 0, m_audioBufSize*sizeof(float));
@@ -356,10 +356,9 @@ void ProjectorModule::draw() {
                 m_audioBufSize);
     glUniform1i(m_uniforms[Reveal::RENDERPROG][Reveal::AUDIOMIXSIZE],
                 m_audioMixSize);
-
-
-
 #endif
+
+
     //Draw everything
     for(auto& geom : sceneGeoms) {
         geom->draw(m_contextHandlerID, Reveal::RENDERPROG,
@@ -1025,20 +1024,21 @@ void
 ProjectorModule::processAudio() {
             //compute the new phase for the note i
             //m_phase[i] = (m_phase[i] + bufSize - 2 - mixSize) % m_outputAudio[ (bufSize*i) + 1];
             //convert the audio value from int to float and save it in outputAudio
-            for(int j=0; j<m_audioBufSize; j++){
-                m_audioBuffer[j] += trackGain * m_audioImg[(m_imgBufSize*i)+3+j]
-                                    / m_maxAudioValue;
+            for(int j=0; j<m_audioBufSize; j++) {
+                m_audioBuffer[j] += trackGain
+                    * (float(m_audioImg[(m_imgBufSize*i)+3+j])
+                       / m_maxAudioValue);
             }
-            cout<<"read phase "<<i<<" "<<m_audioImg[m_imgBufSize*i+1]<<endl;
-            cout<<"write phase "<<i<<" "<<m_audioImg[m_imgBufSize*i+2]<<endl;
-            cout<<"nb frags = "<<m_audioImg[m_imgBufSize*i]<<endl;
+            // cout<<"read phase "<<i<<" "
+            //     <<float(m_audioImg[m_imgBufSize*i+1])/m_maxAudioValue<<endl;
+            // cout<<"nb frags = "<<m_audioImg[m_imgBufSize*i]<<endl;
             m_audioImgInit[2*i+1] = m_audioImg[m_imgBufSize*i+2];
         }
     }
-    float gain = 0.8;
-    if(totalGains > 0){
+    float gain = 0.4;
+    if(totalGains > 0) {
         for(int i=0; i<m_audioBufSize; i++) {
             m_audioBuffer[i] = gain*m_audioBuffer[i]/totalGains;
         }
diff --git a/src/shaders/render43.fs b/src/shaders/render43.fs
index 2127e76..80a2801 100644
--- a/src/shaders/render43.fs
+++ b/src/shaders/render43.fs
@@ -110,12 +110,12 @@ out vec4 color;
 
 void additiveSynth(vec4 color) {
     float freq=0.0;
-    //float note = 20 + color.x * 50;//color.x * 127;
-    float note = 20.0;
+    float note = 20 + color.x * 80;//color.x * 127;
+    //float note = 69.0;
     int ind=0;
 
     int octave = int(note/12);
-    float modNote = int(note) % 12;
+    float modNote = mod(note, 12.0);
 
     if(modNote >= 8.5){
         note = octave * 12 + 10;
@@ -134,43 +134,44 @@ void additiveSynth(vec4 color) {
         ind = 4 * octave;
     }
 
+    //count this fragment
+    imageAtomicAdd(audioTex, ivec2(0, ind), 1);
 
-    freq = float(440 * pow(2, (note-69)/12));
-
-
-    //retrieve phase stored from previous frame
-    float phase = float(imageLoad(audioTex, ivec2(1, ind)).x)
-                  / maxAudioValue;
-    float samplesPerPeriod = audioRate/freq;
-    float phaseStep = 1.0/samplesPerPeriod;
-    for(int i=0; i<audioBufSize; i++) {
-        //compute value
-        float s = sin(phase * 2.0 * M_PI * freq);
+    //if the audio has not been generated by another fragment
+    if(imageLoad(audioTex, ivec2(0, ind)).x<=1) {
+        //compute frequency
+        freq = float(440.0 * pow(2.0, (note-69.0)/12.0));
+        //retrieve phase stored from previous frame
+        float phase = float(imageLoad(audioTex, ivec2(1, ind)).x)
+                      / maxAudioValue;
+        float outputPhase = phase;
+        float samplesPerPeriod = audioRate/freq;
+        float phaseStep = 1.0/samplesPerPeriod;
+        for(int i=0; i<audioBufSize; i++) {
+            //compute value
+            float s = sin(phase * 2.0 * M_PI);
+
+            //write starting from 3rd column
+            imageStore(audioTex, ivec2(i+3, ind), ivec4(int(s*maxAudioValue)));
+            phase=mod(phase + phaseStep, 1.0);
+        }
 
-        //write starting from 3rd column
-        imageStore(audioTex, ivec2(i+3, ind), ivec4(int(s*maxAudioValue)));
-        phase=mod(phase + phaseStep, 1.0);
+        /*
+        float a = 2.0*sin(freq/float(audioRate));
+        float s[2];
+        s[0] = 1.0f;
+        s[1] = 0.0f;
+        for(int i=0; i<audioBufSize; i++) {
+            imageStore(audioTex, ivec2(i+3, ind), ivec4(int(s[1]*maxAudioValue)));
+            s[0] = s[0] - a*s[1];
+            s[1] = s[1] + a*s[0];
+        }*/
+
+        //remove audioMixSize steps and write phase to output
+        outputPhase = mod(outputPhase+phaseStep*(audioBufSize-audioMixSize), 1.0);
+        imageStore(audioTex, ivec2(2, ind),
+                   ivec4(int(outputPhase*maxAudioValue)));
     }
-
-    /*
-    float a = 2.0*sin(phase*freq/float(audioRate));
-    float s[2];
-    s[0] = 1.0f;
-    s[1] = 0.0f;
-    int i = 2;
-    while (i < audioBufSize) {
-        imageStore(audioTex, ivec2(i, ind), ivec4(int(s[1]*maxAudioValue)));
-        s[0] = s[0] - a*s[1];
-        s[1] = s[1] + a*s[0];
-        i += 1;
-    }*/
-
-    //increase phase by (audioBufSize-audioMixSize) steps
-    imageStore(audioTex, ivec2(2, ind),
-               ivec4(int(phase*maxAudioValue)));
-
-    //count this fragment
-    imageAtomicAdd(audioTex, ivec2(0, ind), 1);
 }
 
 void main(void) {
-- 
GitLab
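
Note (not part of the patch): for orientation, below is a rough standalone C++ sketch of the chunked, crossfaded playback scheme that the new rtaudio_callback and constructor implement. It strips out RtAudio, the ring buffer, stereo interleaving and threading, and every name in it (ChunkedPlayer, render, next, framesPerChunk) is hypothetical rather than taken from the repository.

// Hypothetical illustration only -- not code from the patch.
#include <cstring>
#include <vector>

struct ChunkedPlayer {
    int framesPerChunk;          // samples delivered per audio callback
    int nbBufs;                  // chunks per long buffer; the last one only feeds the fade
    std::vector<float> longBuf;  // current long buffer (nbBufs * framesPerChunk samples)
    std::vector<float> tail;     // last chunk of the previous long buffer
    int chunk = 0;               // chunk that the next callback will play

    ChunkedPlayer(int frames, int bufs)
        : framesPerChunk(frames), nbBufs(bufs),
          longBuf(static_cast<size_t>(frames) * bufs, 0.f),
          tail(frames, 0.f) {}

    // Writes framesPerChunk mono samples to out. next(dst, n) must fill dst with
    // n freshly rendered samples; it is only called when chunk wraps back to 0.
    template <class NextLongBuffer>
    void render(float* out, NextLongBuffer next) {
        if (chunk == 0) {
            next(longBuf.data(), framesPerChunk * nbBufs);
            // Linearly crossfade the new buffer's first chunk with the saved tail.
            for (int i = 0; i < framesPerChunk; ++i) {
                float t = float(i) / float(framesPerChunk);
                out[i] = longBuf[i] * t + tail[i] * (1.f - t);
            }
            // Keep the final chunk around for the next crossfade.
            std::memcpy(tail.data(),
                        longBuf.data() + (nbBufs - 1) * framesPerChunk,
                        framesPerChunk * sizeof(float));
        } else {
            // Chunks 1..nbBufs-2 are played back untouched.
            std::memcpy(out, longBuf.data() + chunk * framesPerChunk,
                        framesPerChunk * sizeof(float));
        }
        chunk = (chunk + 1) % (nbBufs - 1);
    }
};

A host would call it roughly as ChunkedPlayer p(1024, 5); and then, once per audio callback, p.render(out, fill) where fill produces a fresh long buffer; in the patch that role is played by ringbuffer_read() on CallbackData::ringBuffer, with CallbackData::mixBuffer holding the saved tail.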