#include "SynthVoice.h"
#include <cmath> // NOTE(review): the second #include's header name was lost in this copy
                 // ("<...>" stripped by extraction) -- restore the original header from VCS.

//==============================================================================
// NOTE(review): every template argument in this translation unit was stripped
// from the copy under review.  The chain-index names used below (oscIndex,
// waveshaperIndex, distPreGainIndex, toneFilterIndex, flangerIndex,
// chorusIndex, delayIndex, reverbIndex, masterGainIndex, lowEQIndex,
// midEQIndex, highEQIndex) are a reconstruction and MUST be made to match the
// chain-position enum declared in SynthVoice.h.
//==============================================================================

NeuralSynthVoice::NeuralSynthVoice(NeuralSharedParams& sp) : shared(sp) {}

//==============================================================================
void NeuralSynthVoice::prepare(const juce::dsp::ProcessSpec& spec)
{
    setWaveform(0); // default waveform: sine

    tempBlock = juce::dsp::AudioBlock<float>(heapBlock, spec.numChannels, spec.maximumBlockSize);
    processorChain.prepare(spec);
    adsr.setSampleRate(spec.sampleRate);

    this->spec = spec; // cached for per-sample-rate maths in renderNextBlock
}

//==============================================================================
// Renders one block of this voice into outputBuffer (additively, so multiple
// voices can share the buffer).  Per-block parameter reads come from the
// shared atomic parameter set.
void NeuralSynthVoice::renderNextBlock(juce::AudioBuffer<float>& outputBuffer,
                                       int startSample, int numSamples)
{
    if (numSamples <= 0)
        return;

    // Free the voice for reuse once the envelope has fully released.
    if (!adsr.isActive())
        clearCurrentNote();

    // A pending waveform change (waveform != -1) is applied once, then cleared.
    if (waveform != -1)
    {
        setWaveform(waveform);
        waveform = -1;
    }

    auto block = tempBlock.getSubBlock(0, (size_t)numSamples);
    block.clear();

    // =====================================================================
    // Oscillator
    // =====================================================================
    auto& osc = processorChain.get<oscIndex>();
    juce::dsp::ProcessContextReplacing<float> oscContext(block);
    osc.process(oscContext);

    // =====================================================================
    // Distortion
    // =====================================================================
    const float driveDb = shared.distortionDrive->load(); // 0..30
    //const float distMix = juce::jlimit(0.0f, 1.0f, shared.distortionMix->load());
    const float bias    = juce::jlimit(-1.0f, 1.0f, shared.distortionBias->load());
    const float toneHz  = juce::jlimit(100.0f, 8000.0f, shared.distortionTone->load());
    const int   shape   = (int)std::lround(juce::jlimit(0.0f, 2.0f, shared.distortionShape->load()));

    auto& distWaveshaper = processorChain.template get<waveshaperIndex>();

    if (shape == 0)      // soft clip
    {
        distWaveshaper.functionToUse = [bias](float x) noexcept
        {
            return std::tanh(x + bias);
        };
    }
    else if (shape == 1) // hard clip
    {
        distWaveshaper.functionToUse = [bias](float x) noexcept
        {
            return juce::jlimit(-1.0f, 1.0f, x + bias);
        };
    }
    else                 // shape == 2: arctan shaper
    {
        distWaveshaper.functionToUse = [bias](float x) noexcept
        {
            return (float)(std::atan(x + bias) * (2.0 / juce::MathConstants<double>::pi));
        };
    }

    auto& distPreGain = processorChain.template get<distPreGainIndex>();
    distPreGain.setGainDecibels(driveDb);

    // Post-distortion tone control.
    // FIX: the original built a *peak* filter here, reusing the high-shelf
    // EQ's gain, even though the variable name and the "cutoff" comment
    // describe a low-pass at toneHz -- build a real low-pass.
    auto& distPostLPF = processorChain.template get<toneFilterIndex>();
    distPostLPF.coefficients =
        juce::dsp::IIR::Coefficients<float>::makeLowPass(spec.sampleRate, toneHz, 0.707f);

    // =====================================================================
    // Flanger
    // =====================================================================
    // FIX: the original took the delay line BY VALUE ("auto flanger = ..."),
    // so every push/pop below went into a throwaway copy and the flanger
    // carried no state between samples or blocks.
    auto& flanger = processorChain.get<flangerIndex>();

    // NOTE(review): 'rate' is loaded from flangerPhase in the original --
    // this looks like a copy/paste slip (a dedicated rate parameter probably
    // exists in NeuralSharedParams); confirm before changing.
    auto rate         = shared.flangerPhase->load();
    auto lfoPhase     = shared.flangerPhase->load();
    auto flangerDepth = shared.flangerDepth->load();
    auto mix          = shared.flangerDryMix->load();
    auto feedback     = shared.flangerFeedback->load();

    // Sample-by-sample flanger on channel 0 only (channel 0 only in the
    // original as well -- TODO confirm other channels are meant to be dry).
    auto* raw = block.getChannelPointer(0);

    for (int i = 0; i < numSamples; ++i)
    {
        const float in  = raw[i];
        const float lfo = std::sin(lfoPhase);

        // Delay in samples, swept 0..depth*sampleRate by the LFO.
        // NOTE(review): assumes flangerDepth is in seconds -- TODO confirm.
        const float delayTime = (1.0f + lfo) * 0.5f * flangerDepth * spec.sampleRate;

        flanger.setDelay(delayTime);
        const float delayed = flanger.popSample(0);
        flanger.pushSample(0, in + delayed * feedback);

        raw[i] = in * (1.0f - mix) + delayed * mix;

        lfoPhase += juce::MathConstants<float>::twoPi * rate / (float)spec.sampleRate;
        if (lfoPhase > juce::MathConstants<float>::twoPi)
            lfoPhase -= juce::MathConstants<float>::twoPi;
    }
    // NOTE(review): lfoPhase is never written back, so the LFO restarts at the
    // same phase every block.  Persist it per-voice if a free-running LFO is
    // intended (do NOT store into the shared atomic -- voices would fight).

    // Run the processor chain over the block.
    // NOTE(review): ProcessorChain::process() runs EVERY element, including
    // the oscillator already processed above -- verify the oscillator slot is
    // bypassed here, otherwise it is rendered twice per block.
    juce::dsp::ProcessContextReplacing<float> fxContext(block);
    processorChain.process(fxContext);

    // Master gain and EQ coefficients are updated AFTER process(), so they
    // take effect on the next block (same ordering as the original).
    auto& master = processorChain.get<masterGainIndex>();
    master.setGainDecibels(shared.masterDbls->load()); // (removed unused duplicate load 'ex')

    // ===== 3-band EQ =====
    auto& lowEQ = processorChain.get<lowEQIndex>();
    lowEQ.coefficients = juce::dsp::IIR::Coefficients<float>::makeLowShelf(
        spec.sampleRate,
        100.0f,  // cutoff
        0.707f,  // Q
        juce::Decibels::decibelsToGain(shared.lowGainDbls->load()));

    auto& midEQ = processorChain.get<midEQIndex>();
    midEQ.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter(
        spec.sampleRate,
        1000.0f, // centre frequency
        1.0f,    // Q
        juce::Decibels::decibelsToGain(shared.midGainDbls->load()));

    // FIX: the comment said "HIGH SHELF" but the original built a peak filter
    // at 10 kHz -- build a real high shelf.
    auto& highEQ = processorChain.get<highEQIndex>();
    highEQ.coefficients = juce::dsp::IIR::Coefficients<float>::makeHighShelf(
        spec.sampleRate,
        10000.0f, // cutoff
        0.707f,   // Q
        juce::Decibels::decibelsToGain(shared.highGainDbls->load()));

    // Apply the ADSR by wrapping tempBlock's channel pointers in an
    // AudioBuffer (no copy -- the buffer aliases tempBlock's storage).
    std::vector<float*> channelPtrs;
    channelPtrs.reserve((size_t)tempBlock.getNumChannels());
    for (size_t ch = 0; ch < tempBlock.getNumChannels(); ++ch)
        channelPtrs.push_back(tempBlock.getChannelPointer(ch));

    juce::AudioBuffer<float> buffer(channelPtrs.data(),
                                    static_cast<int>(tempBlock.getNumChannels()),
                                    static_cast<int>(tempBlock.getNumSamples()));
    adsr.applyEnvelopeToBuffer(buffer, 0, numSamples);

    // Mix this voice additively into the host buffer.
    juce::dsp::AudioBlock<float>(outputBuffer)
        .getSubBlock((size_t)startSample, (size_t)numSamples)
        .add(block);
}

//==============================================================================
// Configures per-note state: oscillator pitch, FX parameters snapshotted from
// the shared atomics, and the ADSR attack trigger.
void NeuralSynthVoice::noteStarted()
{
    const auto freqHz = (float)getCurrentlyPlayingNote().getFrequencyInHertz();
    processorChain.get<oscIndex>().setFrequency(freqHz, true);
    // (the original also read note-on velocity but never used it -- removed)

    auto& chorus = processorChain.get<chorusIndex>();
    chorus.setCentreDelay(shared.chorusCentre->load());
    chorus.setDepth(shared.chorusDepth->load());
    chorus.setFeedback(shared.chorusFeedback->load());
    chorus.setMix(shared.chorusMix->load());
    chorus.setRate(shared.chorusRate->load());

    processorChain.get<delayIndex>().setDelay(shared.delayTime->load());

    juce::Reverb::Parameters rp;
    rp.damping    = shared.reverbDamping->load();
    rp.dryLevel   = shared.reverbDryLevel->load();
    rp.freezeMode = shared.reverbFreezeMode->load();
    rp.roomSize   = shared.reverbRoomSize->load();
    rp.wetLevel   = shared.reverbWetLevel->load();
    rp.width      = shared.reverbWidth->load();
    processorChain.get<reverbIndex>().setParameters(rp);

    juce::ADSR::Parameters p;
    p.attack  = shared.adsrAttack->load();
    p.decay   = shared.adsrDecay->load();
    p.sustain = shared.adsrSustain->load();
    p.release = shared.adsrRelease->load();
    adsr.setParameters(p);

    adsr.noteOn();
}

//==============================================================================
void NeuralSynthVoice::notePitchbendChanged()
{
    const auto freqHz = (float)getCurrentlyPlayingNote().getFrequencyInHertz();
    processorChain.get<oscIndex>().setFrequency(freqHz, true);
}

//==============================================================================
void NeuralSynthVoice::noteStopped(bool allowTailOff)
{
    adsr.noteOff(); // enter release phase

    // FIX: the original ignored allowTailOff -- a hard stop must free the
    // voice immediately instead of letting the release tail play out.
    if (!allowTailOff)
        clearCurrentNote();
}

//==============================================================================
void NeuralSynthVoice::notePressureChanged() {}
void NeuralSynthVoice::noteTimbreChanged()   {}
void NeuralSynthVoice::noteKeyStateChanged() {}

//==============================================================================
// Installs the oscillator function for waveformType (0=sine, 1=saw, 2=square,
// 3=triangle).  The lambda receives phase x in [-pi, pi).
void NeuralSynthVoice::setWaveform(int waveformType)
{
    auto& osc = processorChain.template get<oscIndex>();

    switch (waveformType)
    {
        case 0: // Sine
            osc.initialise([](float x) { return std::sin(x); });
            break;

        case 1: // Saw: maps [-pi, pi) linearly onto [-1, 1)
            osc.initialise([](float x) { return x / juce::MathConstants<float>::pi; });
            break;

        case 2: // Square
            osc.initialise([](float x) { return x < 0.0f ? -1.0f : 1.0f; });
            break;

        case 3: // Triangle
            // FIX: the original formula 2*|2*(x/2pi) - 1| - 1 over x in
            // [-pi, pi) produced values in (-1, 3].  Shift the phase into
            // [0, 1) first so the output stays in [-1, 1].
            osc.initialise([](float x)
            {
                const float u = (x + juce::MathConstants<float>::pi)
                              / juce::MathConstants<float>::twoPi; // [0, 1)
                return 2.0f * std::abs(2.0f * u - 1.0f) - 1.0f;
            });
            break;

        default: // out-of-range types leave the oscillator unchanged (original behaviour)
            break;
    }
}