// NeuralSynth/Source/SynthVoice.cpp
// (Last modified 2025-10-25 17:57:05 +00:00.)
// Note: repository-viewer chrome ("Files", "Raw Blame History", size info,
// Unicode-character warning) was scraped into this file and has been folded
// into this comment so the translation unit stays valid C++.
#include "SynthVoice.h"
#include <cmath>
//==============================================================================
NeuralSynthVoice::NeuralSynthVoice (NeuralSharedParams& sp)
: shared (sp) {}
//==============================================================================
void NeuralSynthVoice::prepare (const juce::dsp::ProcessSpec& newSpec)
{
spec = newSpec;
// --- Oscillator
osc.prepare (spec.sampleRate);
osc.setWave (BlepWave::Sine);
// --- Wavetable oscillator factory banks ---
wtOsc.prepare (spec.sampleRate);
morphLfo.prepare (spec.sampleRate);
currentWtBankIndex = -1;
wtOsc2.prepare (spec.sampleRate);
morphLfo2.prepare (spec.sampleRate);
currentWtBankIndex2 = -1;
const auto& library = WT::FactoryLibrary::get();
if (! library.empty())
{
wtOsc.setBank (library.front().bank);
currentWtBankIndex = 0;
wtOsc2.setBank (library.front().bank);
currentWtBankIndex2 = 0;
}
// --- Scratch buffer (IMPORTANT: allocate real memory)
tempBuffer.setSize ((int) spec.numChannels, (int) spec.maximumBlockSize,
false, false, true);
tempBlock = juce::dsp::AudioBlock<float> (tempBuffer);
// --- Prepare chain elements
chain.prepare (spec);
chain.get<masterIndex>().setRampDurationSeconds (0.02f);
chain.get<limiterIndex>().setThreshold (-1.0f);
chain.get<limiterIndex>().setRelease (0.05f);
chain.get<limiterIndex>().reset();
// Set maximum delay sizes BEFORE runtime changes
{
// Flanger: up to 20 ms
auto& flanger = chain.get<flangerIndex>();
const size_t maxFlangerDelay = (size_t) juce::jmax<size_t>(
1, (size_t) std::ceil (0.020 * spec.sampleRate));
flanger.setMaximumDelayInSamples (maxFlangerDelay);
flanger.reset();
}
{
// Simple delay: up to 2 s
auto& delay = chain.get<delayIndex>();
const size_t maxDelay = (size_t) juce::jmax<size_t>(
1, (size_t) std::ceil (2.0 * spec.sampleRate));
delay.setMaximumDelayInSamples (maxDelay);
delay.reset();
}
// Envelopes
adsr.setSampleRate (spec.sampleRate);
filterAdsr.setSampleRate (spec.sampleRate);
// Filter
svf.reset();
svf.prepare (spec);
// Initial filter type
const int type = (int) std::lround (juce::jlimit (0.0f, 2.0f,
shared.filterType ? shared.filterType->load() : 0.0f));
switch (type)
{
case 0: svf.setType (juce::dsp::StateVariableTPTFilterType::lowpass); break;
case 1: svf.setType (juce::dsp::StateVariableTPTFilterType::highpass); break;
case 2: svf.setType (juce::dsp::StateVariableTPTFilterType::bandpass); break;
default: break;
}
}
//==============================================================================
void NeuralSynthVoice::renderNextBlock (juce::AudioBuffer<float>& outputBuffer,
int startSample, int numSamples)
{
if (numSamples <= 0)
return;
if (! adsr.isActive())
clearCurrentNote();
// --- Generate oscillator into temp buffer (BLEP or Wavetable)
tempBuffer.clear();
const int numCh = juce::jmin ((int) spec.numChannels, tempBuffer.getNumChannels());
const auto& library = WT::FactoryLibrary::get();
const int librarySize = (int) library.size();
if (librarySize > 0 && shared.wtBank)
{
const int targetBank = juce::jlimit (0, librarySize - 1,
(int) std::lround (shared.wtBank->load()));
if (targetBank != currentWtBankIndex)
{
wtOsc.setBank (library[(size_t) targetBank].bank);
currentWtBankIndex = targetBank;
}
}
if (librarySize > 0 && shared.wt2Bank)
{
const int targetBank2 = juce::jlimit (0, librarySize - 1,
(int) std::lround (shared.wt2Bank->load()));
if (targetBank2 != currentWtBankIndex2)
{
wtOsc2.setBank (library[(size_t) targetBank2].bank);
currentWtBankIndex2 = targetBank2;
}
}
const bool useWTLayerA = (shared.wtOn && shared.wtOn->load() > 0.5f)
&& wtOsc.getFrameCount() > 0;
const bool useWTLayerB = (shared.wt2On && shared.wt2On->load() > 0.5f)
&& wtOsc2.getFrameCount() > 0;
const float morphMaxA = wtOsc.getMaxMorph();
const float morphBaseA = shared.wtMorph
? juce::jlimit (0.0f, morphMaxA, shared.wtMorph->load())
: 0.0f;
const float lfoDepthA = shared.wtLfoDepth ? shared.wtLfoDepth->load() : 0.0f;
const float lfoRateA = shared.wtLfoRate ? shared.wtLfoRate->load() : 1.0f;
const int lfoShapeA = shared.wtLfoShape ? (int) std::lround (shared.wtLfoShape->load()) : 0;
morphLfo.setRate (lfoRateA);
morphLfo.setShape (lfoShapeA);
const float depthFramesA = juce::jlimit (0.0f, morphMaxA, lfoDepthA);
const float morphMaxB = wtOsc2.getMaxMorph();
const float morphBaseB = shared.wt2Morph
? juce::jlimit (0.0f, morphMaxB, shared.wt2Morph->load())
: 0.0f;
const float lfoDepthB = shared.wt2LfoDepth ? shared.wt2LfoDepth->load() : 0.0f;
const float lfoRateB = shared.wt2LfoRate ? shared.wt2LfoRate->load() : 0.3f;
const int lfoShapeB = shared.wt2LfoShape ? (int) std::lround (shared.wt2LfoShape->load()) : 0;
morphLfo2.setRate (lfoRateB);
morphLfo2.setShape (lfoShapeB);
const float depthFramesB = juce::jlimit (0.0f, morphMaxB, lfoDepthB);
const float levelA = shared.wtLevel ? juce::jlimit (0.0f, 1.0f, shared.wtLevel->load()) : 0.0f;
const float levelB = shared.wt2Level ? juce::jlimit (0.0f, 1.0f, shared.wt2Level->load()) : 0.0f;
const float safeLevelSum = juce::jlimit (0.5f, 2.0f, levelA + levelB + 0.0001f);
const float mixGain = 0.45f / safeLevelSum;
for (int i = 0; i < numSamples; ++i)
{
float sampleA = useWTLayerA ? 0.0f : osc.process();
if (useWTLayerA)
{
const float lfoValueA = morphLfo.process();
const float headroomNegA = juce::jmin (depthFramesA, morphBaseA);
const float headroomPosA = juce::jmin (depthFramesA, morphMaxA - morphBaseA);
const float offsetA = (lfoValueA >= 0.0f ? lfoValueA * headroomPosA
: lfoValueA * headroomNegA);
const float morphValueA = juce::jlimit (0.0f, morphMaxA, morphBaseA + offsetA);
sampleA = wtOsc.process (morphValueA);
}
else
{
morphLfo.process(); // advance for consistency
}
float sampleB = 0.0f;
if (useWTLayerB)
{
const float lfoValueB = morphLfo2.process();
const float headroomNegB = juce::jmin (depthFramesB, morphBaseB);
const float headroomPosB = juce::jmin (depthFramesB, morphMaxB - morphBaseB);
const float offsetB = (lfoValueB >= 0.0f ? lfoValueB * headroomPosB
: lfoValueB * headroomNegB);
const float morphValueB = juce::jlimit (0.0f, morphMaxB, morphBaseB + offsetB);
sampleB = wtOsc2.process (morphValueB);
}
else
{
morphLfo2.process();
}
const float combined = mixGain * ((sampleA * levelA) + (sampleB * levelB));
for (int ch = 0; ch < numCh; ++ch)
tempBuffer.getWritePointer (ch)[i] = combined;
}
auto block = tempBlock.getSubBlock (0, (size_t) numSamples);
// ================================================================
// Flanger (pre-filter) manual per-sample to set varying delay
// ================================================================
{
auto& flanger = chain.get<flangerIndex>();
const bool enabled = shared.flangerOn && shared.flangerOn->load() > 0.5f;
if (enabled)
{
const float rate = shared.flangerRate ? shared.flangerRate->load() : 0.0f;
float lfoPhase = shared.flangerPhase ? shared.flangerPhase->load() : 0.0f;
const float flangerDepth = shared.flangerDepth ? shared.flangerDepth->load() : 0.0f; // ms
const float mix = shared.flangerDryMix ? shared.flangerDryMix->load() : 0.0f;
const float feedback = shared.flangerFeedback ? shared.flangerFeedback->load() : 0.0f;
const float baseDelayMs = shared.flangerDelay ? shared.flangerDelay->load() : 0.25f;
for (int i = 0; i < numSamples; ++i)
{
const float in = tempBuffer.getReadPointer (0)[i];
const float lfo = std::sin (lfoPhase);
const float delayMs = baseDelayMs + 0.5f * (1.0f + lfo) * flangerDepth;
const float delaySamples = juce::jmax (0.0f, delayMs * 0.001f * (float) spec.sampleRate);
flanger.setDelay (delaySamples);
const float delayed = flanger.popSample (0);
flanger.pushSample (0, in + delayed * feedback);
const float out = in * (1.0f - mix) + delayed * mix;
for (int ch = 0; ch < numCh; ++ch)
tempBuffer.getWritePointer (ch)[i] = out;
lfoPhase += juce::MathConstants<float>::twoPi * rate / (float) spec.sampleRate;
if (lfoPhase > juce::MathConstants<float>::twoPi)
lfoPhase -= juce::MathConstants<float>::twoPi;
}
}
}
// ================================================================
// Filter with per-sample ADSR modulation (poly)
// ================================================================
{
const bool enabled = shared.filterOn && shared.filterOn->load() > 0.5f;
// Update filter type every block (cheap)
const int ftype = (int) std::lround (juce::jlimit (0.0f, 2.0f,
shared.filterType ? shared.filterType->load() : 0.0f));
switch (ftype)
{
case 0: svf.setType (juce::dsp::StateVariableTPTFilterType::lowpass); break;
case 1: svf.setType (juce::dsp::StateVariableTPTFilterType::highpass); break;
case 2: svf.setType (juce::dsp::StateVariableTPTFilterType::bandpass); break;
default: break;
}
const float qOrRes = juce::jlimit (0.1f, 10.0f,
shared.filterResonance ? shared.filterResonance->load() : 0.7f);
svf.setResonance (qOrRes);
const float baseCutoff = juce::jlimit (20.0f, 20000.0f,
shared.filterCutoff ? shared.filterCutoff->load() : 1000.0f);
const float envAmt = shared.fenvAmount ? shared.fenvAmount->load() : 0.0f;
for (int i = 0; i < numSamples; ++i)
{
const float envVal = filterAdsr.getNextSample();
const float cutoff = juce::jlimit (20.0f, 20000.0f,
baseCutoff * std::pow (2.0f, envAmt * envVal));
svf.setCutoffFrequency (cutoff);
if (enabled)
{
for (int ch = 0; ch < numCh; ++ch)
{
float x = tempBuffer.getSample (ch, i);
x = svf.processSample (ch, x);
tempBuffer.setSample (ch, i, x);
}
}
}
}
// ================================================================
// Chorus
// ================================================================
if (shared.chorusOn && shared.chorusOn->load() > 0.5f)
{
auto& chorus = chain.get<chorusIndex>();
if (shared.chorusCentre) chorus.setCentreDelay (shared.chorusCentre->load());
if (shared.chorusDepth) chorus.setDepth (shared.chorusDepth->load());
if (shared.chorusFeedback) chorus.setFeedback (shared.chorusFeedback->load());
if (shared.chorusMix) chorus.setMix (shared.chorusMix->load());
if (shared.chorusRate) chorus.setRate (shared.chorusRate->load());
chain.get<chorusIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
}
// ================================================================
// Simple Delay (per-voice)
// ================================================================
if (shared.delayOn && shared.delayOn->load() > 0.5f)
{
auto& delay = chain.get<delayIndex>();
const float time = shared.delayTime ? shared.delayTime->load() : 0.1f;
delay.setDelay (juce::jmax (0.0f, time * (float) spec.sampleRate));
delay.process (juce::dsp::ProcessContextReplacing<float> (block));
}
// ================================================================
// Reverb
// ================================================================
if (shared.reverbOn && shared.reverbOn->load() > 0.5f)
{
juce::Reverb::Parameters rp;
rp.damping = shared.reverbDamping ? shared.reverbDamping->load() : 0.0f;
rp.dryLevel = shared.reverbDryLevel ? shared.reverbDryLevel->load() : 0.0f;
rp.freezeMode = shared.reverbFreezeMode ? shared.reverbFreezeMode->load() : 0.0f;
rp.roomSize = shared.reverbRoomSize ? shared.reverbRoomSize->load() : 0.0f;
rp.wetLevel = shared.reverbWetLevel ? shared.reverbWetLevel->load() : 0.0f;
rp.width = shared.reverbWidth ? shared.reverbWidth->load() : 0.0f;
chain.get<reverbIndex>().setParameters (rp);
chain.get<reverbIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
}
// ================================================================
// Distortion + tone (post LPF/Peak)
// ================================================================
{
const float driveDb = shared.distortionDrive ? shared.distortionDrive->load() : 0.0f;
const float bias = juce::jlimit (-1.0f, 1.0f, shared.distortionBias ? shared.distortionBias->load() : 0.0f);
const float toneHz = juce::jlimit (100.0f, 8000.0f, shared.distortionTone ? shared.distortionTone->load() : 3000.0f);
const int shape = (int) std::lround (juce::jlimit (0.0f, 2.0f,
shared.distortionShape ? shared.distortionShape->load() : 0.0f));
const float mix = shared.distortionMix ? shared.distortionMix->load() : 0.0f;
auto& pre = chain.get<distortionPreGain>();
auto& sh = chain.get<distortionIndex>();
auto& tone = chain.get<distortionPostLPF>();
pre.setGainDecibels (driveDb);
// Explicit std::function target (works on MSVC)
if (shape == 0) sh.functionToUse = std::function<float(float)>{ [bias](float x) noexcept { return std::tanh (x + bias); } };
else if (shape == 1) sh.functionToUse = std::function<float(float)>{ [bias](float x) noexcept { return juce::jlimit (-1.0f, 1.0f, x + bias); } };
else sh.functionToUse = std::function<float(float)>{ [bias](float x) noexcept { return std::atan (x + bias) * (2.0f / juce::MathConstants<float>::pi); } };
tone.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
spec.sampleRate, toneHz, 0.707f,
juce::Decibels::decibelsToGain (shared.highGainDbls ? shared.highGainDbls->load() : 0.0f));
if (shared.distortionOn && shared.distortionOn->load() > 0.5f)
{
// Wet/dry blend around the shaper
juce::AudioBuffer<float> dryCopy (tempBuffer.getNumChannels(), numSamples);
for (int ch = 0; ch < numCh; ++ch)
dryCopy.copyFrom (ch, 0, tempBuffer, ch, 0, numSamples);
// pre -> shaper -> tone
pre.process (juce::dsp::ProcessContextReplacing<float> (block));
sh.process (juce::dsp::ProcessContextReplacing<float> (block));
tone.process (juce::dsp::ProcessContextReplacing<float> (block));
const float wet = mix, dry = 1.0f - mix;
for (int ch = 0; ch < numCh; ++ch)
{
auto* d = dryCopy.getReadPointer (ch);
auto* w = tempBuffer.getWritePointer (ch);
for (int i = 0; i < numSamples; ++i)
w[i] = dry * d[i] + wet * w[i];
}
}
}
// ================================================================
// EQ + Master + Limiter (EQ guarded by eqOn)
// ================================================================
{
const bool eqEnabled = shared.eqOn && shared.eqOn->load() > 0.5f;
auto& eqL = chain.get<eqLowIndex>();
auto& eqM = chain.get<eqMidIndex>();
auto& eqH = chain.get<eqHighIndex>();
if (eqEnabled)
{
eqL.coefficients = juce::dsp::IIR::Coefficients<float>::makeLowShelf (
spec.sampleRate, 100.0f, 0.707f,
juce::Decibels::decibelsToGain (shared.lowGainDbls ? shared.lowGainDbls->load() : 0.0f));
eqM.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
spec.sampleRate, 1000.0f, 1.0f,
juce::Decibels::decibelsToGain (shared.midGainDbls ? shared.midGainDbls->load() : 0.0f));
eqH.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
spec.sampleRate, 10000.0f, 0.707f,
juce::Decibels::decibelsToGain (shared.highGainDbls ? shared.highGainDbls->load() : 0.0f));
eqL.process (juce::dsp::ProcessContextReplacing<float> (block));
eqM.process (juce::dsp::ProcessContextReplacing<float> (block));
eqH.process (juce::dsp::ProcessContextReplacing<float> (block));
}
chain.get<masterIndex>().setGainDecibels (shared.masterDbls ? shared.masterDbls->load() : 0.0f);
chain.get<masterIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
chain.get<limiterIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
}
// ================================================================
// Apply AMP ADSR envelope
// ================================================================
{
juce::AudioBuffer<float> buf (tempBuffer.getArrayOfWritePointers(), numCh, numSamples);
adsr.applyEnvelopeToBuffer (buf, 0, numSamples);
}
// Mix into output
juce::dsp::AudioBlock<float> (outputBuffer)
.getSubBlock ((size_t) startSample, (size_t) numSamples)
.add (block);
}
//==============================================================================
void NeuralSynthVoice::noteStarted()
{
const float freqHz = (float) getCurrentlyPlayingNote().getFrequencyInHertz();
const float initPhase = shared.wtPhase
? juce::jlimit (0.0f, 1.0f, shared.wtPhase->load())
: 0.0f;
// Oscillator frequency and phase retrigger (BLEP + WT)
osc.setFrequency (freqHz);
osc.resetPhase (initPhase);
wtOsc.setFrequency (freqHz);
wtOsc.resetPhase (initPhase);
morphLfo.reset();
const float initPhaseB = shared.wt2Phase
? juce::jlimit (0.0f, 1.0f, shared.wt2Phase->load())
: initPhase;
wtOsc2.setFrequency (freqHz);
wtOsc2.resetPhase (initPhaseB);
morphLfo2.reset();
// Chorus snapshot
if (shared.chorusCentre) chain.get<chorusIndex>().setCentreDelay (shared.chorusCentre->load());
if (shared.chorusDepth) chain.get<chorusIndex>().setDepth (shared.chorusDepth->load());
if (shared.chorusFeedback) chain.get<chorusIndex>().setFeedback (shared.chorusFeedback->load());
if (shared.chorusMix) chain.get<chorusIndex>().setMix (shared.chorusMix->load());
if (shared.chorusRate) chain.get<chorusIndex>().setRate (shared.chorusRate->load());
// Delay time (in samples)
if (shared.delayTime)
chain.get<delayIndex>().setDelay (juce::jmax (0.0f, shared.delayTime->load() * (float) spec.sampleRate));
// Reverb snapshot
juce::Reverb::Parameters rp;
rp.damping = shared.reverbDamping ? shared.reverbDamping->load() : 0.0f;
rp.dryLevel = shared.reverbDryLevel ? shared.reverbDryLevel->load() : 0.0f;
rp.freezeMode = shared.reverbFreezeMode ? shared.reverbFreezeMode->load() : 0.0f;
rp.roomSize = shared.reverbRoomSize ? shared.reverbRoomSize->load() : 0.0f;
rp.wetLevel = shared.reverbWetLevel ? shared.reverbWetLevel->load() : 0.0f;
rp.width = shared.reverbWidth ? shared.reverbWidth->load() : 0.0f;
chain.get<reverbIndex>().setParameters (rp);
// Amp ADSR
juce::ADSR::Parameters ap;
ap.attack = shared.adsrAttack ? shared.adsrAttack->load() : 0.01f;
ap.decay = shared.adsrDecay ? shared.adsrDecay->load() : 0.10f;
ap.sustain = shared.adsrSustain ? shared.adsrSustain->load() : 0.80f;
ap.release = shared.adsrRelease ? shared.adsrRelease->load() : 0.40f;
adsr.setParameters (ap);
adsr.noteOn();
// Filter ADSR
juce::ADSR::Parameters fp;
fp.attack = shared.fenvAttack ? shared.fenvAttack->load() : 0.01f;
fp.decay = shared.fenvDecay ? shared.fenvDecay->load() : 0.10f;
fp.sustain = shared.fenvSustain ? shared.fenvSustain->load() : 0.80f;
fp.release = shared.fenvRelease ? shared.fenvRelease->load() : 0.40f;
filterAdsr.setParameters (fp);
filterAdsr.noteOn();
}
//==============================================================================
void NeuralSynthVoice::notePitchbendChanged()
{
const float freqHz = (float) getCurrentlyPlayingNote().getFrequencyInHertz();
osc.setFrequency (freqHz);
wtOsc.setFrequency (freqHz);
}
//==============================================================================
void NeuralSynthVoice::noteStopped (bool allowTailOff)
{
juce::ignoreUnused (allowTailOff);
adsr.noteOff();
filterAdsr.noteOff();
}
//==============================================================================