#include "SynthVoice.h" #include //============================================================================== NeuralSynthVoice::NeuralSynthVoice (NeuralSharedParams& sp) : shared (sp) {} //============================================================================== void NeuralSynthVoice::prepare (const juce::dsp::ProcessSpec& newSpec) { spec = newSpec; // --- Oscillator osc.prepare (spec.sampleRate); setWaveform (0); // default to sine // --- Scratch buffer (IMPORTANT: allocate real memory) tempBuffer.setSize ((int) spec.numChannels, (int) spec.maximumBlockSize, false, false, true); tempBlock = juce::dsp::AudioBlock (tempBuffer); // --- Prepare chain elements chain.prepare (spec); // Set maximum delay sizes BEFORE runtime changes { // Flanger: up to 20 ms auto& flanger = chain.get(); const size_t maxFlangerDelay = (size_t) juce::jmax( 1, (size_t) std::ceil (0.020 * spec.sampleRate)); flanger.setMaximumDelayInSamples (maxFlangerDelay); flanger.reset(); } { // Simple delay: up to 2 s auto& delay = chain.get(); const size_t maxDelay = (size_t) juce::jmax( 1, (size_t) std::ceil (2.0 * spec.sampleRate)); delay.setMaximumDelayInSamples (maxDelay); delay.reset(); } // Envelopes adsr.setSampleRate (spec.sampleRate); filterAdsr.setSampleRate (spec.sampleRate); // Filter svf.reset(); svf.prepare (spec); // Initial filter type const int type = (int) std::lround (juce::jlimit (0.0f, 2.0f, shared.filterType ? shared.filterType->load() : 0.0f)); switch (type) { case 0: svf.setType (juce::dsp::StateVariableTPTFilterType::lowpass); break; case 1: svf.setType (juce::dsp::StateVariableTPTFilterType::highpass); break; case 2: svf.setType (juce::dsp::StateVariableTPTFilterType::bandpass); break; default: break; } } //============================================================================== void NeuralSynthVoice::renderNextBlock (juce::AudioBuffer& outputBuffer, int startSample, int numSamples) { if (numSamples <= 0) return; if (! adsr.isActive()) clearCurrentNote(); // Apply pending waveform change (from GUI / processor thread) const int wf = pendingWaveform.exchange (-1, std::memory_order_acq_rel); if (wf != -1) setWaveform (wf); // --- Generate oscillator into temp buffer tempBuffer.clear(); const int numCh = juce::jmin ((int) spec.numChannels, tempBuffer.getNumChannels()); for (int i = 0; i < numSamples; ++i) { const float s = osc.process(); for (int ch = 0; ch < numCh; ++ch) tempBuffer.getWritePointer (ch)[i] = s; } auto block = tempBlock.getSubBlock (0, (size_t) numSamples); // ================================================================ // Flanger (pre-filter) – manual per-sample to set varying delay // ================================================================ { auto& flanger = chain.get(); const bool enabled = shared.flangerOn && shared.flangerOn->load() > 0.5f; if (enabled) { const float rate = shared.flangerRate ? shared.flangerRate->load() : 0.0f; float lfoPhase = shared.flangerPhase ? shared.flangerPhase->load() : 0.0f; const float flangerDepth = shared.flangerDepth ? shared.flangerDepth->load() : 0.0f; // ms const float mix = shared.flangerDryMix ? shared.flangerDryMix->load() : 0.0f; const float feedback = shared.flangerFeedback ? shared.flangerFeedback->load() : 0.0f; const float baseDelayMs = shared.flangerDelay ? 
//==============================================================================
void NeuralSynthVoice::renderNextBlock (juce::AudioBuffer<float>& outputBuffer,
                                        int startSample, int numSamples)
{
    if (numSamples <= 0)
        return;

    if (! adsr.isActive())
        clearCurrentNote();

    // Apply pending waveform change (from GUI / processor thread)
    const int wf = pendingWaveform.exchange (-1, std::memory_order_acq_rel);
    if (wf != -1)
        setWaveform (wf);

    // --- Generate oscillator into temp buffer
    tempBuffer.clear();

    const int numCh = juce::jmin ((int) spec.numChannels, tempBuffer.getNumChannels());

    for (int i = 0; i < numSamples; ++i)
    {
        const float s = osc.process();

        for (int ch = 0; ch < numCh; ++ch)
            tempBuffer.getWritePointer (ch)[i] = s;
    }

    auto block = tempBlock.getSubBlock (0, (size_t) numSamples);

    // ================================================================
    // Flanger (pre-filter) - manual per-sample to set varying delay
    // ================================================================
    {
        auto& flanger = chain.get<ChainIdx::flanger>();

        const bool enabled = shared.flangerOn && shared.flangerOn->load() > 0.5f;

        if (enabled)
        {
            const float rate         = shared.flangerRate     ? shared.flangerRate->load()     : 0.0f;
            float lfoPhase           = shared.flangerPhase    ? shared.flangerPhase->load()    : 0.0f;
            const float flangerDepth = shared.flangerDepth    ? shared.flangerDepth->load()    : 0.0f; // ms
            const float mix          = shared.flangerDryMix   ? shared.flangerDryMix->load()   : 0.0f;
            const float feedback     = shared.flangerFeedback ? shared.flangerFeedback->load() : 0.0f;
            const float baseDelayMs  = shared.flangerDelay    ? shared.flangerDelay->load()    : 0.25f;

            for (int i = 0; i < numSamples; ++i)
            {
                const float in  = tempBuffer.getReadPointer (0)[i];
                const float lfo = std::sin (lfoPhase);

                const float delayMs      = baseDelayMs + 0.5f * (1.0f + lfo) * flangerDepth;
                const float delaySamples = juce::jmax (0.0f, delayMs * 0.001f * (float) spec.sampleRate);
                flanger.setDelay (delaySamples);

                const float delayed = flanger.popSample (0);
                flanger.pushSample (0, in + delayed * feedback);

                const float out = in * (1.0f - mix) + delayed * mix;

                for (int ch = 0; ch < numCh; ++ch)
                    tempBuffer.getWritePointer (ch)[i] = out;

                lfoPhase += juce::MathConstants<float>::twoPi * rate / (float) spec.sampleRate;
                if (lfoPhase > juce::MathConstants<float>::twoPi)
                    lfoPhase -= juce::MathConstants<float>::twoPi;
            }
        }
    }

    // ================================================================
    // Filter with per-sample ADSR modulation (poly)
    // ================================================================
    {
        const bool enabled = shared.filterOn && shared.filterOn->load() > 0.5f;

        // Update filter type every block (cheap)
        const int ftype = (int) std::lround (juce::jlimit (0.0f, 2.0f,
            shared.filterType ? shared.filterType->load() : 0.0f));

        switch (ftype)
        {
            case 0: svf.setType (juce::dsp::StateVariableTPTFilterType::lowpass);  break;
            case 1: svf.setType (juce::dsp::StateVariableTPTFilterType::highpass); break;
            case 2: svf.setType (juce::dsp::StateVariableTPTFilterType::bandpass); break;
            default: break;
        }

        const float qOrRes = juce::jlimit (0.1f, 10.0f,
            shared.filterResonance ? shared.filterResonance->load() : 0.7f);
        svf.setResonance (qOrRes);

        const float baseCutoff = juce::jlimit (20.0f, 20000.0f,
            shared.filterCutoff ? shared.filterCutoff->load() : 1000.0f);
        const float envAmt = shared.fenvAmount ? shared.fenvAmount->load() : 0.0f;

        for (int i = 0; i < numSamples; ++i)
        {
            const float envVal = filterAdsr.getNextSample();
            const float cutoff = juce::jlimit (20.0f, 20000.0f,
                                               baseCutoff * std::pow (2.0f, envAmt * envVal));
            svf.setCutoffFrequency (cutoff);

            if (enabled)
            {
                for (int ch = 0; ch < numCh; ++ch)
                {
                    float x = tempBuffer.getSample (ch, i);
                    x = svf.processSample (ch, x);
                    tempBuffer.setSample (ch, i, x);
                }
            }
        }
    }

    // ================================================================
    // Chorus
    // ================================================================
    if (shared.chorusOn && shared.chorusOn->load() > 0.5f)
    {
        auto& chorus = chain.get<ChainIdx::chorus>();

        if (shared.chorusCentre)   chorus.setCentreDelay (shared.chorusCentre->load());
        if (shared.chorusDepth)    chorus.setDepth       (shared.chorusDepth->load());
        if (shared.chorusFeedback) chorus.setFeedback    (shared.chorusFeedback->load());
        if (shared.chorusMix)      chorus.setMix         (shared.chorusMix->load());
        if (shared.chorusRate)     chorus.setRate        (shared.chorusRate->load());

        chorus.process (juce::dsp::ProcessContextReplacing<float> (block));
    }

    // ================================================================
    // Simple Delay (per-voice)
    // ================================================================
    if (shared.delayOn && shared.delayOn->load() > 0.5f)
    {
        auto& delay = chain.get<ChainIdx::delay>();

        const float time = shared.delayTime ? shared.delayTime->load() : 0.1f;
        delay.setDelay (juce::jmax (0.0f, time * (float) spec.sampleRate));

        delay.process (juce::dsp::ProcessContextReplacing<float> (block));
    }
    // ================================================================
    // Reverb
    // ================================================================
    if (shared.reverbOn && shared.reverbOn->load() > 0.5f)
    {
        juce::Reverb::Parameters rp;
        rp.damping    = shared.reverbDamping    ? shared.reverbDamping->load()    : 0.0f;
        rp.dryLevel   = shared.reverbDryLevel   ? shared.reverbDryLevel->load()   : 0.0f;
        rp.freezeMode = shared.reverbFreezeMode ? shared.reverbFreezeMode->load() : 0.0f;
        rp.roomSize   = shared.reverbRoomSize   ? shared.reverbRoomSize->load()   : 0.0f;
        rp.wetLevel   = shared.reverbWetLevel   ? shared.reverbWetLevel->load()   : 0.0f;
        rp.width      = shared.reverbWidth      ? shared.reverbWidth->load()      : 0.0f;

        chain.get<ChainIdx::reverb>().setParameters (rp);
        chain.get<ChainIdx::reverb>().process (juce::dsp::ProcessContextReplacing<float> (block));
    }

    // ================================================================
    // Distortion + tone (post LPF/Peak)
    // ================================================================
    {
        const float driveDb = shared.distortionDrive ? shared.distortionDrive->load() : 0.0f;
        const float bias    = juce::jlimit (-1.0f, 1.0f,
                                            shared.distortionBias ? shared.distortionBias->load() : 0.0f);
        const float toneHz  = juce::jlimit (100.0f, 8000.0f,
                                            shared.distortionTone ? shared.distortionTone->load() : 3000.0f);
        const int shape     = (int) std::lround (juce::jlimit (0.0f, 2.0f,
                                            shared.distortionShape ? shared.distortionShape->load() : 0.0f));
        const float mix     = shared.distortionMix ? shared.distortionMix->load() : 0.0f;

        auto& pre  = chain.get<ChainIdx::preGain>();
        auto& sh   = chain.get<ChainIdx::waveshaper>();
        auto& tone = chain.get<ChainIdx::tone>();

        pre.setGainDecibels (driveDb);

        // Explicit std::function target (works on MSVC)
        if (shape == 0)
            sh.functionToUse = std::function<float (float)> {
                [bias] (float x) noexcept { return std::tanh (x + bias); } };
        else if (shape == 1)
            sh.functionToUse = std::function<float (float)> {
                [bias] (float x) noexcept { return juce::jlimit (-1.0f, 1.0f, x + bias); } };
        else
            sh.functionToUse = std::function<float (float)> {
                [bias] (float x) noexcept { return std::atan (x + bias) * (2.0f / juce::MathConstants<float>::pi); } };

        // NB: the tone peak reuses the EQ high-band gain parameter for its gain.
        tone.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
            spec.sampleRate, toneHz, 0.707f,
            juce::Decibels::decibelsToGain (shared.highGainDbls ? shared.highGainDbls->load() : 0.0f));

        if (shared.distortionOn && shared.distortionOn->load() > 0.5f)
        {
            // Wet/dry blend around the shaper
            juce::AudioBuffer<float> dryCopy (tempBuffer.getNumChannels(), numSamples);
            for (int ch = 0; ch < numCh; ++ch)
                dryCopy.copyFrom (ch, 0, tempBuffer, ch, 0, numSamples);

            // pre -> shaper -> tone
            pre.process  (juce::dsp::ProcessContextReplacing<float> (block));
            sh.process   (juce::dsp::ProcessContextReplacing<float> (block));
            tone.process (juce::dsp::ProcessContextReplacing<float> (block));

            const float wet = mix, dry = 1.0f - mix;

            for (int ch = 0; ch < numCh; ++ch)
            {
                auto* d = dryCopy.getReadPointer (ch);
                auto* w = tempBuffer.getWritePointer (ch);

                for (int i = 0; i < numSamples; ++i)
                    w[i] = dry * d[i] + wet * w[i];
            }
        }
    }
    // ================================================================
    // EQ + Master + Limiter (EQ guarded by eqOn)
    // ================================================================
    {
        const bool eqEnabled = shared.eqOn && shared.eqOn->load() > 0.5f;

        auto& eqL = chain.get<ChainIdx::eqLow>();
        auto& eqM = chain.get<ChainIdx::eqMid>();
        auto& eqH = chain.get<ChainIdx::eqHigh>();

        if (eqEnabled)
        {
            eqL.coefficients = juce::dsp::IIR::Coefficients<float>::makeLowShelf (
                spec.sampleRate, 100.0f, 0.707f,
                juce::Decibels::decibelsToGain (shared.lowGainDbls ? shared.lowGainDbls->load() : 0.0f));

            eqM.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
                spec.sampleRate, 1000.0f, 1.0f,
                juce::Decibels::decibelsToGain (shared.midGainDbls ? shared.midGainDbls->load() : 0.0f));

            eqH.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
                spec.sampleRate, 10000.0f, 0.707f,
                juce::Decibels::decibelsToGain (shared.highGainDbls ? shared.highGainDbls->load() : 0.0f));

            eqL.process (juce::dsp::ProcessContextReplacing<float> (block));
            eqM.process (juce::dsp::ProcessContextReplacing<float> (block));
            eqH.process (juce::dsp::ProcessContextReplacing<float> (block));
        }

        chain.get<ChainIdx::masterGain>().setGainDecibels (
            shared.masterDbls ? shared.masterDbls->load() : 0.0f);

        chain.get<ChainIdx::masterGain>().process (juce::dsp::ProcessContextReplacing<float> (block));
        chain.get<ChainIdx::limiter>().process    (juce::dsp::ProcessContextReplacing<float> (block));
    }

    // ================================================================
    // Apply AMP ADSR envelope
    // ================================================================
    {
        juce::AudioBuffer<float> buf (tempBuffer.getArrayOfWritePointers(), numCh, numSamples);
        adsr.applyEnvelopeToBuffer (buf, 0, numSamples);
    }

    // Mix into output
    juce::dsp::AudioBlock<float> (outputBuffer)
        .getSubBlock ((size_t) startSample, (size_t) numSamples)
        .add (block);
}

//==============================================================================
void NeuralSynthVoice::noteStarted()
{
    const float freqHz = (float) getCurrentlyPlayingNote().getFrequencyInHertz();

    // Oscillator frequency and phase retrigger
    osc.setFrequency (freqHz);
    osc.resetPhase (0.0f);

    // Chorus snapshot
    if (shared.chorusCentre)   chain.get<ChainIdx::chorus>().setCentreDelay (shared.chorusCentre->load());
    if (shared.chorusDepth)    chain.get<ChainIdx::chorus>().setDepth       (shared.chorusDepth->load());
    if (shared.chorusFeedback) chain.get<ChainIdx::chorus>().setFeedback    (shared.chorusFeedback->load());
    if (shared.chorusMix)      chain.get<ChainIdx::chorus>().setMix         (shared.chorusMix->load());
    if (shared.chorusRate)     chain.get<ChainIdx::chorus>().setRate        (shared.chorusRate->load());

    // Delay time (in samples)
    if (shared.delayTime)
        chain.get<ChainIdx::delay>().setDelay (
            juce::jmax (0.0f, shared.delayTime->load() * (float) spec.sampleRate));

    // Reverb snapshot
    juce::Reverb::Parameters rp;
    rp.damping    = shared.reverbDamping    ? shared.reverbDamping->load()    : 0.0f;
    rp.dryLevel   = shared.reverbDryLevel   ? shared.reverbDryLevel->load()   : 0.0f;
    rp.freezeMode = shared.reverbFreezeMode ? shared.reverbFreezeMode->load() : 0.0f;
    rp.roomSize   = shared.reverbRoomSize   ? shared.reverbRoomSize->load()   : 0.0f;
    rp.wetLevel   = shared.reverbWetLevel   ? shared.reverbWetLevel->load()   : 0.0f;
    rp.width      = shared.reverbWidth      ? shared.reverbWidth->load()      : 0.0f;
    chain.get<ChainIdx::reverb>().setParameters (rp);

    // Amp ADSR
    juce::ADSR::Parameters ap;
    ap.attack  = shared.adsrAttack  ? shared.adsrAttack->load()  : 0.01f;
    ap.decay   = shared.adsrDecay   ? shared.adsrDecay->load()   : 0.10f;
    ap.sustain = shared.adsrSustain ? shared.adsrSustain->load() : 0.80f;
    ap.release = shared.adsrRelease ? shared.adsrRelease->load() : 0.40f;
    adsr.setParameters (ap);
    adsr.noteOn();
    // Filter ADSR
    juce::ADSR::Parameters fp;
    fp.attack  = shared.fenvAttack  ? shared.fenvAttack->load()  : 0.01f;
    fp.decay   = shared.fenvDecay   ? shared.fenvDecay->load()   : 0.10f;
    fp.sustain = shared.fenvSustain ? shared.fenvSustain->load() : 0.80f;
    fp.release = shared.fenvRelease ? shared.fenvRelease->load() : 0.40f;
    filterAdsr.setParameters (fp);
    filterAdsr.noteOn();
}

//==============================================================================
void NeuralSynthVoice::notePitchbendChanged()
{
    const float freqHz = (float) getCurrentlyPlayingNote().getFrequencyInHertz();
    osc.setFrequency (freqHz);
}

//==============================================================================
void NeuralSynthVoice::noteStopped (bool allowTailOff)
{
    juce::ignoreUnused (allowTailOff);
    adsr.noteOff();
    filterAdsr.noteOff();
}

//==============================================================================
void NeuralSynthVoice::setWaveform (int waveformType)
{
    switch (juce::jlimit (0, 3, waveformType))
    {
        case 0:  osc.setWave (BlepWave::Sine);     break;
        case 1:  osc.setWave (BlepWave::Saw);      break;
        case 2:  osc.setWave (BlepWave::Square);   break;
        case 3:  osc.setWave (BlepWave::Triangle); break;
        default: osc.setWave (BlepWave::Sine);     break;
    }
}
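//==============================================================================
// Usage note (illustrative, not part of this file): pendingWaveform is consumed
// lock-free at the top of renderNextBlock() via exchange (-1, acq_rel), so the
// owning processor / GUI thread is expected to publish waveform changes with a
// plain atomic store. The class, member, and loop names below are assumptions;
// only the pendingWaveform handoff itself is defined by this file.
//
//   void NeuralSynthProcessor::setWaveformForAllVoices (int newWaveform)   // hypothetical
//   {
//       for (auto* v : voices)                                             // hypothetical voice list
//           if (auto* voice = dynamic_cast<NeuralSynthVoice*> (v))
//               voice->pendingWaveform.store (newWaveform,                 // picked up on the next block
//                                             std::memory_order_release);
//   }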