Some splitting of code
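This change splits the body of NeuralSynthVoice::renderNextBlock into per-effect helpers (renderFlanger, renderADSR, renderChorus, renderSimpleDelay, renderDistortion, renderEQ, renderReverb). The helpers are declared in SynthVoice.h, and each one is defined in its own header under Source/SynthVoice/, which SynthVoice.cpp includes. As a rough, single-file sketch of that out-of-class-definition pattern (illustrative names only, not code from this commit):

    #include <iostream>

    struct DemoVoice
    {
        void renderChorus (float depth);   // declared with the class ...
    };

    // ... defined out of class. In the commit, each such body lives in its own
    // header (e.g. Source/SynthVoice/Chorus.h) so every effect can be read in isolation.
    void DemoVoice::renderChorus (float depth)
    {
        std::cout << "chorus depth = " << depth << "\n";
    }

    int main()
    {
        DemoVoice v;
        v.renderChorus (0.25f);
        return 0;
    }

Because the per-effect headers contain non-inline member-function definitions, they should be included from exactly one translation unit (here SynthVoice.cpp) to avoid duplicate-symbol link errors.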
@@ -325,6 +325,8 @@ void NeuralSynthAudioProcessorEditor::showPresetMenu()
        menu.addSubMenu(category, sub);
    }

    menu.addItem(categories.size() + 1, "Custom ...", true, false);

    menu.showMenuAsync(juce::PopupMenu::Options().withParentComponent(this),
        [this, baseId](int result)
        {
@@ -1,5 +1,13 @@
#include "SynthVoice.h"

#include <cmath>
#include "SynthVoice/ADSR.h"
#include "SynthVoice/Chorus.h"
#include "SynthVoice/Distortion.h"
#include "SynthVoice/EQ.h"
#include "SynthVoice/Flanger.h"
#include "SynthVoice/Reverb.h"
#include "SynthVoice/SimpleDelay.h"

//==============================================================================
@@ -201,216 +209,13 @@ void NeuralSynthVoice::renderNextBlock (juce::AudioBuffer<float>& outputBuffer,

    auto block = tempBlock.getSubBlock (0, (size_t) numSamples);

    // ================================================================
    // Flanger (pre-filter) – manual per-sample to set varying delay
    // ================================================================
    {
        auto& flanger = chain.get<flangerIndex>();
    renderFlanger(numSamples, numCh);
    renderADSR(numSamples, numCh);
    renderChorus(block);
    renderSimpleDelay(block);
    renderDistortion(numSamples, numCh, block);
    renderEQ(block);

        const bool enabled = shared.flangerOn && shared.flangerOn->load() > 0.5f;
        if (enabled)
        {
            const float rate = shared.flangerRate ? shared.flangerRate->load() : 0.0f;
            float lfoPhase = shared.flangerPhase ? shared.flangerPhase->load() : 0.0f;
            const float flangerDepth = shared.flangerDepth ? shared.flangerDepth->load() : 0.0f; // ms
            const float mix = shared.flangerDryMix ? shared.flangerDryMix->load() : 0.0f;
            const float feedback = shared.flangerFeedback ? shared.flangerFeedback->load() : 0.0f;
            const float baseDelayMs = shared.flangerDelay ? shared.flangerDelay->load() : 0.25f;

            for (int i = 0; i < numSamples; ++i)
            {
                const float in = tempBuffer.getReadPointer (0)[i];

                const float lfo = std::sin (lfoPhase);
                const float delayMs = baseDelayMs + 0.5f * (1.0f + lfo) * flangerDepth;
                const float delaySamples = juce::jmax (0.0f, delayMs * 0.001f * (float) spec.sampleRate);

                flanger.setDelay (delaySamples);

                const float delayed = flanger.popSample (0);
                flanger.pushSample (0, in + delayed * feedback);

                const float out = in * (1.0f - mix) + delayed * mix;
                for (int ch = 0; ch < numCh; ++ch)
                    tempBuffer.getWritePointer (ch)[i] = out;

                lfoPhase += juce::MathConstants<float>::twoPi * rate / (float) spec.sampleRate;
                if (lfoPhase > juce::MathConstants<float>::twoPi)
                    lfoPhase -= juce::MathConstants<float>::twoPi;
            }
        }
    }

    // ================================================================
    // Filter with per-sample ADSR modulation (poly)
    // ================================================================
    {
        const bool enabled = shared.filterOn && shared.filterOn->load() > 0.5f;

        // Update filter type every block (cheap)
        const int ftype = (int) std::lround (juce::jlimit (0.0f, 2.0f,
                              shared.filterType ? shared.filterType->load() : 0.0f));
        switch (ftype)
        {
            case 0: svf.setType (juce::dsp::StateVariableTPTFilterType::lowpass); break;
            case 1: svf.setType (juce::dsp::StateVariableTPTFilterType::highpass); break;
            case 2: svf.setType (juce::dsp::StateVariableTPTFilterType::bandpass); break;
            default: break;
        }

        const float qOrRes = juce::jlimit (0.1f, 10.0f,
                                 shared.filterResonance ? shared.filterResonance->load() : 0.7f);
        svf.setResonance (qOrRes);

        const float baseCutoff = juce::jlimit (20.0f, 20000.0f,
                                     shared.filterCutoff ? shared.filterCutoff->load() : 1000.0f);
        const float envAmt = shared.fenvAmount ? shared.fenvAmount->load() : 0.0f;

        for (int i = 0; i < numSamples; ++i)
        {
            const float envVal = filterAdsr.getNextSample();
            const float cutoff = juce::jlimit (20.0f, 20000.0f,
                                     baseCutoff * std::pow (2.0f, envAmt * envVal));
            svf.setCutoffFrequency (cutoff);

            if (enabled)
            {
                for (int ch = 0; ch < numCh; ++ch)
                {
                    float x = tempBuffer.getSample (ch, i);
                    x = svf.processSample (ch, x);
                    tempBuffer.setSample (ch, i, x);
                }
            }
        }
    }

    // ================================================================
    // Chorus
    // ================================================================
    if (shared.chorusOn && shared.chorusOn->load() > 0.5f)
    {
        auto& chorus = chain.get<chorusIndex>();
        if (shared.chorusCentre) chorus.setCentreDelay (shared.chorusCentre->load());
        if (shared.chorusDepth) chorus.setDepth (shared.chorusDepth->load());
        if (shared.chorusFeedback) chorus.setFeedback (shared.chorusFeedback->load());
        if (shared.chorusMix) chorus.setMix (shared.chorusMix->load());
        if (shared.chorusRate) chorus.setRate (shared.chorusRate->load());

        chain.get<chorusIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
    }

    // ================================================================
    // Simple Delay (per-voice)
    // ================================================================
    if (shared.delayOn && shared.delayOn->load() > 0.5f)
    {
        auto& delay = chain.get<delayIndex>();
        const float time = shared.delayTime ? shared.delayTime->load() : 0.1f;
        delay.setDelay (juce::jmax (0.0f, time * (float) spec.sampleRate));
        delay.process (juce::dsp::ProcessContextReplacing<float> (block));
    }

    // ================================================================
    // Reverb
    // ================================================================
    if (shared.reverbOn && shared.reverbOn->load() > 0.5f)
    {
        juce::Reverb::Parameters rp;
        rp.damping = shared.reverbDamping ? shared.reverbDamping->load() : 0.0f;
        rp.dryLevel = shared.reverbDryLevel ? shared.reverbDryLevel->load() : 0.0f;
        rp.freezeMode = shared.reverbFreezeMode ? shared.reverbFreezeMode->load() : 0.0f;
        rp.roomSize = shared.reverbRoomSize ? shared.reverbRoomSize->load() : 0.0f;
        rp.wetLevel = shared.reverbWetLevel ? shared.reverbWetLevel->load() : 0.0f;
        rp.width = shared.reverbWidth ? shared.reverbWidth->load() : 0.0f;

        chain.get<reverbIndex>().setParameters (rp);
        chain.get<reverbIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
    }

    // ================================================================
    // Distortion + tone (post LPF/Peak)
    // ================================================================
    {
        const float driveDb = shared.distortionDrive ? shared.distortionDrive->load() : 0.0f;
        const float bias = juce::jlimit (-1.0f, 1.0f, shared.distortionBias ? shared.distortionBias->load() : 0.0f);
        const float toneHz = juce::jlimit (100.0f, 8000.0f, shared.distortionTone ? shared.distortionTone->load() : 3000.0f);
        const int shape = (int) std::lround (juce::jlimit (0.0f, 2.0f,
                              shared.distortionShape ? shared.distortionShape->load() : 0.0f));
        const float mix = shared.distortionMix ? shared.distortionMix->load() : 0.0f;

        auto& pre = chain.get<distortionPreGain>();
        auto& sh = chain.get<distortionIndex>();
        auto& tone = chain.get<distortionPostLPF>();

        pre.setGainDecibels (driveDb);

        // Explicit std::function target (works on MSVC)
        if (shape == 0) sh.functionToUse = std::function<float(float)>{ [bias](float x) noexcept { return std::tanh (x + bias); } };
        else if (shape == 1) sh.functionToUse = std::function<float(float)>{ [bias](float x) noexcept { return juce::jlimit (-1.0f, 1.0f, x + bias); } };
        else sh.functionToUse = std::function<float(float)>{ [bias](float x) noexcept { return std::atan (x + bias) * (2.0f / juce::MathConstants<float>::pi); } };

        tone.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
            spec.sampleRate, toneHz, 0.707f,
            juce::Decibels::decibelsToGain (shared.highGainDbls ? shared.highGainDbls->load() : 0.0f));

        if (shared.distortionOn && shared.distortionOn->load() > 0.5f)
        {
            // Wet/dry blend around the shaper
            juce::AudioBuffer<float> dryCopy (tempBuffer.getNumChannels(), numSamples);
            for (int ch = 0; ch < numCh; ++ch)
                dryCopy.copyFrom (ch, 0, tempBuffer, ch, 0, numSamples);

            // pre -> shaper -> tone
            pre.process (juce::dsp::ProcessContextReplacing<float> (block));
            sh.process (juce::dsp::ProcessContextReplacing<float> (block));
            tone.process (juce::dsp::ProcessContextReplacing<float> (block));

            const float wet = mix, dry = 1.0f - mix;
            for (int ch = 0; ch < numCh; ++ch)
            {
                auto* d = dryCopy.getReadPointer (ch);
                auto* w = tempBuffer.getWritePointer (ch);
                for (int i = 0; i < numSamples; ++i)
                    w[i] = dry * d[i] + wet * w[i];
            }
        }
    }

    // ================================================================
    // EQ + Master + Limiter (EQ guarded by eqOn)
    // ================================================================
    {
        const bool eqEnabled = shared.eqOn && shared.eqOn->load() > 0.5f;

        auto& eqL = chain.get<eqLowIndex>();
        auto& eqM = chain.get<eqMidIndex>();
        auto& eqH = chain.get<eqHighIndex>();

        if (eqEnabled)
        {
            eqL.coefficients = juce::dsp::IIR::Coefficients<float>::makeLowShelf (
                spec.sampleRate, 100.0f, 0.707f,
                juce::Decibels::decibelsToGain (shared.lowGainDbls ? shared.lowGainDbls->load() : 0.0f));

            eqM.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
                spec.sampleRate, 1000.0f, 1.0f,
                juce::Decibels::decibelsToGain (shared.midGainDbls ? shared.midGainDbls->load() : 0.0f));

            eqH.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
                spec.sampleRate, 10000.0f, 0.707f,
                juce::Decibels::decibelsToGain (shared.highGainDbls ? shared.highGainDbls->load() : 0.0f));

            eqL.process (juce::dsp::ProcessContextReplacing<float> (block));
            eqM.process (juce::dsp::ProcessContextReplacing<float> (block));
            eqH.process (juce::dsp::ProcessContextReplacing<float> (block));
        }

        chain.get<masterIndex>().setGainDecibels (shared.masterDbls ? shared.masterDbls->load() : 0.0f);
        chain.get<masterIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));

        chain.get<limiterIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
    }

    // ================================================================
    // Apply AMP ADSR envelope
@@ -78,6 +78,15 @@ private:
    using Reverb = juce::dsp::Reverb;
    using Limiter = juce::dsp::Limiter<float>;

    // Separate functions for different parts
    void renderReverb(juce::dsp::AudioBlock<float> &block);
    void renderSimpleDelay(juce::dsp::AudioBlock<float> &block);
    void renderADSR(int numSamples, int numCh);
    void renderChorus(juce::dsp::AudioBlock<float> &block);
    void renderFlanger(int numSamples, int numCh);
    void renderDistortion(int numSamples, int numCh, juce::dsp::AudioBlock<float> &block);
    void renderEQ(juce::dsp::AudioBlock<float> &block);

    enum ChainIndex
    {
        flangerIndex = 0,
Source/SynthVoice/ADSR.h (new file, 46 lines)
@@ -0,0 +1,46 @@
#pragma once
#include "../SynthVoice.h"

void NeuralSynthVoice::renderADSR(int numSamples, int numCh) {
    // ================================================================
    // Filter with per-sample ADSR modulation (poly)
    // ================================================================
    const bool enabled = shared.filterOn && shared.filterOn->load() > 0.5f;

    // Update filter type every block (cheap)
    const int ftype = (int) std::lround (juce::jlimit (0.0f, 2.0f,
                          shared.filterType ? shared.filterType->load() : 0.0f));
    switch (ftype)
    {
        case 0: svf.setType (juce::dsp::StateVariableTPTFilterType::lowpass); break;
        case 1: svf.setType (juce::dsp::StateVariableTPTFilterType::highpass); break;
        case 2: svf.setType (juce::dsp::StateVariableTPTFilterType::bandpass); break;
        default: break;
    }

    const float qOrRes = juce::jlimit (0.1f, 10.0f,
                             shared.filterResonance ? shared.filterResonance->load() : 0.7f);
    svf.setResonance (qOrRes);

    const float baseCutoff = juce::jlimit (20.0f, 20000.0f,
                                 shared.filterCutoff ? shared.filterCutoff->load() : 1000.0f);
    const float envAmt = shared.fenvAmount ? shared.fenvAmount->load() : 0.0f;

    for (int i = 0; i < numSamples; ++i)
    {
        const float envVal = filterAdsr.getNextSample();
        const float cutoff = juce::jlimit (20.0f, 20000.0f,
                                 baseCutoff * std::pow (2.0f, envAmt * envVal));
        svf.setCutoffFrequency (cutoff);

        if (enabled)
        {
            for (int ch = 0; ch < numCh; ++ch)
            {
                float x = tempBuffer.getSample (ch, i);
                x = svf.processSample (ch, x);
                tempBuffer.setSample (ch, i, x);
            }
        }
    }
}
Source/SynthVoice/Chorus.h (new file, 19 lines)
@@ -0,0 +1,19 @@
#pragma once
#include "../SynthVoice.h"

void NeuralSynthVoice::renderChorus(juce::dsp::AudioBlock<float> &block) {
    // ================================================================
    // Chorus
    // ================================================================
    if (shared.chorusOn && shared.chorusOn->load() > 0.5f)
    {
        auto& chorus = chain.get<chorusIndex>();
        if (shared.chorusCentre) chorus.setCentreDelay (shared.chorusCentre->load());
        if (shared.chorusDepth) chorus.setDepth (shared.chorusDepth->load());
        if (shared.chorusFeedback) chorus.setFeedback (shared.chorusFeedback->load());
        if (shared.chorusMix) chorus.setMix (shared.chorusMix->load());
        if (shared.chorusRate) chorus.setRate (shared.chorusRate->load());

        chain.get<chorusIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
    }
}
Source/SynthVoice/Distortion.h (new file, 54 lines)
@@ -0,0 +1,54 @@
#pragma once
#include "../SynthVoice.h"

void NeuralSynthVoice::renderDistortion(
    int numSamples,
    int numCh,
    juce::dsp::AudioBlock<float> &block) {
    // ================================================================
    // Distortion + tone (post LPF/Peak)
    // ================================================================
    const float driveDb = shared.distortionDrive ? shared.distortionDrive->load() : 0.0f;
    const float bias = juce::jlimit (-1.0f, 1.0f, shared.distortionBias ? shared.distortionBias->load() : 0.0f);
    const float toneHz = juce::jlimit (100.0f, 8000.0f, shared.distortionTone ? shared.distortionTone->load() : 3000.0f);
    const int shape = (int) std::lround (juce::jlimit (0.0f, 2.0f,
                          shared.distortionShape ? shared.distortionShape->load() : 0.0f));
    const float mix = shared.distortionMix ? shared.distortionMix->load() : 0.0f;

    auto& pre = chain.get<distortionPreGain>();
    auto& sh = chain.get<distortionIndex>();
    auto& tone = chain.get<distortionPostLPF>();

    pre.setGainDecibels (driveDb);

    // Explicit std::function target (works on MSVC)
    if (shape == 0) sh.functionToUse = std::function<float(float)>{ [bias](float x) noexcept { return std::tanh (x + bias); } };
    else if (shape == 1) sh.functionToUse = std::function<float(float)>{ [bias](float x) noexcept { return juce::jlimit (-1.0f, 1.0f, x + bias); } };
    else sh.functionToUse = std::function<float(float)>{ [bias](float x) noexcept { return std::atan (x + bias) * (2.0f / juce::MathConstants<float>::pi); } };

    tone.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
        spec.sampleRate, toneHz, 0.707f,
        juce::Decibels::decibelsToGain (shared.highGainDbls ? shared.highGainDbls->load() : 0.0f));

    if (shared.distortionOn && shared.distortionOn->load() > 0.5f)
    {
        // Wet/dry blend around the shaper
        juce::AudioBuffer<float> dryCopy (tempBuffer.getNumChannels(), numSamples);
        for (int ch = 0; ch < numCh; ++ch)
            dryCopy.copyFrom (ch, 0, tempBuffer, ch, 0, numSamples);

        // pre -> shaper -> tone
        pre.process (juce::dsp::ProcessContextReplacing<float> (block));
        sh.process (juce::dsp::ProcessContextReplacing<float> (block));
        tone.process (juce::dsp::ProcessContextReplacing<float> (block));

        const float wet = mix, dry = 1.0f - mix;
        for (int ch = 0; ch < numCh; ++ch)
        {
            auto* d = dryCopy.getReadPointer (ch);
            auto* w = tempBuffer.getWritePointer (ch);
            for (int i = 0; i < numSamples; ++i)
                w[i] = dry * d[i] + wet * w[i];
        }
    }
}
Source/SynthVoice/EQ.h (new file, 38 lines)
@@ -0,0 +1,38 @@
#pragma once
#include "../SynthVoice.h"

void NeuralSynthVoice::renderEQ(juce::dsp::AudioBlock<float> &block)
{
    // ================================================================
    // EQ + Master + Limiter (EQ guarded by eqOn)
    // ================================================================
    const bool eqEnabled = shared.eqOn && shared.eqOn->load() > 0.5f;

    auto& eqL = chain.get<eqLowIndex>();
    auto& eqM = chain.get<eqMidIndex>();
    auto& eqH = chain.get<eqHighIndex>();

    if (eqEnabled)
    {
        eqL.coefficients = juce::dsp::IIR::Coefficients<float>::makeLowShelf (
            spec.sampleRate, 100.0f, 0.707f,
            juce::Decibels::decibelsToGain (shared.lowGainDbls ? shared.lowGainDbls->load() : 0.0f));

        eqM.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
            spec.sampleRate, 1000.0f, 1.0f,
            juce::Decibels::decibelsToGain (shared.midGainDbls ? shared.midGainDbls->load() : 0.0f));

        eqH.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
            spec.sampleRate, 10000.0f, 0.707f,
            juce::Decibels::decibelsToGain (shared.highGainDbls ? shared.highGainDbls->load() : 0.0f));

        eqL.process (juce::dsp::ProcessContextReplacing<float> (block));
        eqM.process (juce::dsp::ProcessContextReplacing<float> (block));
        eqH.process (juce::dsp::ProcessContextReplacing<float> (block));
    }

    chain.get<masterIndex>().setGainDecibels (shared.masterDbls ? shared.masterDbls->load() : 0.0f);
    chain.get<masterIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));

    chain.get<limiterIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
}
Source/SynthVoice/Flanger.h (new file, 43 lines)
@@ -0,0 +1,43 @@
#pragma once
#include "../SynthVoice.h"

void NeuralSynthVoice::renderFlanger(int numSamples, int numCh)
{
    // ================================================================
    // Flanger (pre-filter) – manual per-sample to set varying delay
    // ================================================================
    auto& flanger = chain.get<flangerIndex>();

    const bool enabled = shared.flangerOn && shared.flangerOn->load() > 0.5f;
    if (enabled)
    {
        const float rate = shared.flangerRate ? shared.flangerRate->load() : 0.0f;
        float lfoPhase = shared.flangerPhase ? shared.flangerPhase->load() : 0.0f;
        const float flangerDepth = shared.flangerDepth ? shared.flangerDepth->load() : 0.0f; // ms
        const float mix = shared.flangerDryMix ? shared.flangerDryMix->load() : 0.0f;
        const float feedback = shared.flangerFeedback ? shared.flangerFeedback->load() : 0.0f;
        const float baseDelayMs = shared.flangerDelay ? shared.flangerDelay->load() : 0.25f;

        for (int i = 0; i < numSamples; ++i)
        {
            const float in = tempBuffer.getReadPointer (0)[i];

            const float lfo = std::sin (lfoPhase);
            const float delayMs = baseDelayMs + 0.5f * (1.0f + lfo) * flangerDepth;
            const float delaySamples = juce::jmax (0.0f, delayMs * 0.001f * (float) spec.sampleRate);

            flanger.setDelay (delaySamples);

            const float delayed = flanger.popSample (0);
            flanger.pushSample (0, in + delayed * feedback);

            const float out = in * (1.0f - mix) + delayed * mix;
            for (int ch = 0; ch < numCh; ++ch)
                tempBuffer.getWritePointer (ch)[i] = out;

            lfoPhase += juce::MathConstants<float>::twoPi * rate / (float) spec.sampleRate;
            if (lfoPhase > juce::MathConstants<float>::twoPi)
                lfoPhase -= juce::MathConstants<float>::twoPi;
        }
    }
}
Source/SynthVoice/Reverb.h (new file, 22 lines)
@@ -0,0 +1,22 @@
#pragma once
#include "../SynthVoice.h"

void NeuralSynthVoice::renderReverb(juce::dsp::AudioBlock<float> &block) {
    // ================================================================
    // Reverb
    // ================================================================
    if (shared.reverbOn && shared.reverbOn->load() > 0.5f)
    {
        juce::Reverb::Parameters rp;
        rp.damping = shared.reverbDamping ? shared.reverbDamping->load() : 0.0f;
        rp.dryLevel = shared.reverbDryLevel ? shared.reverbDryLevel->load() : 0.0f;
        rp.freezeMode = shared.reverbFreezeMode ? shared.reverbFreezeMode->load() : 0.0f;
        rp.roomSize = shared.reverbRoomSize ? shared.reverbRoomSize->load() : 0.0f;
        rp.wetLevel = shared.reverbWetLevel ? shared.reverbWetLevel->load() : 0.0f;
        rp.width = shared.reverbWidth ? shared.reverbWidth->load() : 0.0f;

        chain.get<reverbIndex>().setParameters (rp);
        chain.get<reverbIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
    }
}
Source/SynthVoice/SimpleDelay.h (new file, 16 lines)
@@ -0,0 +1,16 @@
#pragma once
#include "../SynthVoice.h"

void NeuralSynthVoice::renderSimpleDelay(juce::dsp::AudioBlock<float> &block)
{
    // ================================================================
    // Simple Delay (per-voice)
    // ================================================================
    if (shared.delayOn && shared.delayOn->load() > 0.5f)
    {
        auto& delay = chain.get<delayIndex>();
        const float time = shared.delayTime ? shared.delayTime->load() : 0.1f;
        delay.setDelay (juce::jmax (0.0f, time * (float) spec.sampleRate));
        delay.process (juce::dsp::ProcessContextReplacing<float> (block));
    }
}