More advanced version
@@ -1,5 +1,7 @@
#include "SynthVoice.h"

#include <cmath>

//==============================================================================
NeuralSynthVoice::NeuralSynthVoice(NeuralSharedParams& sp) : shared(sp) {}

@@ -10,60 +12,142 @@ void NeuralSynthVoice::prepare(const juce::dsp::ProcessSpec& spec)
    tempBlock = juce::dsp::AudioBlock<float>(heapBlock, spec.numChannels, spec.maximumBlockSize);
    processorChain.prepare(spec);
    adsr.setSampleRate(spec.sampleRate);

    this->spec = spec;
}

//==============================================================================
void NeuralSynthVoice::noteStarted()
{
    auto velocity = getCurrentlyPlayingNote().noteOnVelocity.asUnsignedFloat();
    auto freqHz = (float)getCurrentlyPlayingNote().getFrequencyInHertz();

    processorChain.get<synthIndex>().setFrequency(freqHz, true);

    juce::ADSR::Parameters p;
    p.attack = shared.attack->load();
    p.decay = shared.decay->load();
    p.sustain = shared.sustain->load();
    p.release = shared.release->load();

    adsr.setParameters(p);
    adsr.noteOn();
}

//==============================================================================
void NeuralSynthVoice::notePitchbendChanged()
{
    auto freqHz = (float)getCurrentlyPlayingNote().getFrequencyInHertz();
    processorChain.get<synthIndex>().setFrequency(freqHz, true);
}

//==============================================================================
void NeuralSynthVoice::noteStopped(bool allowTailOff)
{
    adsr.noteOff(); // Triggers the release phase
}

//==============================================================================
void NeuralSynthVoice::notePressureChanged() {}
void NeuralSynthVoice::noteTimbreChanged() {}
void NeuralSynthVoice::noteKeyStateChanged() {}

//==============================================================================
void NeuralSynthVoice::renderNextBlock(juce::AudioBuffer<float>& outputBuffer, int startSample, int numSamples)
{
    if (numSamples <= 0) return;

    if (!adsr.isActive())
        clearCurrentNote();

    // Apply a pending waveform change once, then reset the flag
    if (waveform != -1) {
        setWaveform(waveform);
        waveform = -1;
    }

    const int numChannels = outputBuffer.getNumChannels();

    auto block = tempBlock.getSubBlock(0, (size_t)numSamples);
    block.clear();
    juce::dsp::ProcessContextReplacing<float> context(block);
    processorChain.process(context);

    // =====================================================================
    // Oscillator
    // =====================================================================
    auto& osc = processorChain.get<oscIndex>();
    juce::dsp::ProcessContextReplacing<float> oscContext(block);
    osc.process(oscContext);

    // =====================================================================
    // Distortion
    // =====================================================================
    const float driveDb = shared.distortionDrive->load(); // 0..30
    //const float distMix = juce::jlimit(0.0f, 1.0f, shared.distortionMix->load());
    const float bias = juce::jlimit(-1.0f, 1.0f, shared.distortionBias->load());
    const float toneHz = juce::jlimit(100.0f, 8000.0f, shared.distortionTone->load());
    const int shape = (int)std::lround(juce::jlimit(0.0f, 2.0f, shared.distortionShape->load()));

    auto& distDry = processorChain.get<distortionPreGain>(); // currently unused; the pre-gain is fetched again below

    auto& distWaveshaper = processorChain.template get<distortionIndex>();

    if (shape == 0) {
        distWaveshaper.functionToUse = [bias](float x) noexcept {
            return std::tanh(x + bias);
        };
    }
    else if (shape == 1) {
        distWaveshaper.functionToUse = [bias](float x) noexcept {
            const float v = x + bias;
            return juce::jlimit(-1.0f, 1.0f, v);
        };
    }
    else if (shape == 2) {
        distWaveshaper.functionToUse = [bias](float x) noexcept {
            const float v = x + bias;
            return (float)(std::atan(v) * (2.0 / juce::MathConstants<double>::pi));
        };
    }
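
    // Quick reference (editorial): with bias = 0.2 and an input sample x = 0.5,
    // the three shapes give roughly:
    //   shape 0 (tanh soft clip):     tanh(0.7)           ~ 0.60
    //   shape 1 (hard clip):          jlimit(-1, 1, 0.7)  = 0.70
    //   shape 2 (arctan, normalised): atan(0.7) * 2/pi    ~ 0.39
    // A non-zero bias makes the clipping asymmetric and introduces a DC offset
    // into the output.
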
    auto& distPreGain = processorChain.template get<distortionPreGain>(); // [5]
    distPreGain.setGainDecibels(driveDb); // [6]

    auto& distPostLPF = processorChain.template get<distortionPostLPF>();
    distPostLPF.coefficients = *juce::dsp::IIR::Coefficients<float>::makePeakFilter(
        spec.sampleRate,
        toneHz,  // cutoff
        0.707f,  // Q
        juce::Decibels::decibelsToGain(shared.highGainDbls->load())
    );
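
    // NOTE (editorial, hedged): although this stage is named distortionPostLPF
    // and driven by the "tone" control, it is configured as a peak filter whose
    // gain reuses the high-band EQ parameter (highGainDbls). If a genuine
    // low-pass tone control is intended, something like
    //   distPostLPF.coefficients = juce::dsp::IIR::Coefficients<float>::makeLowPass(spec.sampleRate, toneHz);
    // would be the more conventional choice.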

    // =====================================================================
    // Flanger
    // =====================================================================
    // Grab the flanger delay line by reference so setDelay/push/pop act on the
    // instance inside the chain (a by-value copy would not keep its state).
    auto& flanger = processorChain.get<flangerIndex>();
    auto rate = shared.flangerPhase->load(); // NOTE: reads the phase parameter; a separate rate parameter may be intended
    auto lfoPhase = shared.flangerPhase->load();
    auto flangerDepth = shared.flangerDepth->load();
    auto mix = shared.flangerDryMix->load();
    auto feedback = shared.flangerFeedback->load();

    // Step 2: Apply the flanger sample-by-sample to the block
    // Get a pointer to the writable channel data
    auto* raw = block.getChannelPointer(0);

    for (int i = 0; i < numSamples; ++i)
    {
        float in = raw[i];

        float lfo = std::sin(lfoPhase);
        float delayTime = (1.0f + lfo) * 0.5f * flangerDepth * spec.sampleRate;

        flanger.setDelay(delayTime);

        float delayed = flanger.popSample(0);
        flanger.pushSample(0, in + delayed * feedback);

        raw[i] = in * (1.0f - mix) + delayed * mix;

        lfoPhase += juce::MathConstants<float>::twoPi * rate / spec.sampleRate;
        if (lfoPhase > juce::MathConstants<float>::twoPi)
            lfoPhase -= juce::MathConstants<float>::twoPi;
    }
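
    // Worked example (editorial, assumes flangerDepth is a time in seconds):
    // with flangerDepth = 0.005 and spec.sampleRate = 48000, (1 + lfo) * 0.5
    // sweeps 0..1, so delayTime sweeps 0..240 samples, i.e. a 0-5 ms modulated
    // delay, which is the classic flanger range. If flangerDepth is stored in
    // another unit, the scaling here would need to change accordingly.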

    // Step 3: Run through the ProcessorChain (filter + distortion)
    juce::dsp::ProcessContextReplacing<float> fxContext(block);
    processorChain.process(fxContext);

    auto& master = processorChain.get<masterIndex>();
    const auto masterDb = shared.masterDbls->load();
    master.setGainDecibels(masterDb);

    // LOW SHELF
    auto& lowEQ = processorChain.get<eqLowIndex>();
    lowEQ.coefficients = *juce::dsp::IIR::Coefficients<float>::makeLowShelf(
        spec.sampleRate,
        100.0f,   // cutoff
        0.707f,   // Q (not used by all filter types)
        juce::Decibels::decibelsToGain(shared.lowGainDbls->load())
    );

    // MID PEAK
    auto& midEQ = processorChain.get<eqMidIndex>();
    midEQ.coefficients = *juce::dsp::IIR::Coefficients<float>::makePeakFilter(
        spec.sampleRate,
        1000.0f,  // centre frequency
        1.0f,     // Q
        juce::Decibels::decibelsToGain(shared.midGainDbls->load())
    );

    // HIGH BAND (note: currently a peak filter at 10 kHz, not an actual high shelf)
    auto& highEQ = processorChain.get<eqHighIndex>();
    highEQ.coefficients = *juce::dsp::IIR::Coefficients<float>::makePeakFilter(
        spec.sampleRate,
        10000.0f, // centre frequency
        0.707f,   // Q
        juce::Decibels::decibelsToGain(shared.highGainDbls->load())
    );
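
    // NOTE (editorial, hedged): if a true shelf response is wanted for the high
    // band, JUCE provides a matching factory, e.g.
    //   highEQ.coefficients = juce::dsp::IIR::Coefficients<float>::makeHighShelf(
    //       spec.sampleRate, 10000.0f, 0.707f,
    //       juce::Decibels::decibelsToGain(shared.highGainDbls->load()));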

    // 3. Apply the ADSR envelope to tempBlock
    std::vector<float*> channelPtrs;
    for (size_t ch = 0; ch < tempBlock.getNumChannels(); ++ch)
@@ -80,9 +164,65 @@ void NeuralSynthVoice::renderNextBlock(juce::AudioBuffer<float>& outputBuffer, i
        .add(tempBlock);
}

//==============================================================================
void NeuralSynthVoice::noteStarted()
{
    auto velocity = getCurrentlyPlayingNote().noteOnVelocity.asUnsignedFloat();
    auto freqHz = (float)getCurrentlyPlayingNote().getFrequencyInHertz();

    processorChain.get<oscIndex>().setFrequency(freqHz, true);

    auto& chorus = processorChain.get<chorusIndex>();
    chorus.setCentreDelay(shared.chorusCentre->load());
    chorus.setDepth(shared.chorusDepth->load());
    chorus.setFeedback(shared.chorusFeedback->load());
    chorus.setMix(shared.chorusMix->load());
    chorus.setRate(shared.chorusRate->load());

    processorChain.get<delayIndex>().setDelay(shared.delayTime->load());
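    // NOTE (editorial, hedged): if the delay processor here is a
    // juce::dsp::DelayLine, setDelay expects the delay in samples. Should
    // shared.delayTime be stored in seconds or milliseconds, it would need
    // converting, e.g. setDelay(delaySeconds * (float) spec.sampleRate).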

    juce::Reverb::Parameters rp;

    rp.damping = shared.reverbDamping->load();
    rp.dryLevel = shared.reverbDryLevel->load();
    rp.freezeMode = shared.reverbFreezeMode->load();
    rp.roomSize = shared.reverbRoomSize->load();
    rp.wetLevel = shared.reverbWetLevel->load();
    rp.width = shared.reverbWidth->load();
    processorChain.get<reverbIndex>().setParameters(rp);

    juce::ADSR::Parameters p;
    p.attack = shared.adsrAttack->load();
    p.decay = shared.adsrDecay->load();
    p.sustain = shared.adsrSustain->load();
    p.release = shared.adsrRelease->load();

    adsr.setParameters(p);
    adsr.noteOn();
}

//==============================================================================
void NeuralSynthVoice::notePitchbendChanged()
{
    auto freqHz = (float)getCurrentlyPlayingNote().getFrequencyInHertz();
    processorChain.get<oscIndex>().setFrequency(freqHz, true);
}

//==============================================================================
void NeuralSynthVoice::noteStopped(bool allowTailOff)
{
    adsr.noteOff(); // Triggers the release phase
}

//==============================================================================
void NeuralSynthVoice::notePressureChanged() {}
void NeuralSynthVoice::noteTimbreChanged() {}
void NeuralSynthVoice::noteKeyStateChanged() {}

void NeuralSynthVoice::setWaveform(int waveformType)
{
    auto& osc = processorChain.template get<oscIndex>();

    switch (waveformType)
    {