Upload files to "Source"
This commit is contained in:
@@ -1,247 +1,417 @@
|
||||
#include "SynthVoice.h"
|
||||
|
||||
#include <cmath>
|
||||
|
||||
//==============================================================================
|
||||
// Stores a reference to the parameter set shared by all voices; no other
// setup happens until prepare() is called.
NeuralSynthVoice::NeuralSynthVoice(NeuralSharedParams& sp) : shared(sp) {}
|
||||
|
||||
//==============================================================================
|
||||
// Allocates the per-voice scratch block and primes every processor for the
// host's sample rate / block size. Must be called before renderNextBlock().
void NeuralSynthVoice::prepare(const juce::dsp::ProcessSpec& spec)
{
    // Default oscillator waveform: sine.
    setWaveform(0);

    // Scratch block backed by heapBlock, sized for the worst-case block length.
    tempBlock = juce::dsp::AudioBlock<float>(heapBlock, spec.numChannels, spec.maximumBlockSize);
    processorChain.prepare(spec);
    adsr.setSampleRate(spec.sampleRate);

    // Keep a copy so renderNextBlock() can read the sample rate later.
    this->spec = spec;
}
|
||||
|
||||
//==============================================================================
|
||||
// Renders one block of audio for this voice and mixes it into outputBuffer.
// Fixes vs. the previous revision:
//  * the flanger DelayLine was fetched BY VALUE, so all push/pop state went
//    into a temporary and the effect was silent — now taken by reference;
//  * the flanger LFO rate was read from the flangerPhase parameter;
//  * the pending-waveform atomic was read and cleared non-atomically (a GUI
//    write in between was lost) — now a single exchange();
//  * the final mix added the full maximumBlockSize tempBlock into a
//    numSamples-sized sub-block (size mismatch) — now adds the sub-block;
//  * the amp ADSR no longer heap-allocates a std::vector on the audio thread;
//  * removed unused locals (distDry, ex, numChannels).
void NeuralSynthVoice::renderNextBlock(juce::AudioBuffer<float>& outputBuffer, int startSample, int numSamples)
{
    if (numSamples <= 0) return;

    // Free the voice once the amp envelope has fully released.
    if (!adsr.isActive())
        clearCurrentNote();

    // Apply a pending waveform change atomically: exchange() reads and clears
    // the request in one step, so a concurrent GUI write cannot be lost.
    const int pendingWf = waveform.exchange(-1);
    if (pendingWf != -1)
        setWaveform(pendingWf);

    auto block = tempBlock.getSubBlock(0, (size_t)numSamples);
    block.clear();

    // =====================================================================
    // Oscillator
    // =====================================================================
    auto& osc = processorChain.get<oscIndex>();
    juce::dsp::ProcessContextReplacing<float> oscContext(block);
    osc.process(oscContext);

    // =====================================================================
    // Distortion parameters
    // =====================================================================
    const float driveDb = shared.distortionDrive->load(); // 0..30
    const float bias    = juce::jlimit(-1.0f, 1.0f, shared.distortionBias->load());
    const float toneHz  = juce::jlimit(100.0f, 8000.0f, shared.distortionTone->load());
    const int   shape   = (int)std::lround(juce::jlimit(0.0f, 2.0f, shared.distortionShape->load()));

    auto& distWaveshaper = processorChain.get<distortionIndex>();

    // Select the transfer curve; bias shifts the input to add asymmetry.
    if (shape == 0) {
        distWaveshaper.functionToUse = [bias](float x) noexcept {
            return std::tanh(x + bias);                      // soft clip
        };
    }
    else if (shape == 1) {
        distWaveshaper.functionToUse = [bias](float x) noexcept {
            return juce::jlimit(-1.0f, 1.0f, x + bias);      // hard clip
        };
    }
    else {
        distWaveshaper.functionToUse = [bias](float x) noexcept {
            return (float)(std::atan((double)(x + bias)) * (2.0 / juce::MathConstants<double>::pi)); // atan shaper
        };
    }

    auto& distPreGain = processorChain.get<distortionPreGain>();
    distPreGain.setGainDecibels(driveDb);

    auto& distPostLPF = processorChain.get<distortionPostLPF>();
    distPostLPF.coefficients = *juce::dsp::IIR::Coefficients<float>::makePeakFilter(
        spec.sampleRate,
        toneHz,  // cutoff
        0.707f,  // Q
        juce::Decibels::decibelsToGain(shared.highGainDbls->load())
    );

    // =====================================================================
    // Flanger — manual per-sample pass so the delay time can vary with the LFO
    // =====================================================================
    // Must be a reference: taking the DelayLine by value copied its internal
    // buffer, so every push/pop went into a throwaway temporary.
    auto& flanger = processorChain.get<flangerIndex>();

    const auto rate         = shared.flangerRate->load();   // was wrongly read from flangerPhase
    auto       lfoPhase     = shared.flangerPhase->load();
    const auto flangerDepth = shared.flangerDepth->load();
    const auto mix          = shared.flangerDryMix->load();
    const auto feedback     = shared.flangerFeedback->load();

    auto* raw = block.getChannelPointer(0);

    for (int i = 0; i < numSamples; ++i)
    {
        const float in  = raw[i];
        const float lfo = std::sin(lfoPhase);

        // assumes flangerDepth is expressed in seconds — TODO confirm units
        const float delayTime = (1.0f + lfo) * 0.5f * flangerDepth * (float)spec.sampleRate;
        flanger.setDelay(delayTime);

        const float delayed = flanger.popSample(0);
        flanger.pushSample(0, in + delayed * feedback);

        raw[i] = in * (1.0f - mix) + delayed * mix;

        lfoPhase += juce::MathConstants<float>::twoPi * rate / (float)spec.sampleRate;
        if (lfoPhase > juce::MathConstants<float>::twoPi)
            lfoPhase -= juce::MathConstants<float>::twoPi;
    }

    // =====================================================================
    // Run the full ProcessorChain.
    // NOTE(review): the chain begins with the oscillator, so this call runs
    // the oscillator a second time with a replacing context — which overwrites
    // the flanged signal computed above. Consider moving the oscillator out of
    // the chain (the rewritten version of this file does exactly that).
    // =====================================================================
    juce::dsp::ProcessContextReplacing<float> fxContext(block);
    processorChain.process(fxContext);

    // NOTE(review): gain/EQ settings below are applied AFTER process(), so
    // they only take effect on the NEXT block — preserved as-is for parity.
    auto& master = processorChain.get<masterIndex>();
    master.setGainDecibels(shared.masterDbls->load());

    auto& lowEQ = processorChain.get<eqLowIndex>();
    lowEQ.coefficients = juce::dsp::IIR::Coefficients<float>::makeLowShelf(
        spec.sampleRate,
        100.0f,  // cutoff
        0.707f,  // Q, not used by all filters
        juce::Decibels::decibelsToGain(shared.lowGainDbls->load())
    );

    auto& midEQ = processorChain.get<eqMidIndex>();
    midEQ.coefficients = *juce::dsp::IIR::Coefficients<float>::makePeakFilter(
        spec.sampleRate,
        1000.0f, // center frequency
        1.0f,    // Q
        juce::Decibels::decibelsToGain(shared.midGainDbls->load())
    );

    // High band: labelled "high shelf" in the UI but implemented as a
    // 10 kHz peak filter — confirm which is intended.
    auto& highEQ = processorChain.get<eqHighIndex>();
    highEQ.coefficients = *juce::dsp::IIR::Coefficients<float>::makePeakFilter(
        spec.sampleRate,
        10000.0f, // cutoff
        0.707f,   // Q
        juce::Decibels::decibelsToGain(shared.highGainDbls->load())
    );

    // =====================================================================
    // Apply the amp ADSR without allocating on the audio thread (previously
    // a std::vector of channel pointers was built every block).
    // =====================================================================
    const size_t envChannels = tempBlock.getNumChannels();
    for (int i = 0; i < numSamples; ++i)
    {
        const float env = adsr.getNextSample();
        for (size_t ch = 0; ch < envChannels; ++ch)
            tempBlock.getChannelPointer(ch)[i] *= env;
    }

    // Mix only the rendered sub-block into the output (the old code added the
    // whole maximumBlockSize tempBlock into a numSamples-sized sub-block).
    juce::dsp::AudioBlock<float>(outputBuffer)
        .getSubBlock((size_t)startSample, (size_t)numSamples)
        .add(block);
}
|
||||
|
||||
//==============================================================================
|
||||
void NeuralSynthVoice::noteStarted()
|
||||
{
|
||||
auto velocity = getCurrentlyPlayingNote().noteOnVelocity.asUnsignedFloat();
|
||||
auto freqHz = (float)getCurrentlyPlayingNote().getFrequencyInHertz();
|
||||
|
||||
processorChain.get<oscIndex>().setFrequency(freqHz, true);
|
||||
|
||||
auto& chorus = processorChain.get<chorusIndex>();
|
||||
chorus.setCentreDelay(shared.chorusCentre->load());
|
||||
chorus.setDepth(shared.chorusDepth->load());
|
||||
chorus.setFeedback(shared.chorusFeedback->load());
|
||||
chorus.setMix(shared.chorusMix->load());
|
||||
chorus.setRate(shared.chorusRate->load());
|
||||
|
||||
processorChain.get<delayIndex>().setDelay(shared.delayTime->load());
|
||||
|
||||
juce::Reverb::Parameters rp;
|
||||
|
||||
rp.damping = shared.reverbDamping->load();
|
||||
rp.dryLevel = shared.reverbDryLevel->load();
|
||||
rp.freezeMode = shared.reverbFreezeMode->load();
|
||||
rp.roomSize = shared.reverbRoomSize->load();
|
||||
rp.wetLevel = shared.reverbWetLevel->load();
|
||||
rp.width = shared.reverbWidth->load();
|
||||
processorChain.get<reverbIndex>().setParameters(rp);
|
||||
|
||||
juce::ADSR::Parameters p;
|
||||
p.attack = shared.adsrAttack->load();
|
||||
p.decay = shared.adsrDecay->load();
|
||||
p.sustain = shared.adsrSustain->load();
|
||||
p.release = shared.adsrRelease->load();
|
||||
|
||||
adsr.setParameters(p);
|
||||
adsr.noteOn();
|
||||
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
void NeuralSynthVoice::notePitchbendChanged()
|
||||
{
|
||||
auto freqHz = (float)getCurrentlyPlayingNote().getFrequencyInHertz();
|
||||
processorChain.get<oscIndex>().setFrequency(freqHz, true);
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
void NeuralSynthVoice::noteStopped(bool allowTailOff)
|
||||
{
|
||||
adsr.noteOff(); //Triggers release phase
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
// MPE expression callbacks this synth intentionally does not respond to.
void NeuralSynthVoice::notePressureChanged() {}
void NeuralSynthVoice::noteTimbreChanged() {}
void NeuralSynthVoice::noteKeyStateChanged() {}
|
||||
|
||||
void NeuralSynthVoice::setWaveform(int waveformType)
|
||||
{
|
||||
auto& osc = processorChain.template get<oscIndex>();
|
||||
|
||||
switch (waveformType)
|
||||
{
|
||||
case 0:
|
||||
osc.initialise([](float x) { return std::sin(x); });
|
||||
break;
|
||||
|
||||
case 1:
|
||||
osc.initialise([](float x) { return x / juce::MathConstants<float>::pi; }); // Saw
|
||||
break;
|
||||
|
||||
case 2:
|
||||
osc.initialise([](float x) { return x < 0.0f ? -1.0f : 1.0f; }); // Square
|
||||
break;
|
||||
|
||||
case 3:
|
||||
osc.initialise([](float x) {
|
||||
return 2.0f * std::abs(2.0f * (x / juce::MathConstants<float>::twoPi) - 1.0f) - 1.0f;
|
||||
}); // Triangle
|
||||
break;
|
||||
}
|
||||
}
|
||||
#include "SynthVoice.h"
|
||||
#include <cmath>
|
||||
// Wavetable bank shared by every voice; built lazily on first prepare().
std::shared_ptr<WT::Bank> NeuralSynthVoice::wtBank;
|
||||
|
||||
//==============================================================================
|
||||
|
||||
// Stores a reference to the shared parameter set; real setup is in prepare().
NeuralSynthVoice::NeuralSynthVoice (NeuralSharedParams& sp)
    : shared (sp) {}
|
||||
|
||||
//==============================================================================
|
||||
|
||||
// Allocates all per-voice buffers and configures every processor for the
// host's sample rate / block size. Must run before renderNextBlock().
void NeuralSynthVoice::prepare (const juce::dsp::ProcessSpec& newSpec)
{
    spec = newSpec;

    // --- Oscillator
    osc.prepare (spec.sampleRate);
    setWaveform (0); // default to sine

    // --- Wavetable bank (build once), then prepare osc ---
    // NOTE(review): this lazy init of the class-wide bank is not guarded; if
    // voices can be prepared from more than one thread it races — confirm
    // prepare() is always called from a single thread.
    if (!wtBank)
    {
        wtBank = std::make_shared<WT::Bank>(2048, 16, 6); // N=2048, frames=16, levels=6
        wtBank->generateDefaultMorph(); // Sine -> Saw -> Square -> Triangle
        wtBank->buildMipmaps();
    }
    wtOsc.prepare(spec.sampleRate);
    wtOsc.setBank(wtBank);

    // --- Scratch buffer (IMPORTANT: allocate real memory)
    tempBuffer.setSize ((int) spec.numChannels, (int) spec.maximumBlockSize,
                        false, false, true);
    tempBlock = juce::dsp::AudioBlock<float> (tempBuffer);

    // --- Prepare chain elements
    chain.prepare (spec);

    // Set maximum delay sizes BEFORE runtime changes
    {
        // Flanger: up to 20 ms
        auto& flanger = chain.get<flangerIndex>();
        const size_t maxFlangerDelay = (size_t) juce::jmax<size_t>(
            1, (size_t) std::ceil (0.020 * spec.sampleRate));
        flanger.setMaximumDelayInSamples (maxFlangerDelay);
        flanger.reset();
    }
    {
        // Simple delay: up to 2 s
        auto& delay = chain.get<delayIndex>();
        const size_t maxDelay = (size_t) juce::jmax<size_t>(
            1, (size_t) std::ceil (2.0 * spec.sampleRate));
        delay.setMaximumDelayInSamples (maxDelay);
        delay.reset();
    }

    // Envelopes
    adsr.setSampleRate (spec.sampleRate);
    filterAdsr.setSampleRate (spec.sampleRate);

    // Filter
    svf.reset();
    svf.prepare (spec);

    // Initial filter type (0 = LP, 1 = HP, 2 = BP); re-read each block in
    // renderNextBlock(), this just sets a sane starting state.
    const int type = (int) std::lround (juce::jlimit (0.0f, 2.0f,
        shared.filterType ? shared.filterType->load() : 0.0f));
    switch (type)
    {
        case 0: svf.setType (juce::dsp::StateVariableTPTFilterType::lowpass); break;
        case 1: svf.setType (juce::dsp::StateVariableTPTFilterType::highpass); break;
        case 2: svf.setType (juce::dsp::StateVariableTPTFilterType::bandpass); break;
        default: break;
    }
}
|
||||
|
||||
//==============================================================================
|
||||
|
||||
// Renders one block for this voice: oscillator (BLEP or wavetable) ->
// flanger -> SVF with filter-ADSR modulation -> chorus -> delay -> reverb ->
// distortion (wet/dry) -> EQ -> master gain -> limiter -> amp ADSR, then
// mixes the result into outputBuffer at startSample.
void NeuralSynthVoice::renderNextBlock (juce::AudioBuffer<float>& outputBuffer,
                                        int startSample, int numSamples)
{
    if (numSamples <= 0)
        return;

    // NOTE(review): with this disabled, nothing ever calls clearCurrentNote(),
    // so the voice is never returned to the free pool after its envelope
    // finishes — confirm whether the synthesiser frees voices elsewhere.
    //if (! adsr.isActive())
    //    clearCurrentNote();

    // Apply pending waveform change (from GUI / processor thread)
    const int wf = pendingWaveform.exchange (-1, std::memory_order_acq_rel);
    if (wf != -1)
        setWaveform (wf);

    // --- Generate oscillator into temp buffer (WT or BLEP) ---
    tempBuffer.clear();
    const int numCh = juce::jmin ((int) spec.numChannels, tempBuffer.getNumChannels());

    const bool useWT = (shared.wtOn && shared.wtOn->load() > 0.5f);
    if (useWT && shared.wtMorph)
        wtOsc.setMorph(shared.wtMorph->load()); // 0..15 continuous

    // Same mono oscillator sample copied to every channel.
    for (int i = 0; i < numSamples; ++i)
    {
        const float s = useWT ? wtOsc.process() : osc.process();
        for (int ch = 0; ch < numCh; ++ch)
            tempBuffer.getWritePointer (ch)[i] = s;
    }

    auto block = tempBlock.getSubBlock (0, (size_t) numSamples);

    // ================================================================
    // Flanger (pre-filter) – manual per-sample to set varying delay
    // ================================================================
    {
        auto& flanger = chain.get<flangerIndex>();

        const bool enabled = shared.flangerOn && shared.flangerOn->load() > 0.5f;
        if (enabled)
        {
            const float rate         = shared.flangerRate ? shared.flangerRate->load() : 0.0f;
            // NOTE(review): lfoPhase is read from the shared param each block
            // but never written back, so the LFO restarts from the same phase
            // every block — confirm whether flangerPhase is meant as a fixed
            // phase offset or as persistent LFO state.
            float lfoPhase           = shared.flangerPhase ? shared.flangerPhase->load() : 0.0f;
            const float flangerDepth = shared.flangerDepth ? shared.flangerDepth->load() : 0.0f; // ms
            const float mix          = shared.flangerDryMix ? shared.flangerDryMix->load() : 0.0f;
            const float feedback     = shared.flangerFeedback ? shared.flangerFeedback->load() : 0.0f;
            const float baseDelayMs  = shared.flangerDelay ? shared.flangerDelay->load() : 0.25f;

            for (int i = 0; i < numSamples; ++i)
            {
                // Flanger input is taken from channel 0 only (osc output is
                // identical across channels at this point).
                const float in = tempBuffer.getReadPointer (0)[i];

                const float lfo = std::sin (lfoPhase);
                // NOTE(review): baseDelayMs + depth can exceed the 20 ms
                // maximum configured in prepare(); the DelayLine will clamp.
                const float delayMs = baseDelayMs + 0.5f * (1.0f + lfo) * flangerDepth;
                const float delaySamples = juce::jmax (0.0f, delayMs * 0.001f * (float) spec.sampleRate);

                flanger.setDelay (delaySamples);

                const float delayed = flanger.popSample (0);
                flanger.pushSample (0, in + delayed * feedback);

                const float out = in * (1.0f - mix) + delayed * mix;
                for (int ch = 0; ch < numCh; ++ch)
                    tempBuffer.getWritePointer (ch)[i] = out;

                lfoPhase += juce::MathConstants<float>::twoPi * rate / (float) spec.sampleRate;
                if (lfoPhase > juce::MathConstants<float>::twoPi)
                    lfoPhase -= juce::MathConstants<float>::twoPi;
            }
        }
    }

    // ================================================================
    // Filter with per-sample ADSR modulation (poly)
    // ================================================================
    {
        const bool enabled = shared.filterOn && shared.filterOn->load() > 0.5f;

        // Update filter type every block (cheap)
        const int ftype = (int) std::lround (juce::jlimit (0.0f, 2.0f,
            shared.filterType ? shared.filterType->load() : 0.0f));
        switch (ftype)
        {
            case 0: svf.setType (juce::dsp::StateVariableTPTFilterType::lowpass); break;
            case 1: svf.setType (juce::dsp::StateVariableTPTFilterType::highpass); break;
            case 2: svf.setType (juce::dsp::StateVariableTPTFilterType::bandpass); break;
            default: break;
        }

        const float qOrRes = juce::jlimit (0.1f, 10.0f,
            shared.filterResonance ? shared.filterResonance->load() : 0.7f);
        svf.setResonance (qOrRes);

        const float baseCutoff = juce::jlimit (20.0f, 20000.0f,
            shared.filterCutoff ? shared.filterCutoff->load() : 1000.0f);
        const float envAmt = shared.fenvAmount ? shared.fenvAmount->load() : 0.0f;

        // The filter envelope is advanced every sample even when the filter is
        // bypassed, so its position stays consistent if the filter is enabled
        // mid-note.
        for (int i = 0; i < numSamples; ++i)
        {
            const float envVal = filterAdsr.getNextSample();
            // Envelope modulates cutoff in octaves: cutoff * 2^(amount * env).
            const float cutoff = juce::jlimit (20.0f, 20000.0f,
                baseCutoff * std::pow (2.0f, envAmt * envVal));
            svf.setCutoffFrequency (cutoff);

            if (enabled)
            {
                for (int ch = 0; ch < numCh; ++ch)
                {
                    float x = tempBuffer.getSample (ch, i);
                    x = svf.processSample (ch, x);
                    tempBuffer.setSample (ch, i, x);
                }
            }
        }
    }

    // ================================================================
    // Chorus
    // ================================================================
    if (shared.chorusOn && shared.chorusOn->load() > 0.5f)
    {
        auto& chorus = chain.get<chorusIndex>();
        if (shared.chorusCentre)   chorus.setCentreDelay (shared.chorusCentre->load());
        if (shared.chorusDepth)    chorus.setDepth (shared.chorusDepth->load());
        if (shared.chorusFeedback) chorus.setFeedback (shared.chorusFeedback->load());
        if (shared.chorusMix)      chorus.setMix (shared.chorusMix->load());
        if (shared.chorusRate)     chorus.setRate (shared.chorusRate->load());

        chain.get<chorusIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
    }

    // ================================================================
    // Simple Delay (per-voice)
    // ================================================================
    if (shared.delayOn && shared.delayOn->load() > 0.5f)
    {
        auto& delay = chain.get<delayIndex>();
        // delayTime is in seconds; DelayLine wants samples.
        const float time = shared.delayTime ? shared.delayTime->load() : 0.1f;
        delay.setDelay (juce::jmax (0.0f, time * (float) spec.sampleRate));
        delay.process (juce::dsp::ProcessContextReplacing<float> (block));
    }

    // ================================================================
    // Reverb
    // ================================================================
    if (shared.reverbOn && shared.reverbOn->load() > 0.5f)
    {
        juce::Reverb::Parameters rp;
        rp.damping    = shared.reverbDamping ? shared.reverbDamping->load() : 0.0f;
        rp.dryLevel   = shared.reverbDryLevel ? shared.reverbDryLevel->load() : 0.0f;
        rp.freezeMode = shared.reverbFreezeMode ? shared.reverbFreezeMode->load() : 0.0f;
        rp.roomSize   = shared.reverbRoomSize ? shared.reverbRoomSize->load() : 0.0f;
        rp.wetLevel   = shared.reverbWetLevel ? shared.reverbWetLevel->load() : 0.0f;
        rp.width      = shared.reverbWidth ? shared.reverbWidth->load() : 0.0f;

        chain.get<reverbIndex>().setParameters (rp);
        chain.get<reverbIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
    }

    // ================================================================
    // Distortion + tone (post LPF/Peak)
    // ================================================================
    {
        const float driveDb = shared.distortionDrive ? shared.distortionDrive->load() : 0.0f;
        const float bias    = juce::jlimit (-1.0f, 1.0f, shared.distortionBias ? shared.distortionBias->load() : 0.0f);
        const float toneHz  = juce::jlimit (100.0f, 8000.0f, shared.distortionTone ? shared.distortionTone->load() : 3000.0f);
        const int   shape   = (int) std::lround (juce::jlimit (0.0f, 2.0f,
            shared.distortionShape ? shared.distortionShape->load() : 0.0f));
        const float mix     = shared.distortionMix ? shared.distortionMix->load() : 0.0f;

        auto& pre  = chain.get<distortionPreGain>();
        auto& sh   = chain.get<distortionIndex>();
        auto& tone = chain.get<distortionPostLPF>();

        pre.setGainDecibels (driveDb);

        // Explicit std::function target (works on MSVC)
        if (shape == 0)      sh.functionToUse = std::function<float(float)>{ [bias](float x) noexcept { return std::tanh (x + bias); } };
        else if (shape == 1) sh.functionToUse = std::function<float(float)>{ [bias](float x) noexcept { return juce::jlimit (-1.0f, 1.0f, x + bias); } };
        else                 sh.functionToUse = std::function<float(float)>{ [bias](float x) noexcept { return std::atan (x + bias) * (2.0f / juce::MathConstants<float>::pi); } };

        // NOTE(review): labelled a tone LPF but built as a peak filter whose
        // gain comes from the EQ high-band parameter — confirm intent.
        tone.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
            spec.sampleRate, toneHz, 0.707f,
            juce::Decibels::decibelsToGain (shared.highGainDbls ? shared.highGainDbls->load() : 0.0f));

        if (shared.distortionOn && shared.distortionOn->load() > 0.5f)
        {
            // Wet/dry blend around the shaper
            // NOTE(review): this allocates a fresh AudioBuffer on the audio
            // thread every block — consider a preallocated member scratch
            // buffer to keep the render path allocation-free.
            juce::AudioBuffer<float> dryCopy (tempBuffer.getNumChannels(), numSamples);
            for (int ch = 0; ch < numCh; ++ch)
                dryCopy.copyFrom (ch, 0, tempBuffer, ch, 0, numSamples);

            // pre -> shaper -> tone
            pre.process (juce::dsp::ProcessContextReplacing<float> (block));
            sh.process (juce::dsp::ProcessContextReplacing<float> (block));
            tone.process (juce::dsp::ProcessContextReplacing<float> (block));

            const float wet = mix, dry = 1.0f - mix;
            for (int ch = 0; ch < numCh; ++ch)
            {
                auto* d = dryCopy.getReadPointer (ch);
                auto* w = tempBuffer.getWritePointer (ch);
                for (int i = 0; i < numSamples; ++i)
                    w[i] = dry * d[i] + wet * w[i];
            }
        }
    }

    // ================================================================
    // EQ + Master + Limiter (EQ guarded by eqOn)
    // ================================================================
    {
        const bool eqEnabled = shared.eqOn && shared.eqOn->load() > 0.5f;

        auto& eqL = chain.get<eqLowIndex>();
        auto& eqM = chain.get<eqMidIndex>();
        auto& eqH = chain.get<eqHighIndex>();

        if (eqEnabled)
        {
            eqL.coefficients = juce::dsp::IIR::Coefficients<float>::makeLowShelf (
                spec.sampleRate, 100.0f, 0.707f,
                juce::Decibels::decibelsToGain (shared.lowGainDbls ? shared.lowGainDbls->load() : 0.0f));

            eqM.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
                spec.sampleRate, 1000.0f, 1.0f,
                juce::Decibels::decibelsToGain (shared.midGainDbls ? shared.midGainDbls->load() : 0.0f));

            // High band implemented as a 10 kHz peak filter (not a shelf).
            eqH.coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
                spec.sampleRate, 10000.0f, 0.707f,
                juce::Decibels::decibelsToGain (shared.highGainDbls ? shared.highGainDbls->load() : 0.0f));

            eqL.process (juce::dsp::ProcessContextReplacing<float> (block));
            eqM.process (juce::dsp::ProcessContextReplacing<float> (block));
            eqH.process (juce::dsp::ProcessContextReplacing<float> (block));
        }

        chain.get<masterIndex>().setGainDecibels (shared.masterDbls ? shared.masterDbls->load() : 0.0f);
        chain.get<masterIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));

        chain.get<limiterIndex>().process (juce::dsp::ProcessContextReplacing<float> (block));
    }

    // ================================================================
    // Apply AMP ADSR envelope
    // ================================================================
    {
        // Non-owning wrapper over tempBuffer's channel pointers — no copy.
        juce::AudioBuffer<float> buf (tempBuffer.getArrayOfWritePointers(), numCh, numSamples);
        adsr.applyEnvelopeToBuffer (buf, 0, numSamples);
    }

    // Mix into output
    juce::dsp::AudioBlock<float> (outputBuffer)
        .getSubBlock ((size_t) startSample, (size_t) numSamples)
        .add (block);
}
|
||||
|
||||
//==============================================================================
|
||||
|
||||
// Called when a new MPE note begins: tunes both oscillators, snapshots the
// chorus / delay / reverb settings, and starts the amp and filter envelopes.
void NeuralSynthVoice::noteStarted()
{
    const float freqHz = (float) getCurrentlyPlayingNote().getFrequencyInHertz();

    // Oscillator frequency + phase
    osc.setFrequency (freqHz);
    osc.resetPhase (0.0f);

    // Wavetable oscillator too
    wtOsc.setFrequency(freqHz);
    wtOsc.resetPhase(0.0f);

    // Chorus snapshot (each parameter pointer may be null if unbound)
    if (shared.chorusCentre)   chain.get<chorusIndex>().setCentreDelay (shared.chorusCentre->load());
    if (shared.chorusDepth)    chain.get<chorusIndex>().setDepth (shared.chorusDepth->load());
    if (shared.chorusFeedback) chain.get<chorusIndex>().setFeedback (shared.chorusFeedback->load());
    if (shared.chorusMix)      chain.get<chorusIndex>().setMix (shared.chorusMix->load());
    if (shared.chorusRate)     chain.get<chorusIndex>().setRate (shared.chorusRate->load());

    // Delay time (in samples)
    if (shared.delayTime)
        chain.get<delayIndex>().setDelay (juce::jmax (0.0f, shared.delayTime->load() * (float) spec.sampleRate));

    // Reverb snapshot
    juce::Reverb::Parameters rp;
    rp.damping    = shared.reverbDamping ? shared.reverbDamping->load() : 0.0f;
    rp.dryLevel   = shared.reverbDryLevel ? shared.reverbDryLevel->load() : 0.0f;
    rp.freezeMode = shared.reverbFreezeMode ? shared.reverbFreezeMode->load() : 0.0f;
    rp.roomSize   = shared.reverbRoomSize ? shared.reverbRoomSize->load() : 0.0f;
    rp.wetLevel   = shared.reverbWetLevel ? shared.reverbWetLevel->load() : 0.0f;
    rp.width      = shared.reverbWidth ? shared.reverbWidth->load() : 0.0f;
    chain.get<reverbIndex>().setParameters (rp);

    // Amp ADSR: snapshot at note-on so edits mid-note don't retrigger.
    juce::ADSR::Parameters ap;
    ap.attack  = shared.adsrAttack ? shared.adsrAttack->load() : 0.01f;
    ap.decay   = shared.adsrDecay ? shared.adsrDecay->load() : 0.10f;
    ap.sustain = shared.adsrSustain ? shared.adsrSustain->load() : 0.80f;
    ap.release = shared.adsrRelease ? shared.adsrRelease->load() : 0.40f;
    adsr.setParameters (ap);
    adsr.noteOn();

    // Filter ADSR
    juce::ADSR::Parameters fp;
    fp.attack  = shared.fenvAttack ? shared.fenvAttack->load() : 0.01f;
    fp.decay   = shared.fenvDecay ? shared.fenvDecay->load() : 0.10f;
    fp.sustain = shared.fenvSustain ? shared.fenvSustain->load() : 0.80f;
    fp.release = shared.fenvRelease ? shared.fenvRelease->load() : 0.40f;
    filterAdsr.setParameters (fp);
    filterAdsr.noteOn();
}
|
||||
|
||||
//==============================================================================
|
||||
|
||||
void NeuralSynthVoice::notePitchbendChanged()
|
||||
{
|
||||
const float freqHz = (float) getCurrentlyPlayingNote().getFrequencyInHertz();
|
||||
osc.setFrequency (freqHz);
|
||||
wtOsc.setFrequency (freqHz);
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
|
||||
void NeuralSynthVoice::noteStopped (bool allowTailOff)
|
||||
{
|
||||
juce::ignoreUnused (allowTailOff);
|
||||
adsr.noteOff();
|
||||
filterAdsr.noteOff();
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
|
||||
void NeuralSynthVoice::setWaveform (int waveformType)
|
||||
{
|
||||
switch (juce::jlimit (0, 3, waveformType))
|
||||
{
|
||||
case 0: osc.setWave (BlepWave::Sine); break;
|
||||
case 1: osc.setWave (BlepWave::Saw); break;
|
||||
case 2: osc.setWave (BlepWave::Square); break;
|
||||
case 3: osc.setWave (BlepWave::Triangle); break;
|
||||
default: osc.setWave (BlepWave::Sine); break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,124 +1,100 @@
|
||||
#pragma once
|
||||
|
||||
#include <JuceHeader.h>
|
||||
#include "NeuralSharedParams.h"
|
||||
|
||||
#include <iostream>
|
||||
|
||||
/*struct ADSRProcessor : public juce::dsp::ProcessorBase
|
||||
{
|
||||
// -----------------------------------------------------------------
|
||||
void prepare(const juce::dsp::ProcessSpec& spec) override
|
||||
{
|
||||
adsr.setSampleRate(spec.sampleRate);
|
||||
}
|
||||
|
||||
void reset() override { adsr.reset(); }
|
||||
|
||||
void process(const juce::dsp::ProcessContextReplacing<float> &ctx) override
|
||||
{
|
||||
DBG("Processing...");
|
||||
|
||||
auto& outputBlock = context.getOutputBlock();
|
||||
const auto numSamples = (int)outputBlock.getNumSamples();
|
||||
const auto numChannels = (int)outputBlock.getNumChannels();
|
||||
|
||||
// Wrap the outputBlock into AudioBuffer
|
||||
for (int ch = 0; ch < numChannels; ++ch)
|
||||
buffer.setWritePointer(ch, outputBlock.getChannelPointer(ch));
|
||||
|
||||
adsr.applyEnvelopeToBuffer(buffer, 0, numSamples);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
// These two are NOT part of the ProcessorBase interface — they are
|
||||
// your private hooks that the voice will call on note events.
|
||||
void noteOn(const juce::ADSR::Parameters& p) {
|
||||
adsr.setParameters(p); adsr.noteOn();
|
||||
}
|
||||
void noteOff() { adsr.noteOff(); }
|
||||
|
||||
private:
|
||||
juce::ADSR adsr;
|
||||
juce::AudioBuffer<float> buffer;
|
||||
};*/
|
||||
|
||||
//==============================================================================
|
||||
class NeuralSynthVoice : public juce::MPESynthesiserVoice
|
||||
{
|
||||
public:
|
||||
NeuralSynthVoice(NeuralSharedParams& sp);
|
||||
|
||||
//==============================================================================
|
||||
void prepare(const juce::dsp::ProcessSpec& spec);
|
||||
|
||||
//==============================================================================
|
||||
void noteStarted() override;
|
||||
|
||||
//==============================================================================
|
||||
void notePitchbendChanged() override;
|
||||
|
||||
//==============================================================================
|
||||
void noteStopped(bool) override;
|
||||
|
||||
//==============================================================================
|
||||
void notePressureChanged();
|
||||
void noteTimbreChanged();
|
||||
void noteKeyStateChanged();
|
||||
|
||||
//==============================================================================
|
||||
void renderNextBlock(juce::AudioBuffer<float>& outputBuffer, int startSample, int numSamples);
|
||||
|
||||
void setWaveform(int waveformType);
|
||||
|
||||
void changeWaveform(int waveform) noexcept {
|
||||
this->waveform = waveform;
|
||||
}
|
||||
|
||||
private:
|
||||
//==============================================================================
|
||||
juce::HeapBlock<char> heapBlock;
|
||||
juce::dsp::AudioBlock<float> tempBlock;
|
||||
|
||||
enum
|
||||
{
|
||||
oscIndex,
|
||||
distortionPreGain,
|
||||
distortionIndex,
|
||||
distortionPostLPF,
|
||||
flangerIndex,
|
||||
chorusIndex,
|
||||
delayIndex,
|
||||
reverbIndex,
|
||||
eqLowIndex,
|
||||
eqMidIndex,
|
||||
eqHighIndex,
|
||||
masterIndex
|
||||
};
|
||||
|
||||
juce::dsp::ProcessorChain<
|
||||
juce::dsp::Oscillator<float>,
|
||||
juce::dsp::Gain<float>,
|
||||
juce::dsp::WaveShaper<float, std::function<float(float)>>,
|
||||
juce::dsp::IIR::Filter<float>,
|
||||
juce::dsp::DelayLine<float, juce::dsp::DelayLineInterpolationTypes::Linear>,
|
||||
juce::dsp::Chorus<float>,
|
||||
juce::dsp::DelayLine<float>,
|
||||
juce::dsp::Reverb,
|
||||
juce::dsp::IIR::Filter<float>, // Low shelf
|
||||
juce::dsp::IIR::Filter<float>, // Mid peak
|
||||
juce::dsp::IIR::Filter<float>, // High shelf
|
||||
juce::dsp::Gain<float>
|
||||
> processorChain;
|
||||
|
||||
juce::dsp::ProcessSpec spec;
|
||||
|
||||
juce::ADSR adsr;
|
||||
NeuralSharedParams& shared;
|
||||
|
||||
static constexpr size_t lfoUpdateRate = 100;
|
||||
|
||||
static inline float msToSecs(float ms) { return ms * 0.001f; }
|
||||
|
||||
std::atomic<int> waveform { -1 };
|
||||
};
|
||||
#pragma once
|
||||
#include <JuceHeader.h>
|
||||
#include <functional>
|
||||
#include "NeuralSharedParams.h"
|
||||
#include "BlepOsc.h"
|
||||
#include "WavetableOsc.h" // <-- new
|
||||
|
||||
//==============================================================================
|
||||
// A single voice with BLEP osc + optional Wavetable osc (morph + anti-aliasing),
|
||||
// per-voice ADSR, filter ADSR, flanger/delay/chorus/reverb/distortion/EQ/master.
|
||||
class NeuralSynthVoice : public juce::MPESynthesiserVoice
{
public:
    /** Keeps a reference to the parameter block shared by all voices (not owned). */
    explicit NeuralSynthVoice (NeuralSharedParams& sharedParams);

    // JUCE voice API
    void prepare (const juce::dsp::ProcessSpec& spec);
    void renderNextBlock (juce::AudioBuffer<float>& outputBuffer,
                          int startSample, int numSamples) override;

    void noteStarted() override;
    void noteStopped (bool allowTailOff) override;
    void notePitchbendChanged() override;

    // MPE callbacks this voice deliberately ignores.
    void notePressureChanged() override {}
    void noteTimbreChanged() override {}
    void noteKeyStateChanged() override {}

    // Called from the processor when the GUI waveform param changes.
    // NOTE(review): this forwards straight to setWaveform() and never writes
    // the pendingWaveform atomic declared below — confirm the .cpp defers the
    // change to the audio thread; the previous implementation stored the
    // request atomically and applied it inside renderNextBlock().
    void changeWaveform (int wf) { setWaveform (wf); }

private:
    void setWaveform (int waveformType);

    //=== Processing chain (without oscillator) =================================
    using DelayLine  = juce::dsp::DelayLine<float,
                                            juce::dsp::DelayLineInterpolationTypes::Linear>;
    using IIR        = juce::dsp::IIR::Filter<float>;
    using Gain       = juce::dsp::Gain<float>;
    using WaveShaper = juce::dsp::WaveShaper<float, std::function<float(float)>>;
    using Chorus     = juce::dsp::Chorus<float>;
    using Reverb     = juce::dsp::Reverb;
    using Limiter    = juce::dsp::Limiter<float>;

    // Index of each processor inside `Chain` below — the enum order must stay
    // in lockstep with the ProcessorChain template-argument order.
    enum ChainIndex
    {
        flangerIndex = 0,
        delayIndex,
        chorusIndex,
        reverbIndex,
        distortionPreGain,
        distortionIndex,
        distortionPostLPF,
        eqLowIndex,
        eqMidIndex,
        eqHighIndex,
        masterIndex,
        limiterIndex
    };

    using Chain = juce::dsp::ProcessorChain<
        DelayLine,   // flanger
        DelayLine,   // simple delay
        Chorus,      // chorus
        Reverb,      // reverb
        Gain,        // distortion pre-gain (drive)
        WaveShaper,  // distortion waveshaper
        IIR,         // tone / post-EQ for distortion
        IIR,         // EQ low
        IIR,         // EQ mid
        IIR,         // EQ high
        Gain,        // master gain
        Limiter      // safety limiter
    >;

private:
    NeuralSharedParams& shared;               // parameters shared across all voices
    juce::dsp::ProcessSpec spec {};           // cached by prepare()

    // ==== Oscillators ========================================================
    BlepOsc osc;                              // polyBLEP (existing)
    std::atomic<int> pendingWaveform { -1 };  // -1 presumably means "no pending request" — verify in the .cpp

    WT::Osc wtOsc;                            // wavetable oscillator (new)
    static std::shared_ptr<WT::Bank> wtBank;  // shared bank across voices; needs a definition in one .cpp

    // ==== Envelopes & Filter =================================================
    juce::ADSR adsr;
    juce::ADSR filterAdsr;
    juce::dsp::StateVariableTPTFilter<float> svf;

    // ==== FX chain ===========================================================
    Chain chain;

    // ==== Scratch buffer =====================================================
    juce::AudioBuffer<float> tempBuffer;
    juce::dsp::AudioBlock<float> tempBlock;

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (NeuralSynthVoice)
};
|
||||
|
||||
261
Source/WavetableOsc.h
Normal file
261
Source/WavetableOsc.h
Normal file
@@ -0,0 +1,261 @@
|
||||
#pragma once
|
||||
#include <JuceHeader.h>
|
||||
#include <vector>
|
||||
#include <cmath>
|
||||
|
||||
// ============================== Design =======================================
|
||||
// - Bank with F frames, each frame is a single-cycle table of N samples.
|
||||
// - For each frame, we create L mip-levels: level 0 = full bandwidth,
|
||||
// level l halves the permitted harmonics (spectral truncation).
|
||||
// - Runtime chooses level from note frequency and sampleRate, then morphs
|
||||
// between adjacent frames and crossfades between the two nearest levels.
|
||||
// - Table read uses linear interpolation (cheap and good enough with N>=2048).
|
||||
|
||||
namespace WT
|
||||
{
|
||||
// Utility: complex array wrapper for JUCE FFT (interleaved real/imag floats)
|
||||
// Utility: complex array wrapper for JUCE FFT (interleaved real/imag floats).
// data holds 2*N floats: (Re[0], Im[0], Re[1], Im[1], ...). asComplex()
// reinterprets that storage as juce::dsp::Complex<float> (std::complex<float>),
// which the standard guarantees is layout-compatible with float[2].
struct ComplexBuf
{
    std::vector<float> data; // size = 2 * N
    explicit ComplexBuf(size_t N = 0) { resize(N); }
    // Reallocates and zero-fills for N complex values.
    void resize(size_t N) { data.assign(2 * N, 0.0f); }
    juce::dsp::Complex<float>* asComplex() { return reinterpret_cast<juce::dsp::Complex<float>*>(data.data()); }
};
|
||||
|
||||
// =======================================================================
|
||||
// WavetableBank: holds raw frames + mipmapped versions
|
||||
// =======================================================================
|
||||
class Bank
|
||||
{
|
||||
public:
|
||||
// N = table length (must be power-of-two for FFT), frames = number of morph frames
|
||||
// mipLevels = how many spectral levels (>=1). 5 ~ 6 is plenty for synth use.
|
||||
Bank(size_t N = 2048, int frames = 16, int mipLevels = 6)
|
||||
: tableSize(N), numFrames(frames), numLevels(mipLevels),
|
||||
fft((int)std::log2((double)N))
|
||||
{
|
||||
jassert(juce::isPowerOfTwo((int)N));
|
||||
tables.resize((size_t)numLevels);
|
||||
for (int l = 0; l < numLevels; ++l)
|
||||
tables[(size_t)l].resize((size_t)numFrames, std::vector<float>(tableSize, 0.0f));
|
||||
}
|
||||
|
||||
size_t getSize() const { return tableSize; }
|
||||
int getFrames() const { return numFrames; }
|
||||
int getLevels() const { return numLevels; }
|
||||
|
||||
// Provide raw “design” frames (time-domain single-cycle) then call buildMipmaps().
|
||||
// framesRaw.size() must equal numFrames, each frame length must equal tableSize.
|
||||
void setRawFrames(const std::vector<std::vector<float>>& framesRaw)
|
||||
{
|
||||
jassert((int)framesRaw.size() == numFrames);
|
||||
for (const auto& f : framesRaw) jassert(f.size() == tableSize);
|
||||
raw = framesRaw;
|
||||
}
|
||||
|
||||
// Convenience: generate 16-frame bank morphing Sine -> Saw -> Square -> Triangle
|
||||
void generateDefaultMorph()
|
||||
{
|
||||
std::vector<std::vector<float>> frames;
|
||||
frames.resize((size_t)numFrames, std::vector<float>(tableSize, 0.0f));
|
||||
|
||||
auto fill = [&](int idx, auto func)
|
||||
{
|
||||
auto& t = frames[(size_t)idx];
|
||||
for (size_t n = 0; n < tableSize; ++n)
|
||||
{
|
||||
const float ph = (float) (juce::MathConstants<double>::twoPi * (double)n / (double)tableSize);
|
||||
t[n] = func(ph);
|
||||
}
|
||||
normalise(t);
|
||||
};
|
||||
|
||||
// helper waves
|
||||
auto sine = [](float ph) { return std::sin(ph); };
|
||||
auto saw = [](float ph) { return (float)(2.0 * (ph / juce::MathConstants<float>::twoPi) - 1.0); };
|
||||
auto sq = [](float ph) { return ph < juce::MathConstants<float>::pi ? 1.0f : -1.0f; };
|
||||
auto tri = [](float ph) {
|
||||
float v = (float)(2.0 * std::abs(2.0 * (ph / juce::MathConstants<float>::twoPi) - 1.0) - 1.0);
|
||||
return v;
|
||||
};
|
||||
|
||||
// 0..5: sine->saw, 6..10: saw->square, 11..15: square->triangle
|
||||
const int F = numFrames;
|
||||
for (int i = 0; i < F; ++i)
|
||||
{
|
||||
const float t = (float) i / (float) juce::jmax(1, F - 1);
|
||||
std::function<float(float)> a, b;
|
||||
float mix = 0.0f;
|
||||
|
||||
if (i <= 5) { a = sine; b = saw; mix = (float)i / 5.0f; }
|
||||
else if (i <=10) { a = saw; b = sq; mix = (float)(i - 6) / 4.0f; }
|
||||
else { a = sq; b = tri; mix = (float)(i - 11) / 4.0f; }
|
||||
|
||||
fill(i, [=](float ph){ return (1.0f - mix) * a(ph) + mix * b(ph); });
|
||||
}
|
||||
|
||||
setRawFrames(frames);
|
||||
}
|
||||
|
||||
// Build mip-levels by FFT → spectral truncation → IFFT
|
||||
void buildMipmaps()
|
||||
{
|
||||
jassert(!raw.empty());
|
||||
ComplexBuf freq(tableSize);
|
||||
ComplexBuf time(tableSize);
|
||||
|
||||
for (int f = 0; f < numFrames; ++f)
|
||||
{
|
||||
// Forward FFT of raw frame
|
||||
std::fill(freq.data.begin(), freq.data.end(), 0.0f);
|
||||
for (size_t n = 0; n < tableSize; ++n)
|
||||
{
|
||||
time.data[2 * n + 0] = raw[(size_t)f][n];
|
||||
time.data[2 * n + 1] = 0.0f;
|
||||
}
|
||||
fft.performRealOnlyForwardTransform(time.data.data());
|
||||
// After JUCE real FFT, bins are laid out as: Re[0], Re[N/2], Re[1], Im[1], Re[2], Im[2], ...
|
||||
// We'll reconstruct complex bins for easy masking.
|
||||
|
||||
// Helper to zero all harmonics above kMax (inclusive index in [0..N/2])
|
||||
auto maskAndIFFT = [&](int level, int kMax)
|
||||
{
|
||||
// Copy time.data into working complex bins
|
||||
auto* bins = freq.asComplex();
|
||||
// DC & Nyquist are purely real in real-FFT
|
||||
bins[0].real (time.data[0]);
|
||||
bins[0].imag (0.0f);
|
||||
bins[tableSize/2].real (time.data[1]);
|
||||
bins[tableSize/2].imag (0.0f);
|
||||
|
||||
// Rebuild the rest (Re[k], Im[k]) packed starting at index 2
|
||||
for (size_t k = 1; k < tableSize/2; ++k)
|
||||
{
|
||||
bins[k].real (time.data[2 * k + 0]);
|
||||
bins[k].imag (time.data[2 * k + 1]);
|
||||
}
|
||||
|
||||
// Mask
|
||||
for (size_t k = (size_t)kMax + 1; k < tableSize/2; ++k)
|
||||
bins[k] = { 0.0f, 0.0f };
|
||||
|
||||
// Pack back into real-FFT layout for inverse
|
||||
time.data[0] = bins[0].real(); // DC
|
||||
time.data[1] = bins[tableSize/2].real(); // Nyquist
|
||||
for (size_t k = 1; k < tableSize/2; ++k)
|
||||
{
|
||||
time.data[2 * k + 0] = bins[k].real();
|
||||
time.data[2 * k + 1] = bins[k].imag();
|
||||
}
|
||||
|
||||
// IFFT
|
||||
fft.performRealOnlyInverseTransform(time.data.data());
|
||||
|
||||
// Copy, normalise a little (scale JUCE inverse divides by N already)
|
||||
auto& dst = tables[(size_t)level][(size_t)f];
|
||||
for (size_t n = 0; n < tableSize; ++n)
|
||||
dst[n] = time.data[2 * n + 0];
|
||||
|
||||
normalise(dst);
|
||||
};
|
||||
|
||||
// Level 0 → all harmonics available up to N/2 - 1
|
||||
for (int l = 0; l < numLevels; ++l)
|
||||
{
|
||||
const int maxH = (int)((tableSize / 2) >> l); // halve per level
|
||||
const int kMax = juce::jmax(1, juce::jmin(maxH, (int)tableSize/2 - 1));
|
||||
maskAndIFFT(l, kMax);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sample at (frame, level, phase in [0,1))
|
||||
inline float lookup (float frameIdx, int level, float phase) const noexcept
|
||||
{
|
||||
const int f0 = juce::jlimit(0, numFrames - 1, (int)std::floor(frameIdx));
|
||||
const int f1 = juce::jlimit(0, numFrames - 1, f0 + 1);
|
||||
const float t = juce::jlimit(0.0f, 1.0f, frameIdx - (float)f0);
|
||||
|
||||
const auto& T0 = tables[(size_t)level][(size_t)f0];
|
||||
const auto& T1 = tables[(size_t)level][(size_t)f1];
|
||||
|
||||
const float pos = phase * (float)tableSize;
|
||||
const int i0 = (int) std::floor(pos) & (int)(tableSize - 1);
|
||||
const int i1 = (i0 + 1) & (int)(tableSize - 1);
|
||||
const float a = pos - (float) std::floor(pos);
|
||||
|
||||
const float s0 = juce::jmap(a, T0[(size_t)i0], T0[(size_t)i1]);
|
||||
const float s1 = juce::jmap(a, T1[(size_t)i0], T1[(size_t)i1]);
|
||||
return juce::jmap(t, s0, s1);
|
||||
}
|
||||
|
||||
// choose mip-level for given frequency (Hz) & sampleRate
|
||||
inline int chooseLevel (float freq, double sampleRate) const noexcept
|
||||
{
|
||||
// permitted harmonics at this pitch:
|
||||
const float maxH = (float) (0.5 * sampleRate / juce::jmax(1.0f, freq));
|
||||
// level so that harmonic budget of level >= maxH, i.e. l = ceil(log2((N/2)/maxH))
|
||||
const float base = (float)(tableSize * 0.5);
|
||||
const float ratio = base / juce::jmax(1.0f, maxH);
|
||||
int l = (int) std::ceil (std::log2 (ratio));
|
||||
return juce::jlimit (0, numLevels - 1, l);
|
||||
}
|
||||
|
||||
static void normalise (std::vector<float>& t)
|
||||
{
|
||||
float mx = 0.0f;
|
||||
for (float v : t) mx = juce::jmax(mx, std::abs(v));
|
||||
if (mx < 1.0e-6f) return;
|
||||
for (float& v : t) v /= mx;
|
||||
}
|
||||
|
||||
private:
|
||||
size_t tableSize;
|
||||
int numFrames;
|
||||
int numLevels;
|
||||
|
||||
juce::dsp::FFT fft;
|
||||
std::vector<std::vector<float>> raw;
|
||||
// [level][frame][sample]
|
||||
std::vector<std::vector<std::vector<float>>> tables;
|
||||
};
|
||||
|
||||
// =======================================================================
|
||||
// Wavetable Oscillator
|
||||
// =======================================================================
|
||||
class Osc
|
||||
{
|
||||
public:
|
||||
void prepare (double sr) { sampleRate = sr; }
|
||||
void setBank (std::shared_ptr<Bank> b) { bank = std::move(b); }
|
||||
void setFrequency (float f) { freq = juce::jmax(0.0f, f); phaseInc = freq / (float)sampleRate; }
|
||||
void setMorph (float m) { morph = m; } // 0..frames-1 (continuous)
|
||||
void resetPhase (float p = 0.0f) { phase = juce::jlimit(0.0f, 1.0f, p); }
|
||||
|
||||
float process()
|
||||
{
|
||||
if (!bank) return 0.0f;
|
||||
|
||||
const int l0 = bank->chooseLevel(freq, sampleRate);
|
||||
const int l1 = juce::jmin(l0 + 1, bank->getLevels() - 1);
|
||||
const float preferL0 = 1.0f - juce::jlimit(0.0f, 1.0f,
|
||||
(float)l0 - (float)bank->chooseLevel(freq * 0.99f, sampleRate));
|
||||
|
||||
const float s0 = bank->lookup(morph, l0, phase);
|
||||
const float s1 = bank->lookup(morph, l1, phase);
|
||||
const float out = juce::jmap(preferL0, s1, s0); // simple crossfade
|
||||
|
||||
phase += phaseInc;
|
||||
while (phase >= 1.0f) phase -= 1.0f;
|
||||
return out;
|
||||
}
|
||||
|
||||
private:
|
||||
std::shared_ptr<Bank> bank;
|
||||
double sampleRate { 44100.0 };
|
||||
float freq { 0.0f };
|
||||
float morph { 0.0f }; // 0..frames-1
|
||||
float phase { 0.0f };
|
||||
float phaseInc { 0.0f };
|
||||
};
|
||||
} // namespace WT
|
||||
Reference in New Issue
Block a user