6028 lines
260 KiB
C++
6028 lines
260 KiB
C++
#include "PluginProcessor.h"
|
|
#include "PluginEditor.h"
|
|
#include "BinaryData.h"
|
|
#include "PianoPhysicsData.h"
|
|
#include <algorithm>
|
|
#include <cmath>
|
|
#include <limits>
|
|
|
|
// SIMD headers for vectorization
|
|
#if defined(__SSE__) || defined(_M_X64) || defined(_M_IX86)
|
|
#include <xmmintrin.h>
|
|
#include <emmintrin.h>
|
|
#define USE_SSE 1
|
|
#endif
|
|
|
|
//==============================================================================
|
|
// FAST MATH LOOKUP TABLES AND APPROXIMATIONS
|
|
// These replace expensive std:: functions in the hot path
|
|
//==============================================================================
|
|
|
|
namespace FastMath {
|
|
|
|
// Lookup table sizes
|
|
static constexpr int kSinTableSize = 4096;
|
|
static constexpr int kSqrtTableSize = 4096;
|
|
static constexpr int kPowTableSize = 1024;
|
|
|
|
// Precomputed tables (initialized once)
|
|
static float sinTable[kSinTableSize];
|
|
static float sqrtTable[kSqrtTableSize]; // sqrt(x) for x in [0, 4]
|
|
static float pow2Table[kPowTableSize]; // 2^x for x in [-4, 4]
|
|
static bool tablesInitialized = false;
|
|
|
|
inline void initTables()
|
|
{
|
|
if (tablesInitialized) return;
|
|
|
|
// Sin table: covers [0, 2*PI]
|
|
for (int i = 0; i < kSinTableSize; ++i)
|
|
{
|
|
float phase = (float) i / (float) kSinTableSize * juce::MathConstants<float>::twoPi;
|
|
sinTable[i] = std::sin (phase);
|
|
}
|
|
|
|
// Sqrt table: covers [0, 4] (sufficient for normalized audio)
|
|
for (int i = 0; i < kSqrtTableSize; ++i)
|
|
{
|
|
float x = (float) i / (float) kSqrtTableSize * 4.0f;
|
|
sqrtTable[i] = std::sqrt (x);
|
|
}
|
|
|
|
// Pow2 table: covers 2^x for x in [-4, 4]
|
|
for (int i = 0; i < kPowTableSize; ++i)
|
|
{
|
|
float x = ((float) i / (float) kPowTableSize) * 8.0f - 4.0f;
|
|
pow2Table[i] = std::pow (2.0f, x);
|
|
}
|
|
|
|
tablesInitialized = true;
|
|
}
|
|
|
|
// Fast sine using lookup table with linear interpolation
|
|
inline float fastSin (float phase)
|
|
{
|
|
// Wrap phase to [0, 2*PI]
|
|
const float twoPi = juce::MathConstants<float>::twoPi;
|
|
while (phase < 0.0f) phase += twoPi;
|
|
while (phase >= twoPi) phase -= twoPi;
|
|
|
|
float idx = phase / twoPi * (float) kSinTableSize;
|
|
int i0 = (int) idx;
|
|
float frac = idx - (float) i0;
|
|
i0 = i0 & (kSinTableSize - 1);
|
|
int i1 = (i0 + 1) & (kSinTableSize - 1);
|
|
|
|
return sinTable[i0] + frac * (sinTable[i1] - sinTable[i0]);
|
|
}
|
|
|
|
// Fast sqrt approximation (for values 0-4, good for normalized audio)
|
|
inline float fastSqrt (float x)
|
|
{
|
|
if (x <= 0.0f) return 0.0f;
|
|
if (x >= 4.0f) return std::sqrt (x); // Fallback for out of range
|
|
|
|
float idx = x * (float) kSqrtTableSize * 0.25f;
|
|
int i0 = (int) idx;
|
|
float frac = idx - (float) i0;
|
|
i0 = juce::jlimit (0, kSqrtTableSize - 2, i0);
|
|
|
|
return sqrtTable[i0] + frac * (sqrtTable[i0 + 1] - sqrtTable[i0]);
|
|
}
|
|
|
|
// Fast inverse sqrt (Quake-style with one Newton-Raphson iteration)
|
|
inline float fastInvSqrt (float x)
|
|
{
|
|
union { float f; uint32_t i; } conv;
|
|
conv.f = x;
|
|
conv.i = 0x5f3759df - (conv.i >> 1);
|
|
conv.f *= 1.5f - (x * 0.5f * conv.f * conv.f);
|
|
return conv.f;
|
|
}
|
|
|
|
// Fast pow(2, x) for x in [-4, 4]
|
|
inline float fastPow2 (float x)
|
|
{
|
|
x = juce::jlimit (-4.0f, 3.99f, x);
|
|
float idx = (x + 4.0f) * (float) kPowTableSize * 0.125f;
|
|
int i0 = (int) idx;
|
|
float frac = idx - (float) i0;
|
|
i0 = juce::jlimit (0, kPowTableSize - 2, i0);
|
|
|
|
return pow2Table[i0] + frac * (pow2Table[i0 + 1] - pow2Table[i0]);
|
|
}
|
|
|
|
// Fast pow approximation using log2/exp2 identity: x^y = 2^(y * log2(x))
|
|
// Only accurate for positive x, and limited y range
|
|
inline float fastPow (float base, float exp)
|
|
{
|
|
if (base <= 0.0f) return 0.0f;
|
|
if (exp == 0.0f) return 1.0f;
|
|
if (exp == 1.0f) return base;
|
|
if (exp == 2.0f) return base * base;
|
|
if (exp == 0.5f) return fastSqrt (base);
|
|
|
|
// Use actual pow for accuracy in edge cases
|
|
return std::pow (base, exp);
|
|
}
|
|
|
|
// Fast tanh approximation (Pade approximant)
|
|
inline float fastTanh (float x)
|
|
{
|
|
if (x < -3.0f) return -1.0f;
|
|
if (x > 3.0f) return 1.0f;
|
|
float x2 = x * x;
|
|
return x * (27.0f + x2) / (27.0f + 9.0f * x2);
|
|
}
|
|
|
|
// Fast exp approximation
|
|
inline float fastExp (float x)
|
|
{
|
|
x = juce::jlimit (-10.0f, 10.0f, x);
|
|
// Schraudolph's approximation
|
|
union { float f; int32_t i; } v;
|
|
v.i = (int32_t) (12102203.0f * x + 1065353216.0f);
|
|
return v.f;
|
|
}
|
|
|
|
} // namespace FastMath
|
|
|
|
// FIX #1: SharedBus struct moved to header file
|
|
// FIX #1: Removed thread_local - buses are now owned by FluteSynthAudioProcessor
|
|
|
|
static std::array<float,12> getTemperamentOffsetsByChoice (int choice);
|
|
static std::array<float,128> expandPitchClassOffsets (const std::array<float,12>& offsets);
|
|
|
|
static float mapHammerStiffnessToModel (float stiffnessSi)
|
|
{
|
|
const float kMin = 4.0e8f;
|
|
const float kMax = 1.0e10f;
|
|
const float logMin = std::log10 (kMin);
|
|
const float logMax = std::log10 (kMax);
|
|
const float logK = std::log10 (juce::jlimit (kMin, kMax, stiffnessSi));
|
|
const float t = (logK - logMin) / (logMax - logMin);
|
|
return 200.0f + t * (20000.0f - 200.0f);
|
|
}
|
|
|
|
static float mapInharmonicityToDispersion (float bCoeff, float baseDispersion)
|
|
{
|
|
const float bMin = 0.00018f;
|
|
const float bMax = 0.40f;
|
|
const float logMin = std::log10 (bMin);
|
|
const float logMax = std::log10 (bMax);
|
|
const float logB = std::log10 (juce::jlimit (bMin, bMax, bCoeff));
|
|
const float t = (logB - logMin) / (logMax - logMin);
|
|
const float scale = 0.25f + 0.85f * t;
|
|
return juce::jlimit (0.0f, 0.50f, baseDispersion * scale);
|
|
}
|
|
|
|
//==============================================================================
|
|
// Voice (VA)
|
|
FluteVoice::FluteVoice (juce::AudioProcessorValueTreeState& state) : apvts (state)
|
|
{
|
|
using FilterType = juce::dsp::StateVariableTPTFilterType;
|
|
svf.setType (FilterType::lowpass);
|
|
adsr.setSampleRate (44100.0); // updated in prepare()
|
|
}
|
|
|
|
bool FluteVoice::canPlaySound (juce::SynthesiserSound* s)
|
|
{
|
|
return dynamic_cast<SimpleSound*> (s) != nullptr;
|
|
}
|
|
|
|
void FluteVoice::startNote (int midiNoteNumber, float velocity, juce::SynthesiserSound*, int)
|
|
{
|
|
const int midiIdx = juce::jlimit (0, 127, midiNoteNumber);
|
|
const float v = juce::jlimit (0.0f, 1.0f, velocity);
|
|
// Velocity response: 24dB range with perceptual curve for expressive but audible dynamics
|
|
const float velPerceptual = std::sqrt (v);
|
|
velocityGain = juce::Decibels::decibelsToGain (juce::jmap (velPerceptual, 0.0f, 1.0f, -18.0f, 0.0f));
|
|
const float cents = juce::jlimit (-100.0f, 100.0f,
|
|
pitchCompOffsetCents + pitchCompSlopeCents * ((float) midiIdx - 60.0f)
|
|
+ noteOffsetsCents[(size_t) midiIdx]);
|
|
const double pitchComp = std::pow (2.0, cents / 1200.0);
|
|
currentFrequency = juce::MidiMessage::getMidiNoteInHertz (midiIdx) * pitchComp;
|
|
setFrequency (currentFrequency);
|
|
phase = 0.0f;
|
|
updateParams();
|
|
adsr.noteOn();
|
|
}
|
|
|
|
void FluteVoice::stopNote (float, bool allowTailOff)
|
|
{
|
|
if (allowTailOff) adsr.noteOff();
|
|
else { adsr.reset(); clearCurrentNote(); }
|
|
}
|
|
|
|
void FluteVoice::renderNextBlock (juce::AudioBuffer<float>& buffer, int startSample, int numSamples)
|
|
{
|
|
if (! adsr.isActive()) { clearCurrentNote(); return; }
|
|
|
|
updateParams();
|
|
|
|
auto* left = buffer.getWritePointer (0, startSample);
|
|
auto* right = (buffer.getNumChannels() > 1) ? buffer.getWritePointer (1, startSample) : nullptr;
|
|
|
|
for (int i = 0; i < numSamples; ++i)
|
|
{
|
|
const float sine = std::sin (phase);
|
|
const float saw = 2.0f * (phaseOverPi - std::floor (phaseOverPi + 0.5f));
|
|
const float square = (sine >= 0.0f ? 1.0f : -1.0f);
|
|
|
|
float osc = sine * wSine + saw * wSaw + square * wSquare;
|
|
|
|
if (preNoiseLin > 0.0f)
|
|
osc += preNoiseLin * (randomUniform() * 2.0f - 1.0f);
|
|
|
|
float filtered = DebugToggles::kEnableVaFilter ? svf.processSample (0, osc) : osc;
|
|
|
|
float env = adsr.getNextSample();
|
|
float y = filtered * env * velocityGain;
|
|
|
|
left[i] += y;
|
|
if (right) right[i] += y;
|
|
|
|
phase += phaseDelta;
|
|
if (phase > juce::MathConstants<float>::twoPi)
|
|
phase -= juce::MathConstants<float>::twoPi;
|
|
phaseOverPi = phase / juce::MathConstants<float>::pi;
|
|
}
|
|
|
|
if (! adsr.isActive()) clearCurrentNote();
|
|
}
|
|
|
|
void FluteVoice::prepare (double sr, int samplesPerBlock, int /*numChannels*/)
|
|
{
|
|
sampleRate = sr;
|
|
adsr.setSampleRate (sr);
|
|
|
|
juce::dsp::ProcessSpec spec;
|
|
spec.sampleRate = sr;
|
|
spec.maximumBlockSize = (juce::uint32) samplesPerBlock;
|
|
spec.numChannels = 1; // voice is mono
|
|
svf.reset();
|
|
svf.prepare (spec);
|
|
|
|
setFrequency (currentFrequency);
|
|
}
|
|
|
|
void FluteVoice::setFrequency (double hz)
|
|
{
|
|
currentFrequency = hz * masterTuneFactor;
|
|
phaseDelta = (float) (juce::MathConstants<double>::twoPi * currentFrequency / sampleRate);
|
|
}
|
|
|
|
float FluteVoice::randomUniform()
|
|
{
|
|
rng = 1664525u * rng + 1013904223u;
|
|
return (rng >> 8) * (1.0f / 16777216.0f);
|
|
}
|
|
|
|
void FluteVoice::updateParams()
|
|
{
|
|
float s = apvts.getRawParameterValue (ParamIDs::oscSine)->load();
|
|
float sa = apvts.getRawParameterValue (ParamIDs::oscSaw)->load();
|
|
float sq = apvts.getRawParameterValue (ParamIDs::oscSquare)->load();
|
|
float sum = std::max (0.0001f, s + sa + sq);
|
|
wSine = s / sum; wSaw = sa / sum; wSquare = sq / sum;
|
|
|
|
juce::ADSR::Parameters p;
|
|
p.attack = apvts.getRawParameterValue (ParamIDs::attack)->load();
|
|
p.decay = apvts.getRawParameterValue (ParamIDs::decay)->load();
|
|
p.sustain = apvts.getRawParameterValue (ParamIDs::sustain)->load();
|
|
p.release = apvts.getRawParameterValue (ParamIDs::release)->load();
|
|
adsr.setParameters (p);
|
|
|
|
float cut = apvts.getRawParameterValue (ParamIDs::cutoff)->load();
|
|
float res = apvts.getRawParameterValue (ParamIDs::resonance)->load();
|
|
svf.setCutoffFrequency (cut);
|
|
svf.setResonance (res);
|
|
|
|
if (DebugToggles::kEnableNoiseDb)
|
|
{
|
|
float nDb = apvts.getRawParameterValue (ParamIDs::noiseDb)->load();
|
|
preNoiseLin = juce::Decibels::decibelsToGain (nDb);
|
|
}
|
|
else
|
|
{
|
|
preNoiseLin = 0.0f;
|
|
}
|
|
}
|
|
|
|
//==============================================================================
|
|
// PM (with amplitude ADSR)
|
|
void WaveguideFlute::prepare (double sr, int blockSize, int numCh)
|
|
{
|
|
sampleRate = sr;
|
|
(void) blockSize; (void) numCh;
|
|
setFrequency (440.0);
|
|
noiseGain = 0.02f;
|
|
jetFeedback = 0.2f;
|
|
dc.reset (sr);
|
|
|
|
adsr.setSampleRate (sr);
|
|
adsr.setParameters (envParams);
|
|
}
|
|
|
|
void WaveguideFlute::setFrequency (double hz)
|
|
{
|
|
frequency = hz * masterTuneFactor;
|
|
double lenSamples = sampleRate / frequency;
|
|
int len = (int) juce::jmax (16.0, std::floor (lenSamples + 0.5));
|
|
delay.setSize (1, len + 4);
|
|
writePos = 0;
|
|
for (int i = 0; i < delay.getNumSamples(); ++i) delay.setSample (0, i, 0.0f);
|
|
}
|
|
|
|
void WaveguideFlute::setEnvParams (float a, float d, float s, float r)
|
|
{
|
|
envParams.attack = juce::jmax (0.0f, a);
|
|
envParams.decay = juce::jmax (0.0f, d);
|
|
envParams.sustain = s;
|
|
envParams.release = juce::jmax (0.0f, r);
|
|
baseRelease = envParams.release;
|
|
adsr.setParameters (envParams);
|
|
}
|
|
|
|
void WaveguideFlute::setReleaseScale (float baseR, float scale)
|
|
{
|
|
baseRelease = juce::jlimit (0.030f, 7.000f, baseR);
|
|
envParams.release = juce::jlimit (0.030f, 7.000f, baseRelease * juce::jlimit (0.2f, 4.0f, scale));
|
|
adsr.setParameters (envParams);
|
|
}
|
|
|
|
void WaveguideFlute::noteOn (int midi, float vel)
|
|
{
|
|
const int midiIdx = juce::jlimit (0, 127, midi);
|
|
const float v = juce::jlimit (0.0f, 1.0f, vel);
|
|
// Velocity response: 24dB range with perceptual curve for expressive but audible dynamics
|
|
const float velPerceptual = std::sqrt (v);
|
|
velocityGain = juce::Decibels::decibelsToGain (juce::jmap (velPerceptual, 0.0f, 1.0f, -18.0f, 0.0f));
|
|
const float cents = juce::jlimit (-100.0f, 100.0f,
|
|
pitchCompOffsetCents + pitchCompSlopeCents * ((float) midiIdx - 60.0f)
|
|
+ noteOffsetsCents[(size_t) midiIdx]);
|
|
const double pitchComp = std::pow (2.0, cents / 1200.0);
|
|
setFrequency (juce::MidiMessage::getMidiNoteInHertz (midiIdx) * pitchComp);
|
|
active = true; // run the loop
|
|
phase = 0.0f;
|
|
adsr.noteOn(); // start amplitude envelope
|
|
}
|
|
|
|
void WaveguideFlute::noteOff()
|
|
{
|
|
// Do not stop immediately; let ADSR release tail the sound.
|
|
adsr.noteOff();
|
|
}
|
|
|
|
void WaveguideFlute::render (juce::AudioBuffer<float>& buffer, int start, int num)
|
|
{
|
|
// If we are neither running nor have an active envelope, nothing to do
|
|
if (!active && !adsr.isActive())
|
|
return;
|
|
|
|
auto* L = buffer.getWritePointer (0, start);
|
|
auto* R = (buffer.getNumChannels() > 1 ? buffer.getWritePointer (1, start) : nullptr);
|
|
|
|
for (int i = 0; i < num; ++i)
|
|
{
|
|
const int len = delay.getNumSamples();
|
|
const int readPos = (writePos + 1) % len;
|
|
float y = delay.getSample (0, readPos);
|
|
|
|
// Jet nonlinearity (simple tanh) + weak noise excitation
|
|
float breath = noiseGain * (randomUniform() * 2.0f - 1.0f);
|
|
float jet = std::tanh (y * 1.6f + breath);
|
|
|
|
// Feedback + loss
|
|
float next = 0.996f * (jet * jetFeedback + y * (1.0f - jetFeedback));
|
|
|
|
// DC-block
|
|
if (DebugToggles::kEnablePmDcBlock)
|
|
next = dc.process (next);
|
|
|
|
// write back
|
|
delay.setSample (0, writePos, next);
|
|
writePos = (writePos + 1) % len;
|
|
|
|
// Amplitude ADSR
|
|
float env = adsr.getNextSample();
|
|
float out = next * env * velocityGain;
|
|
|
|
L[i] += out;
|
|
if (R) R[i] += out;
|
|
}
|
|
|
|
// If envelope has fully finished, stop running the loop next time
|
|
if (!adsr.isActive())
|
|
active = false;
|
|
}
|
|
|
|
float WaveguideFlute::randomUniform()
|
|
{
|
|
rng = 1664525u * rng + 1013904223u;
|
|
return (rng >> 8) * (1.0f / 16777216.0f);
|
|
}
|
|
|
|
//==============================================================================
|
|
// pm2 stiff-string
|
|
// FIX #1 & #4: Removed static beginSharedBuses - buses now owned by processor and passed to voices
|
|
|
|
void Pm2StringBank::prepare (double sr, int blockSize, int numCh)
|
|
{
|
|
sampleRate = sr;
|
|
adsr.setSampleRate (sr);
|
|
adsr.setParameters (envParams);
|
|
postLpfEnv.setSampleRate (sr);
|
|
postLpfEnv.setParameters (postLpfEnvParams);
|
|
active = false;
|
|
keyHeld = false;
|
|
useReleaseLoopGain = false;
|
|
releaseDelaySamples = 0;
|
|
noteLifeSamples = 0;
|
|
damperDelaySamples = 0;
|
|
sustainPedalDown = false;
|
|
loopEnergySmoothed = 0.0f;
|
|
if (sampleRate > 0.0)
|
|
{
|
|
pedalChangeSamplesTotal = juce::jmax (1, (int) std::round (0.015 * sampleRate));
|
|
pedalChangeSamplesRemaining = 0;
|
|
pedalChangeFade = 1.0f;
|
|
const float tauSamples = (float) (0.050 * sampleRate); // ~50ms smoothing
|
|
loopEnergySmoothCoeff = tauSamples > 1.0f ? (1.0f - std::exp (-1.0f / tauSamples)) : 1.0f;
|
|
}
|
|
else
|
|
{
|
|
pedalChangeSamplesTotal = 0;
|
|
pedalChangeSamplesRemaining = 0;
|
|
pedalChangeFade = 1.0f;
|
|
loopEnergySmoothCoeff = 1.0f;
|
|
}
|
|
const int maxDelayLen = juce::jmax (8, (int) std::ceil ((sampleRate / 20.0) + 4.0));
|
|
for (auto& s : strings)
|
|
{
|
|
s.delay.clear();
|
|
s.delay.reserve ((size_t) maxDelayLen);
|
|
s.writePos = 0;
|
|
s.delaySamples = 0.0;
|
|
s.loopGain = 0.999f;
|
|
s.baseGain = 1.0f;
|
|
s.panGainL = 0.7071f;
|
|
s.panGainR = 0.7071f;
|
|
s.loopGainSmoothed = 0.999f;
|
|
s.damperLossPrev = damper.lossOff;
|
|
s.damperSoftenCountdown = 0;
|
|
s.damperSoftenState = 0.0f;
|
|
s.apStages = 1;
|
|
s.dc.reset (sr);
|
|
for (auto& ap : s.ap) ap = {};
|
|
s.lpState = 0.0f;
|
|
s.lpCoeff = 0.25f;
|
|
s.interpAlpha = 0.0f; // Thiran allpass interpolator coefficient
|
|
s.interpZ1 = 0.0f; // Thiran allpass interpolator state
|
|
s.toneInjectSamplesLeft = 0;
|
|
s.toneInjectPhase = 0.0f;
|
|
s.toneInjectPhaseDelta = 0.0f;
|
|
s.toneInjectGain = 0.0f;
|
|
s.hammer = {};
|
|
s.energyGainSmoothed = 1.0f;
|
|
s.duplex.buf.clear();
|
|
s.duplex.buf.reserve ((size_t) maxDelayLen);
|
|
// Initialize fundamental resonator state (IMPROVEMENT 2)
|
|
s.fundResonatorState1 = 0.0f;
|
|
s.fundResonatorState2 = 0.0f;
|
|
s.fundResonatorCoeff = 0.0f;
|
|
s.fundResonatorGain = 0.0f;
|
|
}
|
|
|
|
juce::dsp::ProcessSpec spec { sr, (juce::uint32) blockSize, (juce::uint32) juce::jmax (1, numCh) };
|
|
noteHpf.reset();
|
|
noteHpf.prepare (spec);
|
|
noteHpf.setType (juce::dsp::StateVariableTPTFilterType::highpass);
|
|
noteHpf.setResonance (0.707f);
|
|
noteHpfNumChannels = (int) spec.numChannels;
|
|
auto prepBp = [&spec] (juce::dsp::StateVariableTPTFilter<float>& f)
|
|
{
|
|
f.reset();
|
|
f.prepare (spec);
|
|
f.setType (juce::dsp::StateVariableTPTFilterType::bandpass);
|
|
};
|
|
prepBp (couplingBpL);
|
|
prepBp (couplingBpR);
|
|
prepBp (sympBpL);
|
|
prepBp (sympBpR);
|
|
updateNoteHpf (currentMidiNote);
|
|
lastOutL = 0.0f;
|
|
lastOutR = 0.0f;
|
|
postLpfStateL = 0.0f;
|
|
postLpfStateR = 0.0f;
|
|
|
|
// Initialize pink noise state (IMPROVEMENT 1)
|
|
pinkNoiseState.fill (0.0f);
|
|
pinkNoiseCounter = 0;
|
|
|
|
// Initialize body resonance noise state (IMPROVEMENT 3)
|
|
bodyNoiseState = 0.0f;
|
|
bodyNoiseLp1 = 0.0f;
|
|
bodyNoiseLp2 = 0.0f;
|
|
bodyNoiseHp = 0.0f;
|
|
bodyNoiseRng = 0x12345678;
|
|
}
|
|
|
|
void Pm2StringBank::setParams (const PresetModel::PmString& p)
|
|
{
|
|
params = p;
|
|
}
|
|
|
|
void Pm2StringBank::setHammerParams (const PresetModel::HammerModel& h)
|
|
{
|
|
hammer = h;
|
|
}
|
|
|
|
void Pm2StringBank::setFeltParams (const PresetModel::FeltModel& f)
|
|
{
|
|
if (DebugToggles::kEnableFelt)
|
|
{
|
|
felt = f;
|
|
}
|
|
else
|
|
{
|
|
felt.preload = 0.0f;
|
|
felt.stiffness = 1.0f;
|
|
felt.hysteresis = 0.0f;
|
|
felt.maxAmp = 10.0f;
|
|
}
|
|
}
|
|
|
|
void Pm2StringBank::setDuplexParams (const PresetModel::Duplex& d)
|
|
{
|
|
duplex = d;
|
|
}
|
|
|
|
void Pm2StringBank::setWdfParams (const PresetModel::WdfModel& w)
|
|
{
|
|
wdf = w;
|
|
}
|
|
|
|
void Pm2StringBank::setCouplingParams (const PresetModel::Coupling& c)
|
|
{
|
|
couplingGain = juce::jlimit (0.0f, 0.2f, c.gain);
|
|
couplingQ = juce::jlimit (0.2f, 5.0f, c.q);
|
|
sympGain = juce::jlimit (0.0f, 0.3f, c.sympGain);
|
|
sympHighDamp = juce::jlimit (0.0f, 1.0f, c.sympHighDamp);
|
|
}
|
|
|
|
void Pm2StringBank::setDamperParams (const PresetModel::Damper& d)
|
|
{
|
|
damper = d;
|
|
float tauSamples = (float) (damper.smoothMs * 0.001 * sampleRate);
|
|
damperSmoothCoeff = tauSamples > 1.0f ? (1.0f - std::exp (-1.0f / juce::jmax (1.0f, tauSamples))) : 1.0f;
|
|
damperLiftSmoothCoeff = damperSmoothCoeff;
|
|
damperSoftenSamples = (int) std::round (damper.softenMs * 0.001 * sampleRate);
|
|
damperSoftenA = std::exp (-2.0f * juce::MathConstants<float>::pi * juce::jlimit (40.0f, 8000.0f, damper.softenHz) / (float) juce::jmax (20.0, sampleRate));
|
|
}
|
|
|
|
void Pm2StringBank::setDamperLift (float lift)
|
|
{
|
|
damperLiftTarget = juce::jlimit (0.0f, 1.0f, lift);
|
|
}
|
|
|
|
void Pm2StringBank::beginVoiceStealFade (float ms)
|
|
{
|
|
const float clampedMs = juce::jlimit (2.0f, 80.0f, ms);
|
|
stealFadeSamples = juce::jmax (1, (int) std::round (clampedMs * 0.001f * sampleRate));
|
|
stealFadeRemaining = stealFadeSamples;
|
|
}
|
|
|
|
void Pm2StringBank::setSoftPedal (bool down, const PresetModel::UnaCorda& una)
|
|
{
|
|
softPedalDown = down;
|
|
unaCorda = una;
|
|
}
|
|
|
|
void Pm2StringBank::setSustainPedalDown (bool down)
|
|
{
|
|
if (sustainPedalDown != down)
|
|
{
|
|
if (pedalChangeSamplesTotal <= 0 && sampleRate > 0.0)
|
|
pedalChangeSamplesTotal = juce::jmax (1, (int) std::round (0.015 * sampleRate));
|
|
pedalChangeSamplesRemaining = pedalChangeSamplesTotal;
|
|
pedalChangeFade = 0.0f;
|
|
}
|
|
sustainPedalDown = down;
|
|
if (! sustainPedalDown && ! keyHeld)
|
|
{
|
|
useReleaseLoopGain = true;
|
|
adsr.noteOff();
|
|
postLpfEnv.noteOff();
|
|
}
|
|
}
|
|
|
|
void Pm2StringBank::setEnvParams (float attack, float decay, float sustain, float release)
|
|
{
|
|
envParams.attack = juce::jmax (0.0f, attack);
|
|
envParams.decay = juce::jmax (0.0f, decay);
|
|
envParams.sustain = sustain;
|
|
envParams.release = juce::jmax (0.0f, release);
|
|
baseRelease = envParams.release;
|
|
adsr.setParameters (envParams);
|
|
|
|
// Store decay for influencing physical model T60
|
|
// Use the provided decay directly (no clamp) to scale T60.
|
|
if (envParams.decay > 0.0f)
|
|
decayTimeScale = juce::jlimit (0.5f, 1.5f, envParams.decay / 2.0f);
|
|
else
|
|
decayTimeScale = 0.0f;
|
|
}
|
|
|
|
void Pm2StringBank::setReleaseScale (float baseR, float scale)
|
|
{
|
|
baseRelease = juce::jlimit (0.030f, 7.000f, baseR);
|
|
envParams.release = juce::jlimit (0.030f, 7.000f, baseRelease * juce::jlimit (0.2f, 4.0f, scale));
|
|
adsr.setParameters (envParams);
|
|
}
|
|
|
|
void Pm2StringBank::updateNoteHpf (int midiNoteNumber)
|
|
{
|
|
if (sampleRate <= 0.0)
|
|
return;
|
|
|
|
const float note = (float) juce::jlimit (0, 127, midiNoteNumber);
|
|
const float norm = juce::jlimit (0.0f, 1.0f, (note - 21.0f) / (108.0f - 21.0f));
|
|
const float eased = std::pow (norm, 1.5f);
|
|
noteHpfCutoff = juce::jlimit (30.0f, 70.0f, 30.0f + eased * (70.0f - 30.0f));
|
|
noteHpf.setCutoffFrequency (noteHpfCutoff);
|
|
}
|
|
|
|
void Pm2StringBank::resizeString (StringState& s, double samples)
|
|
{
|
|
const int len = juce::jmax (8, (int) std::ceil (samples + 4.0));
|
|
s.delay.resize ((size_t) len);
|
|
std::fill (s.delay.begin(), s.delay.end(), 0.0f);
|
|
s.writePos = 0;
|
|
s.delaySamples = samples;
|
|
// FIX #5: Removed dc.reset() - resetting the DC blocker on every note-on
|
|
// causes clicks when there's residual DC offset being filtered.
|
|
// DC blocker state is now only reset in prepare().
|
|
|
|
// Thiran allpass interpolator coefficient for fractional delay
|
|
// Formula: alpha = (1 - d) / (1 + d) where d is fractional delay in (0, 1)
|
|
const double intPart = std::floor (samples);
|
|
double frac = samples - intPart;
|
|
// Ensure frac is in valid range for stable allpass (avoid d=0 or d=1)
|
|
frac = juce::jlimit (0.1, 0.9, frac);
|
|
s.interpAlpha = (float) ((1.0 - frac) / (1.0 + frac));
|
|
s.interpZ1 = 0.0f; // Reset interpolator state
|
|
}
|
|
|
|
static inline float mixLinear (float a, float b, float t)
|
|
{
|
|
return a * (1.0f - t) + b * t;
|
|
}
|
|
|
|
static inline float softClip (float x, float limit)
|
|
{
|
|
if (! DebugToggles::kEnablePm2SoftClip)
|
|
return x;
|
|
const float safeLimit = juce::jmax (1.0e-6f, limit);
|
|
return safeLimit * FastMath::fastTanh (x / safeLimit);
|
|
}
|
|
|
|
static PresetModel::WdfModel sanitizeWdf (PresetModel::WdfModel w)
|
|
{
|
|
auto c = PresetModel::clamp;
|
|
auto finiteOr = [] (float v, float fallback) { return std::isfinite (v) ? v : fallback; };
|
|
w.enabled = w.enabled;
|
|
w.blend = c (finiteOr (w.blend, 0.0f), 0.0f, 1.0f);
|
|
w.loss = c (finiteOr (w.loss, 0.0f), 0.0f, 0.1f);
|
|
w.bridgeMass = c (finiteOr (w.bridgeMass, 1.0f), 0.1f, 10.0f);
|
|
w.plateStiffness = c (finiteOr (w.plateStiffness, 1.0f), 0.1f, 5.0f);
|
|
return w;
|
|
}
|
|
|
|
// Lightweight WDF-ish burst generator (offline prototype port)
|
|
static std::vector<float> buildWdfBurst (double sampleRate,
|
|
double baseHz,
|
|
float velocity,
|
|
int totalExcite,
|
|
float loss,
|
|
float bridgeMass,
|
|
float plateStiffness,
|
|
uint32_t& rng)
|
|
{
|
|
std::vector<float> out ((size_t) juce::jmax (1, totalExcite), 0.0f);
|
|
if (! std::isfinite (sampleRate) || sampleRate <= 0.0 || ! std::isfinite (baseHz) || baseHz <= 0.0f)
|
|
return out;
|
|
|
|
const double delaySamples = sampleRate / juce::jmax (20.0, baseHz);
|
|
const int delayLen = juce::jmax (8, (int) std::ceil (delaySamples));
|
|
std::vector<float> delay ((size_t) delayLen, 0.0f);
|
|
int write = 0;
|
|
const float frac = (float) (delaySamples - std::floor (delaySamples));
|
|
|
|
const float loopLoss = juce::jlimit (0.0f, 0.1f, loss);
|
|
const float loopGain = std::exp (-loopLoss);
|
|
float lpState = 0.0f;
|
|
const float lpCoeff = 0.25f;
|
|
|
|
auto rand01 = [&rng]() -> float
|
|
{
|
|
rng = 1664525u * rng + 1013904223u;
|
|
return (float) ((rng >> 8) * (1.0 / 16777216.0)); // [0..1)
|
|
};
|
|
|
|
const float R_h = 1.0f;
|
|
const float R_s = 1.0f + loopLoss * 10.0f;
|
|
const float R_b = juce::jlimit (0.05f, 10.0f, bridgeMass);
|
|
float bx = 0.0f, bv = 0.0f;
|
|
const float mass = juce::jmax (0.05f, bridgeMass);
|
|
const float stiff = juce::jmax (0.1f, plateStiffness);
|
|
const float damp = loopLoss * 20.0f;
|
|
|
|
const float preload = 0.08f;
|
|
const float stiffness = 2.4f;
|
|
const float hysteresis = 0.15f;
|
|
const float feltMax = 1.4f;
|
|
const float maxDelta = feltMax * 0.5f;
|
|
float feltState = 0.0f;
|
|
|
|
const int attack = juce::jmax (1, (int) std::round (0.006 * sampleRate));
|
|
const int decay = juce::jmax (1, (int) std::round (0.010 * sampleRate));
|
|
const int release= juce::jmax (1, (int) std::round (0.006 * sampleRate));
|
|
|
|
auto delayRead = [&delay, delayLen](int idx, float fracPart) -> float
|
|
{
|
|
const int i0 = (idx + delayLen) % delayLen;
|
|
const int i1 = (i0 + 1) % delayLen;
|
|
return delay[(size_t) i0] * (1.0f - fracPart) + delay[(size_t) i1] * fracPart;
|
|
};
|
|
|
|
for (int n = 0; n < (int) out.size(); ++n)
|
|
{
|
|
float env = 0.0f;
|
|
if (n < attack) env = (float) n / (float) attack;
|
|
else if (n < attack + decay)
|
|
{
|
|
const float t = (float) (n - attack) / (float) decay;
|
|
env = 1.0f - 0.9f * t;
|
|
}
|
|
else if (n < attack + decay + release)
|
|
{
|
|
const float t = (float) (n - attack - decay) / (float) release;
|
|
env = juce::jmax (0.0f, 0.1f * (1.0f - t));
|
|
}
|
|
|
|
// hammer noise
|
|
float noise = (rand01() * 2.0f - 1.0f) * env * std::pow (juce::jlimit (0.0f, 1.0f, velocity), 1.2f);
|
|
|
|
// felt-ish shaping
|
|
float mag = std::pow (preload + std::abs (noise), stiffness) - std::pow (preload, stiffness);
|
|
float shaped = (1.0f - hysteresis) * mag + hysteresis * feltState;
|
|
feltState = shaped;
|
|
float feltOut = softClip ((float) std::copysign (shaped, noise), feltMax);
|
|
float delta = feltOut - feltState;
|
|
delta = softClip (delta, maxDelta);
|
|
feltOut = feltState + delta;
|
|
|
|
float a_h = feltOut * R_h;
|
|
float a_s = delayRead (write, frac);
|
|
float a_b = bx; // displacement proxy
|
|
|
|
const float denom = (1.0f / R_h) + (1.0f / R_s) + (1.0f / R_b);
|
|
const float Vj = (a_h / R_h + a_s / R_s + a_b / R_b) / juce::jmax (1.0e-6f, denom);
|
|
const float b_h = 2.0f * Vj - a_h;
|
|
const float b_s = 2.0f * Vj - a_s;
|
|
const float b_b = 2.0f * Vj - a_b;
|
|
(void) b_h; // reserved for future refinement
|
|
|
|
// bridge integrator (semi-implicit)
|
|
float drive = (b_b - a_b) * 0.5f / juce::jmax (1.0e-6f, R_b);
|
|
float dt = 1.0f / (float) sampleRate;
|
|
float acc = (drive - stiff * bx - damp * bv) / juce::jmax (0.05f, mass);
|
|
bv = softClip (bv + dt * acc, 20.0f);
|
|
bx = softClip (bx + dt * bv, 5.0f);
|
|
|
|
// loop
|
|
lpState = lpState + lpCoeff * (b_s - lpState);
|
|
float loopSample = lpState * loopGain;
|
|
delay[(size_t) write] = loopSample;
|
|
write = (write + 1) % delayLen;
|
|
|
|
float mixed = mixLinear (b_s * 0.5f + a_s * 0.5f, bx * 0.5f, 0.4f);
|
|
if (! std::isfinite (mixed))
|
|
mixed = 0.0f;
|
|
out[(size_t) n] = softClip (mixed, 2.0f);
|
|
}
|
|
|
|
return out;
|
|
}
|
|
|
|
void Pm2StringBank::noteOn (int midiNoteNumber, float velocity)
|
|
{
|
|
keyHeld = true;
|
|
keyReleaseSamplesRemaining = 0;
|
|
keyOffFadeSamplesRemaining = 0;
|
|
pendingNoteOff = false;
|
|
minNoteOffRemaining = minNoteDurationSamples;
|
|
currentMidiNote = midiNoteNumber;
|
|
useReleaseLoopGain = false;
|
|
loopEnergySmoothed = 0.0f;
|
|
// If this voice was previously stolen, cancel any pending steal fade.
|
|
stealFadeRemaining = 0;
|
|
stealFadeSamples = 0;
|
|
stealInProgress = false;
|
|
updateNoteHpf (midiNoteNumber);
|
|
damperLiftSmoothed = damperLiftTarget;
|
|
|
|
// CPU OPTIMIZATION: String count varies by register for CPU efficiency
|
|
// - Treble (>= C7/96): 2 strings (real pianos use 2-3, less difference audible)
|
|
// - Bass gradual fade: G2 down to D#2, third string fades from 80% to 0%
|
|
// - Deep bass (<= D#2/39): 2 strings only
|
|
// - Mid range: Full 3 strings for rich chorus effect
|
|
const int trebleSplitNote = 96; // C7 - use 2 strings above this
|
|
const int bassFadeStartNote = 43; // G2 - third string at 80%
|
|
const int bassFadeEndNote = 36; // C2 - switch to 2 strings at and below this
|
|
|
|
// Calculate third string gain scale for bass notes (1.0 = full, 0.0 = muted)
|
|
// Gradual fade: G2=0.8, F#2=0.6, F2=0.4, E2=0.2, D#2=0.0, etc.
|
|
if (midiNoteNumber >= trebleSplitNote)
|
|
{
|
|
currentNumStrings = 2;
|
|
thirdStringGainScale = 0.0f;
|
|
}
|
|
else if (midiNoteNumber > bassFadeStartNote)
|
|
{
|
|
// Above G2: full third string
|
|
currentNumStrings = 3;
|
|
thirdStringGainScale = 1.0f;
|
|
}
|
|
else if (midiNoteNumber <= bassFadeEndNote)
|
|
{
|
|
// C2 and below: use only 2 strings
|
|
currentNumStrings = 2;
|
|
thirdStringGainScale = 0.0f;
|
|
}
|
|
else
|
|
{
|
|
// Gradual fade zone from G2 (43) down to C#2 (37)
|
|
// G2=43 -> 0.80, F#2=42 -> 0.60, F2=41 -> 0.40, E2=40 -> 0.20
|
|
// D#2=39 -> 0.0 (effectively 2 strings), D2=38 -> 0.0, C#2=37 -> 0.0
|
|
currentNumStrings = 3; // Keep 3 strings for smooth crossfade
|
|
const float fadeRange = (float) (bassFadeStartNote - bassFadeEndNote); // 43-36 = 7 semitones
|
|
const float notePos = (float) (midiNoteNumber - bassFadeEndNote); // Position in fade zone
|
|
// Scale so G2 (43) = 0.8 and C#2 (37) = ~0.0
|
|
thirdStringGainScale = juce::jlimit (0.0f, 0.8f, (notePos / fadeRange) * 0.8f);
|
|
|
|
// If gain is very small, use 2 strings to save CPU
|
|
if (thirdStringGainScale < 0.05f)
|
|
{
|
|
currentNumStrings = 2;
|
|
thirdStringGainScale = 0.0f;
|
|
}
|
|
}
|
|
|
|
const int midiIdx = juce::jlimit (0, 127, midiNoteNumber);
|
|
const float pitchCents = juce::jlimit (-100.0f, 100.0f,
|
|
pitchCompOffsetCents + pitchCompSlopeCents * ((float) midiIdx - 60.0f)
|
|
+ noteOffsetsCents[(size_t) midiIdx]);
|
|
const double pitchComp = std::pow (2.0, pitchCents / 1200.0);
|
|
const double baseHz = juce::MidiMessage::getMidiNoteInHertz (midiIdx) * pitchComp * masterTuneFactor;
|
|
const float v = juce::jlimit (0.0f, 1.0f, velocity);
|
|
if (v < 0.4f)
|
|
lowVelSkip = (randomUniform() < 0.25f);
|
|
else
|
|
lowVelSkip = false;
|
|
// Velocity response with moderate dynamic range:
|
|
// - 24dB range keeps soft notes audible while still expressive
|
|
// - Applied perceptual curve (sqrt) so velocity feels more natural to play
|
|
const float velPerceptual = std::sqrt (v);
|
|
velocityGain = juce::Decibels::decibelsToGain (juce::jmap (velPerceptual, 0.0f, 1.0f, -18.0f, 0.0f));
|
|
if (PhysicsToggles::kUsePerNotePhysics)
|
|
{
|
|
const int midiVel = juce::jlimit (0, 127, (int) std::round (v * 127.0f));
|
|
const float hammerVel = PianoPhysics::Velocity::midiToHammerVelocity (midiVel);
|
|
const float brightnessTilt = juce::jlimit (0.0f, 1.0f, PianoPhysics::Velocity::getBrightnessTilt (hammerVel));
|
|
const float minHzBase = (float) baseHz * 1.5f;
|
|
const float maxHzBase = (float) baseHz * (4.0f + 6.0f * brightnessTilt);
|
|
float minHz = juce::jlimit (500.0f, 6000.0f, minHzBase);
|
|
float maxHz = juce::jlimit (2500.0f, 16000.0f, maxHzBase);
|
|
if (sustainPedalDown)
|
|
{
|
|
minHz *= 1.45f;
|
|
maxHz *= 1.35f;
|
|
}
|
|
postLpfMinHz = juce::jlimit (500.0f, 8000.0f, minHz);
|
|
postLpfMaxHz = juce::jlimit (2500.0f, 18000.0f, maxHz);
|
|
|
|
const float t60 = PianoPhysics::StringDecay::getT60 ((float) baseHz, midiNoteNumber);
|
|
const float brightDecay = juce::jlimit (0.10f, 1.4f, t60 * 0.05f);
|
|
const float brightRelease = juce::jlimit (0.08f, 1.0f, brightDecay * 0.70f);
|
|
postLpfEnvParams.attack = 0.003f;
|
|
postLpfEnvParams.decay = brightDecay;
|
|
float sustain = juce::jlimit (0.08f, 0.35f, 0.12f + 0.18f * brightnessTilt);
|
|
if (sustainPedalDown)
|
|
sustain = juce::jlimit (0.15f, 0.60f, sustain + 0.14f);
|
|
postLpfEnvParams.sustain = sustain;
|
|
postLpfEnvParams.release = brightRelease;
|
|
postLpfEnv.setParameters (postLpfEnvParams);
|
|
}
|
|
noteFadeSamplesTotal = juce::jmax (1, (int) std::round (0.001 * sampleRate));
|
|
noteFadeSamplesRemaining = noteFadeSamplesTotal;
|
|
postLpfEnv.noteOn();
|
|
postLpfStateL = 0.0f;
|
|
postLpfStateR = 0.0f;
|
|
const float bpFreq = juce::jlimit (60.0f, 6000.0f, (float) baseHz * 1.1f);
|
|
couplingBpL.setCutoffFrequency (bpFreq);
|
|
couplingBpR.setCutoffFrequency (bpFreq);
|
|
couplingBpL.setResonance (couplingQ);
|
|
couplingBpR.setResonance (couplingQ);
|
|
sympBpL.setCutoffFrequency (bpFreq);
|
|
sympBpR.setCutoffFrequency (bpFreq);
|
|
sympBpL.setResonance (juce::jmax (0.3f, couplingQ * 0.8f));
|
|
sympBpR.setResonance (juce::jmax (0.3f, couplingQ * 0.8f));
|
|
// Lift velocity curve: 35% minimum excitation floor ensures soft notes have enough energy
|
|
// to properly excite the string model, while velocityGain handles actual loudness dynamics
|
|
const float velCurveRaw = std::pow (v, juce::jlimit (0.6f, 2.5f, hammer.gamma));
|
|
const float velCurve = 0.35f + 0.65f * velCurveRaw; // 35% floor, scales to 100%
|
|
const float stiffnessScale = juce::jlimit (0.4f, 2.5f, 1.0f + hammer.stiffnessVelScale * (velCurve - 0.5f) * 2.0f);
|
|
const float preloadScale = juce::jlimit (0.3f, 2.5f, 1.0f + hammer.preloadVelScale * (velCurve - 0.5f) * 2.0f);
|
|
const float toneScale = juce::jlimit (0.5f, 2.0f, 1.0f + hammer.toneVelScale * (velCurve - 0.5f) * 2.0f);
|
|
const float toneHzEff = juce::jlimit (hammer.toneMinHz, hammer.toneMaxHz, hammer.toneHz * toneScale);
|
|
float contactScale = 1.0f;
|
|
if (PhysicsToggles::kUsePerNotePhysics)
|
|
{
|
|
const float exp = PianoPhysics::Hammer::getExponent (midiNoteNumber);
|
|
contactScale = juce::jlimit (0.6f, 1.4f, PianoPhysics::Hammer::getContactDurationScale (v, exp));
|
|
}
|
|
const int hammerWindowSamples = juce::jmax (1, (int) std::round (juce::jlimit (5.0f, 45.0f, hammer.attackWindowMs * contactScale) * 0.001 * sampleRate));
|
|
{
|
|
const float semisFrom60 = (float) midiNoteNumber - 60.0f;
|
|
// FIXED: Allow larger negative slopes for realistic treble attenuation
|
|
float slopeDbPerSemi = juce::jlimit (-0.25f, 0.1f, loudnessSlopeDbPerSemi);
|
|
slopeDbPerSemi *= 0.95f; // slightly lighter pitch-dependent attenuation
|
|
if (sustainPedalDown)
|
|
slopeDbPerSemi *= 0.85f; // slightly lighter sustain-dependent reduction
|
|
// FIXED: Tighter limits - prevent extreme boosts, allow more attenuation for treble
|
|
pitchLoudnessGain = juce::jlimit (0.6f, 1.25f,
|
|
juce::Decibels::decibelsToGain (semisFrom60 * slopeDbPerSemi));
|
|
}
|
|
|
|
// Frequency-dependent loop loss scalar (no filter in loop)
|
|
{
|
|
const float noteNorm = juce::jlimit (0.0f, 1.0f, ((float) midiNoteNumber - 21.0f) / (108.0f - 21.0f));
|
|
const float curve = noteNorm * noteNorm;
|
|
float maxLoss = 0.004f; // mild high-note darkening without filter in loop
|
|
if (PhysicsToggles::kUsePerNotePhysics && sustainPedalDown)
|
|
maxLoss = 0.002f;
|
|
freqLossScalar = juce::jlimit (0.98f, 1.0f, 1.0f - maxLoss * curve);
|
|
// Compensate for higher loop rates so treble notes aren't over-damped.
|
|
const float lossComp = juce::jlimit (0.0f, 1.0f, (noteNorm - 0.55f) / 0.45f);
|
|
freqLossScalar = mixLinear (freqLossScalar, 1.0f, 0.65f * lossComp);
|
|
}
|
|
|
|
// Note-dependent stereo width: bass wide, treble narrow
|
|
const float noteForWidth = (float) midiNoteNumber;
|
|
const float widthPos = juce::jlimit (0.0f, 1.0f,
|
|
(noteForWidth - params.stereoWidthNoteLo)
|
|
/ juce::jmax (1.0f, params.stereoWidthNoteHi - params.stereoWidthNoteLo));
|
|
stereoWidth = mixLinear (params.stereoWidthLow, params.stereoWidthHigh, widthPos);
|
|
|
|
// FIXED: Calculate normalization factor for multi-string summing
|
|
// Sum the gains of all active strings and normalize so total energy stays consistent
|
|
{
|
|
float gainSum = 0.0f;
|
|
for (int i = 0; i < currentNumStrings; ++i)
|
|
gainSum += params.gain[(size_t) i];
|
|
// Use sqrt for energy-based normalization (not amplitude-based)
|
|
// This prevents level buildup when multiple strings are summed
|
|
const float rawNorm = (gainSum > 0.001f) ? (1.0f / std::sqrt (gainSum)) : 1.0f;
|
|
stringGainNorm = mixLinear (1.0f, rawNorm, 0.6f);
|
|
}
|
|
|
|
for (int i = 0; i < currentNumStrings; ++i)
|
|
{
|
|
float detune = params.detuneCents[(size_t) i];
|
|
if (softPedalDown)
|
|
detune += unaCorda.detuneCents;
|
|
double hz = baseHz * std::pow (2.0, detune / 1200.0);
|
|
double targetDelay = sampleRate / juce::jmax (20.0, hz);
|
|
|
|
// Inharmonicity-driven dispersion for physical realism.
|
|
float g = 0.0f;
|
|
if (PhysicsToggles::kUsePerNotePhysics)
|
|
{
|
|
const float bCoeff = PianoPhysics::Inharmonicity::getB (midiNoteNumber);
|
|
g = mapInharmonicityToDispersion (bCoeff, params.dispersionAmt);
|
|
}
|
|
else
|
|
{
|
|
// Heuristic dispersion curve (legacy).
|
|
const float noteNormDisp = juce::jlimit (0.0f, 1.0f, ((float) midiNoteNumber - 21.0f) / (108.0f - 21.0f));
|
|
const float bassInharm = std::pow (juce::jmax (0.0f, 1.0f - noteNormDisp * 1.3f), 1.4f);
|
|
const float trebleOnset = 0.72f; // ~C6
|
|
const float trebleInharm = (noteNormDisp > trebleOnset)
|
|
? std::pow ((noteNormDisp - trebleOnset) / (1.0f - trebleOnset), 1.8f) * 0.25f
|
|
: 0.0f;
|
|
const float inharmCurve = bassInharm + trebleInharm;
|
|
g = juce::jlimit (0.0f, 0.40f, params.dispersionAmt * inharmCurve);
|
|
}
|
|
|
|
// Additional frequency scaling for very low notes (more dispersion needed)
|
|
const float freqScale = juce::jlimit (0.7f, 1.2f, 1.0f + 0.2f * (1.0f - juce::jmin (1.0f, (float) hz / 200.0f)));
|
|
g *= freqScale;
|
|
|
|
if (! DebugToggles::kEnablePm2Dispersion)
|
|
g = 0.0f;
|
|
if (highPolyMode)
|
|
g *= 0.4f;
|
|
|
|
// Gentler taper for mid-high frequencies (was too aggressive)
|
|
float taper = (float) (1.0 - 0.20 * juce::jlimit (0.0, 1.0, (hz - 600.0) / 2200.0));
|
|
const float noteNormDisp = juce::jlimit (0.0f, 1.0f, ((float) midiNoteNumber - 21.0f) / (108.0f - 21.0f));
|
|
const float dispScale = mixLinear (1.0f, dispersionHighMult, std::pow (noteNormDisp, dispersionPow));
|
|
float gScaled = juce::jlimit (0.0f, 0.50f, g * taper * dispScale);
|
|
const float dispTrebleComp = 1.0f - 0.35f * juce::jlimit (0.0f, 1.0f, ((float) midiNoteNumber - 72.0f) / 36.0f);
|
|
gScaled *= dispTrebleComp;
|
|
if (! DebugToggles::kEnablePm2Dispersion)
|
|
gScaled = 0.0f;
|
|
|
|
// FIX: Smooth transition of allpass stages instead of hard cutoffs at specific notes
|
|
// Previously had abrupt changes at notes 76, 84, 88 causing audible discontinuities
|
|
int apStagesEffective = juce::jlimit (1, 4, params.apStages);
|
|
// Gradual reduction from note 72 to 96 (C5 to C7)
|
|
const float apFadeNorm = juce::jlimit (0.0f, 1.0f, ((float) currentMidiNote - 72.0f) / 24.0f);
|
|
if (apFadeNorm > 0.0f)
|
|
{
|
|
// Smoothly reduce stages: 4->3 by note 80, 3->2 by note 88, 2->1 by note 96
|
|
const float targetStages = 4.0f - apFadeNorm * 3.0f;
|
|
apStagesEffective = juce::jmax (1, (int) std::round (targetStages));
|
|
}
|
|
if (economyMode && currentMidiNote > 72)
|
|
apStagesEffective = juce::jmin (apStagesEffective, 2);
|
|
if (economyMode && currentMidiNote > 84)
|
|
apStagesEffective = 1;
|
|
if (highPolyMode)
|
|
apStagesEffective = 1;
|
|
if (! DebugToggles::kEnablePm2Dispersion)
|
|
apStagesEffective = 0;
|
|
|
|
// Improved group delay compensation:
|
|
// - Use 0.92 factor (was 0.55) to better match actual allpass delay
|
|
// - Add frequency-dependent correction for higher notes
|
|
const float baseCompensation = 0.92f;
|
|
const float freqCorrection = 1.0f + 0.08f * juce::jlimit (0.0f, 1.0f, (float)(hz - 200.0) / 2000.0f);
|
|
float groupDelay = (apStagesEffective > 0 && gScaled > 0.0f)
|
|
? baseCompensation * freqCorrection * (float) apStagesEffective * (1.0f - gScaled) / (1.0f + gScaled)
|
|
: 0.0f;
|
|
double delaySamples = juce::jmax (8.0, targetDelay - (double) groupDelay);
|
|
auto& s = strings[(size_t) i];
|
|
resizeString (s, delaySamples);
|
|
// Option 1: Initialize energy limiter - calibration window captures peak during first 100ms
|
|
const int calibrationMs = 200;
|
|
s.energyCalibSamplesLeft = (int) (sampleRate * calibrationMs / 1000.0);
|
|
s.energyCalibComplete = false;
|
|
s.energyPeak = 0.0f;
|
|
s.energySmoothed = 0.0f;
|
|
s.energyGainSmoothed = 1.0f;
|
|
|
|
// ====================================================================
|
|
// FREQUENCY-NORMALIZED LOSS MODEL - REALISTIC PIANO SUSTAIN
|
|
// A real grand piano has very long sustain times:
|
|
// - Low notes (C1-C2): 25-40 seconds
|
|
// - Mid notes (C3-C4): 18-25 seconds
|
|
// - High notes (C6-C7): 12-18 seconds
|
|
// The GUI Decay control scales this via decayTimeScale.
|
|
// ====================================================================
|
|
|
|
// Base T60 varies with pitch - lower notes sustain longer (like a real piano)
|
|
// Reference: A4 (440 Hz, MIDI 69) gets base T60 of ~22 seconds
|
|
const float midiNote = 12.0f * std::log2 ((float) hz / 440.0f) + 69.0f;
|
|
const float noteNorm = juce::jlimit (0.0f, 1.0f, (midiNote - 21.0f) / 87.0f); // A0 to C8
|
|
|
|
float pitchBasedT60 = 0.0f;
|
|
if (PhysicsToggles::kUsePerNotePhysics)
|
|
{
|
|
pitchBasedT60 = PianoPhysics::StringDecay::getT60 ((float) hz, midiNoteNumber);
|
|
}
|
|
else
|
|
{
|
|
// FIX: Further increased highT60 from 16s to 22s for much longer treble sustain
|
|
// Using an even gentler curve (power of 0.5) so mid/high notes keep most of their sustain
|
|
// Low notes: ~28s, Mid notes: ~24s, High notes: ~22s (much more even distribution)
|
|
const float lowT60 = 28.0f; // Increased from 26
|
|
const float highT60 = 24.0f; // Smaller low/high spread for less pitch-dependent decay
|
|
// Use a very gentle curve - high notes should only lose ~20% of low note sustain
|
|
const float curvedNoteNorm = std::pow (noteNorm, 0.5f); // Even gentler curve (was 0.7)
|
|
pitchBasedT60 = lowT60 - curvedNoteNorm * (lowT60 - highT60);
|
|
}
|
|
|
|
// Apply loss parameter and GUI decay control
|
|
const float effectiveLoss = juce::jmax (0.0025f, params.loss);
|
|
const float lossNorm = juce::jlimit (0.0f, 1.0f, (effectiveLoss - 0.0005f) / (0.02f - 0.0005f));
|
|
float lossEffect = 1.0f - 0.3f * lossNorm; // Reduced from 0.4 - loss has less impact now
|
|
if (PhysicsToggles::kUsePhysicsDefaults)
|
|
lossEffect = 1.0f;
|
|
float targetT60_s = pitchBasedT60 * lossEffect * decayTimeScale;
|
|
// Global sustain scaler - reduced from 1.50 to shorten note duration slightly
|
|
const float globalT60Scale = 1.15f; // Was 1.50f - notes were sustaining too long
|
|
targetT60_s *= globalT60Scale;
|
|
if (PhysicsToggles::kUsePhysicsDefaults)
|
|
targetT60_s = juce::jmax (6.0f, targetT60_s); // Was 12.0f - reduced minimum sustain
|
|
// Extra treble sustain to avoid high-note truncation (applies above C5).
|
|
if (DebugToggles::kEnablePm2TrebleT60Boost)
|
|
{
|
|
const float trebleNorm = juce::jlimit (0.0f, 1.0f, ((float) midiNoteNumber - 72.0f) / 36.0f);
|
|
const float trebleT60Boost = 1.0f + 1.05f * trebleNorm; // up to +105% at C8
|
|
targetT60_s *= trebleT60Boost;
|
|
}
|
|
|
|
// Calculate loops per second for this string
|
|
const float loopsPerSecond = (float) hz;
|
|
|
|
// Total loops during the target T60 period
|
|
const float totalLoopsInT60 = targetT60_s * loopsPerSecond;
|
|
|
|
// For T60 (60 dB decay), we need: loopGain^totalLoopsInT60 = 10^(-60/20) = 0.001
|
|
// So: loopGain = 0.001^(1/totalLoopsInT60) = exp(ln(0.001)/totalLoopsInT60)
|
|
// ln(0.001) ≈ -6.9078
|
|
float loopGainCalc = std::exp (-6.9078f / juce::jmax (1.0f, totalLoopsInT60));
|
|
if (PhysicsToggles::kUsePhysicsDefaults)
|
|
loopGainCalc = juce::jlimit (0.98f, 0.99999f, loopGainCalc);
|
|
|
|
// FIX: Add a small pitch-dependent boost to loop gain for higher notes
|
|
// This compensates for the cumulative effect of per-loop filtering at high frequencies
|
|
if (DebugToggles::kEnablePm2TrebleLoopGainBoost)
|
|
{
|
|
const float trebleBoostStart = 60.0f; // Start boosting above middle C
|
|
const float trebleBoostAmount = 0.0001f * juce::jmax (0.0f, midiNote - trebleBoostStart);
|
|
loopGainCalc = juce::jlimit (0.95f, 0.99999f, loopGainCalc + trebleBoostAmount);
|
|
}
|
|
// Extra high-note loop gain to counter per-loop filtering compounding.
|
|
if (DebugToggles::kEnablePm2TrebleLoopGainComp)
|
|
{
|
|
const float loopGainComp = 0.00024f * juce::jlimit (0.0f, 1.0f, (midiNote - 72.0f) / 36.0f);
|
|
loopGainCalc = juce::jlimit (0.95f, 0.99999f, loopGainCalc + loopGainComp);
|
|
}
|
|
|
|
s.loopGainBase = loopGainCalc;
|
|
|
|
s.loopGainRelease = juce::jlimit (0.90f, 0.99999f, s.loopGainBase * juce::jlimit (1.0f, 4.0f, releaseExtension));
|
|
s.loopGain = s.loopGainBase;
|
|
s.loopGainSmoothed = s.loopGainBase;
|
|
s.damperLossPrev = damper.lossOff;
|
|
s.damperLossSmoothed = damper.lossOff;
|
|
s.damperSoftenCountdown = 0;
|
|
s.damperSoftenState = 0.0f;
|
|
|
|
// LPF coefficient: frequency-normalized for consistent tone across the keyboard
|
|
// FIX: Made filter much gentler for high notes to preserve sustain
|
|
// High notes iterate through the loop many more times per second, so aggressive
|
|
// filtering compounds quickly and kills the sound
|
|
const float refHz = 440.0f;
|
|
const float freqRatio = refHz / juce::jmax (20.0f, (float) hz);
|
|
// IMPROVEMENT 4: Gentler high-frequency rolloff for more realistic harmonic content
|
|
// Real piano strings have a gradual high-frequency decay, not aggressive filtering
|
|
// Higher baseLpCoeff = less filtering per sample = more upper harmonics preserved
|
|
const float baseLpCoeff = 0.92f; // Increased from 0.88 for more HF content
|
|
|
|
// Gentler frequency compensation: high notes need much less filtering
|
|
// because they iterate through the loop many more times per second
|
|
const float freqCompensation = 1.0f / juce::jmax (0.75f, std::pow (freqRatio, 0.08f));
|
|
|
|
// Velocity-dependent brightness: harder hits = brighter tone (more upper harmonics)
|
|
// This simulates how harder hammer strikes excite more high-frequency content
|
|
const float velBrightness = 1.0f + 0.025f * (velCurve - 0.5f);
|
|
|
|
const float lpTrebleComp = 1.0f + 0.08f * juce::jlimit (0.0f, 1.0f, (midiNote - 72.0f) / 36.0f);
|
|
s.lpCoeff = juce::jlimit (0.82f, 0.998f, baseLpCoeff * freqCompensation * velBrightness * lpTrebleComp);
|
|
s.lpState = 0.0f;
|
|
|
|
// dispersion allpass coefficient scaled by dispersionAmt and pitch
|
|
// reuse g
|
|
s.apStages = apStagesEffective;
|
|
for (int k = 0; k < s.apStages; ++k) { s.ap[(size_t) k].g = gScaled; s.ap[(size_t) k].z1 = 0.0f; }
|
|
for (int k = s.apStages; k < 4; ++k) { s.ap[(size_t) k].g = 0.0f; s.ap[(size_t) k].z1 = 0.0f; }
|
|
|
|
// Hammer excitation: either continuous interaction or simplified burst
|
|
const float refDelaySamples = 100.0f;
|
|
const float actualDelaySamples = (float) s.delay.size();
|
|
// FIX: Made high note attenuation much gentler - was pow(..., 0.4) which cut high notes to ~50%
|
|
// Now using pow(..., 0.2) so high notes retain ~75% of excitation energy
|
|
const float highNoteAtten = std::pow (juce::jmin (1.0f, actualDelaySamples / refDelaySamples), 0.01f);
|
|
float exciteGain = juce::jlimit (0.0f, 1.4f, hammer.force * velCurve * highNoteAtten); // Reduced limit from 1.8
|
|
if (PhysicsToggles::kUsePhysicsDefaults)
|
|
exciteGain *= 0.85f; // Was 1.2f - reduced to lower overall levels
|
|
// Normalize excitation across the keyboard so hard notes land closer in level.
|
|
const float noteNormExcite = juce::jlimit (0.0f, 1.0f, ((float) midiNoteNumber - 21.0f) / (108.0f - 21.0f));
|
|
const float exciteNorm = 1.10f - 0.20f * noteNormExcite; // gentle high-note reduction only
|
|
exciteGain = juce::jlimit (0.0f, 1.8f, exciteGain * exciteNorm);
|
|
// Ensure all notes retain a minimum excitation floor for audible transient
|
|
// Reduced from 0.22f base to lower overall levels
|
|
const float noteNormFloor = juce::jlimit (0.0f, 1.0f, (midiNoteNumber - 21.0f) / 87.0f);
|
|
const float exciteFloor = 0.15f + 0.08f * noteNormFloor; // Was 0.22f + 0.10f
|
|
exciteGain = juce::jmax (exciteGain, exciteFloor);
|
|
if (PhysicsToggles::kUsePhysicsDefaults)
|
|
{
|
|
const float toneHz = (float) (sampleRate / juce::jmax (8.0, delaySamples));
|
|
s.toneInjectPhase = 0.0f;
|
|
s.toneInjectPhaseDelta = juce::MathConstants<float>::twoPi * toneHz / (float) sampleRate;
|
|
s.toneInjectSamplesLeft = (int) std::round (0.015f * sampleRate); // Reduced from 0.020
|
|
s.toneInjectGain = 0.12f * exciteGain; // Reduced from 0.20f
|
|
}
|
|
|
|
const float attackMs = juce::jlimit (1.0f, 12.0f, hammer.attackMs * contactScale);
|
|
const float timeScale = juce::jlimit (0.95f, 1.0f,
|
|
std::pow (actualDelaySamples / refDelaySamples, 0.25f)); // gentler treble shortening
|
|
const int attackSamples = juce::jmax (1, (int) std::round (attackMs * 0.001 * sampleRate * timeScale));
|
|
const int decaySamples = juce::jmax (1, (int) std::round (0.012 * sampleRate * timeScale));
|
|
const int releaseSamples = juce::jmax (1, (int) std::round (0.005 * sampleRate * timeScale));
|
|
juce::ignoreUnused (releaseSamples);
|
|
|
|
const int minExciteSamples = (int) std::round (0.18 * sampleRate); // ensure audible transient on high notes
|
|
const int totalExcite = juce::jmax ((int) s.delay.size(), minExciteSamples);
|
|
|
|
// IMPROVEMENT 7: Velocity-Dependent Harmonic Content
|
|
// Real pianos produce different harmonic spectra at different velocities
|
|
// Soft hits: darker tone, fewer upper partials (felt absorbs high frequencies)
|
|
// Hard hits: brighter tone, more upper partials (felt compresses, acts harder)
|
|
const float velToneBoostBase = 1.0f + 0.6f * velCurve; // up to 60% brighter at ff
|
|
const float pitchToneScale = 1.0f + 0.25f * noteNorm; // higher notes naturally brighter
|
|
const float trebleToneComp = DebugToggles::kEnablePm2TrebleToneComp
|
|
? (1.0f - 0.45f * juce::jlimit (0.0f, 1.0f, (midiNoteNumber - 72.0f) / 36.0f))
|
|
: 1.0f;
|
|
const float velToneBoost = velToneBoostBase * (1.0f - 0.20f * noteNorm);
|
|
|
|
// Base tone frequency from preset, scaled by velocity and pitch
|
|
const float toneHzScaled = toneHzEff * velToneBoost * pitchToneScale * trebleToneComp;
|
|
const float minToneHz = juce::jmax (toneHzScaled, (float) hz * 3.0f);
|
|
const float toneHzMax = mixLinear (20000.0f, 12000.0f, juce::jlimit (0.0f, 1.0f, (midiNoteNumber - 72.0f) / 36.0f));
|
|
const float toneHz = juce::jlimit (2000.0f, toneHzMax, minToneHz);
|
|
const float theta = 2.0f * juce::MathConstants<float>::pi * toneHz / (float) sampleRate;
|
|
const float alpha = std::exp (-theta);
|
|
float lpState = 0.0f;
|
|
s.feltState = 0.0f;
|
|
s.feltLastOut = 0.0f;
|
|
s.feltEnvPrev = 0.0f;
|
|
float preload = juce::jlimit (0.0f, 1.0f, felt.preload * preloadScale);
|
|
float preloadPow = std::pow (preload, juce::jlimit (1.0f, 5.0f, felt.stiffness));
|
|
float stiffness = juce::jlimit (1.0f, 5.0f, felt.stiffness * stiffnessScale);
|
|
float hyst = juce::jlimit (0.0f, 0.6f, felt.hysteresis);
|
|
float maxAmp = juce::jlimit (0.2f, 4.0f, felt.maxAmp);
|
|
float maxDelta = maxAmp * 0.5f;
|
|
if (! DebugToggles::kEnablePm2FeltShaping)
|
|
{
|
|
preload = 0.0f;
|
|
preloadPow = 0.0f;
|
|
stiffness = 1.0f;
|
|
hyst = 0.0f;
|
|
maxAmp = 10.0f;
|
|
maxDelta = maxAmp * 0.5f;
|
|
}
|
|
|
|
// FIX: Smooth autoSimplify transition - use gradual blend instead of hard cutoff at 84
|
|
const float simplifyNorm = juce::jlimit (0.0f, 1.0f, ((float) midiNoteNumber - 80.0f) / 16.0f);
|
|
const bool autoSimplify = (simplifyNorm > 0.5f) || (v < 0.25f) || highPolyMode;
|
|
bool useSimplified = (hammer.simplifiedMode || autoSimplify) && DebugToggles::kEnablePm2SimplifiedBurst;
|
|
if (PhysicsToggles::kUsePhysicsDefaults)
|
|
useSimplified = false;
|
|
// Avoid simplified burst in high notes to reduce squeak/quack.
|
|
if (midiNoteNumber >= 72)
|
|
useSimplified = false;
|
|
// FIX: Previously disabled hammer excitation for notes >= C5 (MIDI 72), which caused
|
|
// high notes to sound thin/squeaky and truncated. The hammer-string interaction is
|
|
// essential for proper piano timbre at all pitches.
|
|
const bool disableExcitation = false; // Enable hammer for all notes
|
|
std::vector<float> wdfBurst;
|
|
const auto wdfSafe = sanitizeWdf (wdf);
|
|
bool allowWdfBurst = true;
|
|
if (v < 0.6f)
|
|
{
|
|
// Skip 1 out of 4 low-velocity notes to reduce CPU in burst generation.
|
|
static uint32_t wdfSkipCounter = 0;
|
|
allowWdfBurst = ((wdfSkipCounter++ % 4u) != 3u);
|
|
}
|
|
const bool useWdf = DebugToggles::kEnablePm2WdfBurst
|
|
&& DebugToggles::kEnablePm2SimplifiedBurst
|
|
&& ! economyMode && ! highPolyMode
|
|
&& wdfSafe.enabled && wdfSafe.blend > 1.0e-4f && allowWdfBurst
|
|
&& midiNoteNumber < 96; // Extended from 84 to reduce split audibility
|
|
if (useSimplified && useWdf)
|
|
{
|
|
wdfBurst = buildWdfBurst (sampleRate,
|
|
hz,
|
|
juce::jlimit (0.0f, 1.0f, v),
|
|
totalExcite,
|
|
wdfSafe.loss,
|
|
wdfSafe.bridgeMass,
|
|
wdfSafe.plateStiffness,
|
|
rng);
|
|
for (auto& sample : wdfBurst)
|
|
{
|
|
if (! std::isfinite (sample))
|
|
sample = 0.0f;
|
|
sample = juce::jlimit (-2.0f, 2.0f, sample);
|
|
}
|
|
}
|
|
const float wdfBlend = wdfSafe.blend;
|
|
const float wdfTrebleAtten = DebugToggles::kEnablePm2WdfTrebleAtten
|
|
? (1.0f - 0.50f * juce::jlimit (0.0f, 1.0f, ((float) midiNoteNumber - 72.0f) / 36.0f))
|
|
: 1.0f;
|
|
const int wdfRamp = juce::jmax (1, totalExcite / 4);
|
|
|
|
if (useSimplified)
|
|
{
|
|
if (! DebugToggles::kEnablePm2HammerExcitation)
|
|
{
|
|
for (auto& sample : s.delay) sample = 0.0f;
|
|
}
|
|
else
|
|
{
|
|
int envAttack = juce::jmin (attackSamples, totalExcite / 3);
|
|
int envDecay = juce::jmin (decaySamples, totalExcite / 2);
|
|
envAttack = juce::jmax (1, (int) std::round (envAttack * 0.5f)); // steeper onset
|
|
envDecay = juce::jmax (1, (int) std::round (envDecay * 0.75f));
|
|
const int envRelease = juce::jmax (0, totalExcite - envAttack - envDecay);
|
|
|
|
const int delaySize = (int) s.delay.size();
|
|
const int writeCount = juce::jmin (totalExcite, delaySize);
|
|
for (int n = 0; n < totalExcite; ++n)
|
|
{
|
|
float env = 0.0f;
|
|
if (n < envAttack)
|
|
env = (float) n / juce::jmax (1.0f, (float) envAttack);
|
|
else if (n < envAttack + envDecay)
|
|
{
|
|
const float t = (float) (n - envAttack) / juce::jmax (1.0f, (float) envDecay);
|
|
env = 1.0f - 0.85f * t;
|
|
}
|
|
else if (envRelease > 0)
|
|
{
|
|
const float t = (float) (n - envAttack - envDecay) / juce::jmax (1.0f, (float) envRelease);
|
|
env = juce::jmax (0.0f, 0.15f * (1.0f - t));
|
|
}
|
|
|
|
// IMPROVEMENT 1: Pink noise for more realistic spectral density
|
|
// Voss-McCartney approximation creates inter-harmonic energy
|
|
float whiteNoise = randomUniform() * 2.0f - 1.0f;
|
|
|
|
// Update pink noise octave bands at different rates
|
|
pinkNoiseCounter++;
|
|
if ((pinkNoiseCounter & 0x01) == 0)
|
|
pinkNoiseState[0] = randomUniform() * 2.0f - 1.0f;
|
|
if ((pinkNoiseCounter & 0x03) == 0)
|
|
pinkNoiseState[1] = randomUniform() * 2.0f - 1.0f;
|
|
if ((pinkNoiseCounter & 0x0F) == 0)
|
|
pinkNoiseState[2] = randomUniform() * 2.0f - 1.0f;
|
|
float pinkNoise = (pinkNoiseState[0] + pinkNoiseState[1] + pinkNoiseState[2] + whiteNoise) * 0.25f;
|
|
|
|
// Mix white (brightness/attack) and pink (body/warmth) based on velocity and pitch
|
|
// Lower velocity and lower notes get more pink noise for warmth
|
|
// Higher velocity and higher notes get more white noise for brightness
|
|
const float noteNormMix = juce::jlimit (0.0f, 1.0f, ((float) midiNoteNumber - 21.0f) / 87.0f);
|
|
const float pinkTrebleBoost = 0.12f * noteNormMix;
|
|
const float pinkMix = juce::jlimit (0.25f, 0.75f,
|
|
0.45f - 0.15f * velCurve + 0.08f * (1.0f - noteNormMix)
|
|
+ pinkTrebleBoost);
|
|
float noise = (whiteNoise * (1.0f - pinkMix) + pinkNoise * pinkMix) * env * exciteGain;
|
|
|
|
lpState = lpState + (1.0f - alpha) * (noise - lpState);
|
|
|
|
float mag = std::pow (preload + std::abs (lpState), stiffness) - preloadPow;
|
|
float shaped = (1.0f - hyst) * mag + hyst * s.feltState;
|
|
s.feltState = shaped;
|
|
|
|
float envDeriv = env - s.feltEnvPrev;
|
|
s.feltEnvPrev = env;
|
|
float rateBoost = juce::jlimit (0.8f, 1.4f, 1.0f + envDeriv * 2.0f);
|
|
|
|
float burst = softClip ((float) std::copysign (shaped * rateBoost, lpState), maxAmp);
|
|
float delta = burst - s.feltLastOut;
|
|
delta = softClip (delta, maxDelta);
|
|
burst = s.feltLastOut + delta;
|
|
s.feltLastOut = burst;
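// Slew limit: the per-sample change is soft-clipped to maxDelta before being applied,
// so the shaped burst cannot jump abruptly between samples.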
|
|
|
|
// IMPROVEMENT 9: Add subtle attack transient noise
|
|
// Simulates felt compression noise and mechanical "thunk" character
|
|
// Most prominent in first few ms of note, filtered to mid-high frequencies
|
|
// FIX: Smooth fade-out from note 72 to 84 instead of hard cutoff at 76
|
|
const float transientFade = 1.0f - juce::jlimit (0.0f, 1.0f, ((float) midiNoteNumber - 72.0f) / 12.0f);
|
|
if (transientFade > 0.01f && n < envAttack + envDecay / 2)
|
|
{
|
|
const float transientProgress = (float) n / (float) (envAttack + envDecay / 2);
|
|
// Quadratic decay envelope for transient
|
|
const float transientEnv = (1.0f - transientProgress) * (1.0f - transientProgress);
|
|
|
|
// Generate transient noise scaled by velocity (harder hits = more thunk)
|
|
const float transientTrebleAtten = 1.0f - 0.85f * noteNormMix;
|
|
float transientNoise = (randomUniform() * 2.0f - 1.0f) * transientEnv * exciteGain * 0.12f * velCurve * transientTrebleAtten;
|
|
|
|
// Simple highpass approximation at ~300Hz for "thunk" character
// Leaky differentiator: hp[n] = x[n] - hpCoeff * x[n-1]
|
|
static thread_local float transientHpState = 0.0f;
|
|
if (n == 0) transientHpState = 0.0f; // Reset for each string
|
|
const float hpCoeff = 0.92f; // ~300Hz highpass
|
|
float hpOut = transientNoise - transientHpState * hpCoeff;
|
|
transientHpState = transientNoise;
|
|
|
|
// FIX: Apply smooth fade for higher notes
|
|
burst += hpOut * transientFade;
|
|
}
|
|
|
|
if (useWdf && n < (int) wdfBurst.size())
|
|
{
|
|
const float ramp = juce::jlimit (0.0f, 1.0f, (float) n / (float) wdfRamp);
|
|
const float blend = juce::jlimit (0.0f, 1.0f, wdfBlend * wdfTrebleAtten * ramp);
|
|
burst = mixLinear (burst, wdfBurst[(size_t) n], blend);
|
|
}
|
|
if (n < writeCount)
|
|
s.delay[(size_t) n] = burst;
|
|
}
|
|
if (writeCount < delaySize)
|
|
{
|
|
for (int n = writeCount; n < delaySize; ++n)
|
|
s.delay[(size_t) n] = 0.0f;
|
|
}
|
|
}
|
|
}
|
|
else
|
|
{
|
|
for (auto& sample : s.delay) sample = 0.0f;
|
|
if (PhysicsToggles::kUsePhysicsDefaults)
|
|
{
|
|
const float periodSamples = (float) juce::jmax (4.0, s.delaySamples);
|
|
const int seedSamples = juce::jlimit (4, (int) s.delay.size(),
|
|
(int) std::round (periodSamples * 2.0f));
|
|
const float seedAmp = 0.08f * exciteGain; // Reduced from 0.12f to lower levels
|
|
for (int n = 0; n < seedSamples; ++n)
|
|
s.delay[(size_t) n] = std::sin (juce::MathConstants<float>::twoPi
|
|
* (float) n / periodSamples) * seedAmp;
|
|
}
|
|
if (! DebugToggles::kEnablePm2HammerExcitation || disableExcitation)
|
|
s.hammer.active = false;
|
|
else
|
|
s.hammer.active = true;
|
|
// FIX: Allow hammer interaction to continue beyond the delay line size
|
|
// This gives longer, more natural hammer-string contact especially for higher notes
|
|
// The interaction now runs for the full attack window + tail, not limited by delay line
|
|
const float pedalLift = juce::jlimit (0.0f, 1.0f, sustainPedalDown ? 1.0f : damperLiftTarget);
|
|
const float pedalTailScale = 1.0f + 0.6f * pedalLift;
|
|
const int hammerTailSamples = juce::jmax (1, (int) std::round (decaySamples * 4 * pedalTailScale)); // Longer tail when pedal is down
|
|
const int minHammerSamples = (int) std::round ((0.20f + 0.06f * pedalLift) * sampleRate); // 200-260ms interaction
|
|
s.hammer.samplesLeft = juce::jmax (minHammerSamples, hammerWindowSamples + hammerTailSamples);
|
|
s.hammer.samplesTotal = s.hammer.samplesLeft;
|
|
s.hammer.samplesElapsed = 0;
|
|
s.hammer.pos = 0.0f;
|
|
const float strikeBoost = juce::jlimit (0.90f, 1.05f, 0.95f + 0.15f * v); // softer onset to reduce spike
|
|
s.hammer.vel = exciteGain * 1.2f * strikeBoost; // Reduced from 1.6f to lower levels
|
|
s.hammer.pen = 0.0f;
|
|
float hammerMass = hammer.massKg;
|
|
float hammerK = hammer.contactStiffness;
|
|
float hammerExp = hammer.contactExponent;
|
|
float hammerDamping = hammer.contactDamping;
|
|
if (PhysicsToggles::kUsePerNotePhysics)
|
|
{
|
|
hammerMass = PianoPhysics::Hammer::getMass (midiNoteNumber);
|
|
hammerK = mapHammerStiffnessToModel (PianoPhysics::Hammer::getStiffness (midiNoteNumber));
|
|
hammerExp = PianoPhysics::Hammer::getExponent (midiNoteNumber);
|
|
const float physHyst = PianoPhysics::Hammer::getHysteresis (midiNoteNumber);
|
|
hammerDamping = juce::jmap (physHyst, 0.08f, 0.18f, 4.0f, 8.0f);
|
|
}
|
|
|
|
s.hammer.mass = juce::jlimit (0.005f, 0.08f, hammerMass);
|
|
s.hammer.k = juce::jlimit (200.0f, 20000.0f, hammerK * stiffnessScale);
|
|
s.hammer.exp = juce::jlimit (1.4f, 4.0f, hammerExp);
|
|
s.hammer.damping = juce::jlimit (0.5f, 40.0f, hammerDamping);
|
|
s.hammer.preload = juce::jlimit (0.0f, 1.0f, hammer.force * 0.5f + preload * 0.5f);
|
|
s.hammer.preload = juce::jlimit (0.0f, 1.0f, s.hammer.preload * preloadScale);
|
|
s.hammer.maxPen = juce::jlimit (0.0005f, 0.030f, hammer.maxPenetration);
|
|
s.hammer.toneAlpha = juce::jlimit (0.0f, 0.9999f, alpha);
|
|
s.hammer.toneState = 0.0f;
|
|
// FIX: Increased hammer gain significantly for stronger excitation
|
|
// This ensures the initial transient has enough energy to sustain
|
|
s.hammer.gain = 0.008f * juce::jlimit (0.9f, 1.25f, 0.9f + 0.35f * v); // Reduced from 0.012 to lower levels
|
|
s.hammer.gainSmoothed = DebugToggles::kEnablePm2HammerGainRamp ? 0.0f : s.hammer.gain;
|
|
s.hammer.simplified = false;
|
|
}
|
|
|
|
const bool enableDuplex = (! economyMode) || currentMidiNote <= 100;
|
|
const float duplexGainDb = juce::jlimit (-20.0f, -6.0f, duplex.gainDb);
|
|
const float duplexGainLin = juce::Decibels::decibelsToGain (duplexGainDb);
|
|
// CPU optimisation: Only apply duplex to the first (loudest) string
|
|
// This preserves most of the afterlength effect while reducing cost by ~67% for 3-string setups
|
|
const bool useDuplex = DebugToggles::kEnablePm2Duplex
|
|
&& ! economyMode && ! highPolyMode && enableDuplex
|
|
&& (duplexGainDb > -19.5f) && (i == 0) && ! lowVelSkip;
|
|
if (useDuplex)
|
|
{
|
|
// duplex buffer length ~ afterlength
|
|
float ratio = juce::jlimit (1.1f, 4.0f, duplex.ratio);
|
|
double duplexDelay = delaySamples / ratio;
|
|
const int dlen = juce::jmax (4, (int) std::ceil (duplexDelay));
|
|
s.duplex.buf.resize ((size_t) dlen);
|
|
std::fill (s.duplex.buf.begin(), s.duplex.buf.end(), 0.0f);
|
|
s.duplex.write = 0;
|
|
s.duplex.gain = juce::jlimit (0.0f, 0.5f, duplexGainLin);
|
|
// decayMs -> feedback factor
|
|
double decayMs = juce::jlimit (10.0, 400.0, (double) duplex.decayMs);
|
|
double tauSamples = (decayMs * 0.001) * sampleRate;
|
|
double fb = std::exp (-1.0 / juce::jmax (8.0, tauSamples));
|
|
s.duplex.feedback = (float) juce::jlimit (0.0, 0.99, fb);
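// One-pole decay: fb = exp (-1 / tauSamples); e.g. decayMs = 100 at 48 kHz gives tau = 4800 samples, fb ~= 0.99979.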
|
|
s.duplex.inputGain = 0.15f;
|
|
}
|
|
else
|
|
{
|
|
s.duplex.buf.clear();
|
|
s.duplex.write = 0;
|
|
s.duplex.gain = 0.0f;
|
|
s.duplex.feedback = 0.0f;
|
|
s.duplex.inputGain = 0.0f;
|
|
}
|
|
|
|
s.baseGain = params.gain[(size_t) i];
|
|
// CPU OPTIMIZATION: Apply bass third string fade for notes below G2
|
|
// This gradually reduces the third string's contribution in the bass register
|
|
if (i == 2)
|
|
s.baseGain *= thirdStringGainScale;
|
|
const float pan = juce::jlimit (-1.0f, 1.0f, params.pan[(size_t) i] * stereoWidth);
|
|
s.panGainL = std::sqrt (0.5f * (1.0f - pan));
|
|
s.panGainR = std::sqrt (0.5f * (1.0f + pan));
|
|
|
|
// IMPROVEMENT 2: Initialize fundamental boost lowpass
|
|
// Uses lowpass extraction on the output to boost fundamental region
|
|
// Only the first string's state is used (for the summed output)
|
|
if (i == 0)
|
|
{
|
|
// Lowpass cutoff at ~1.5x fundamental to capture fundamental region
|
|
const float lpCutoffHz = juce::jlimit (40.0f, 800.0f, (float) hz * 1.5f);
|
|
|
|
// One-pole lowpass coefficient: alpha = exp(-2*pi*fc/fs)
|
|
const float lpAlpha = std::exp (-2.0f * juce::MathConstants<float>::pi * lpCutoffHz / (float) sampleRate);
|
|
s.fundResonatorCoeff = lpAlpha;
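// e.g. for A4 (440 Hz) the cutoff is ~660 Hz, so at 48 kHz lpAlpha = exp (-2*pi*660/48000) ~= 0.917.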
|
|
|
|
// Pitch-dependent gain: boost more in mid-range where fundamental weakness is most noticeable
|
|
// Reduce boost for extreme bass (already fundamental-heavy) and extreme treble (short strings)
|
|
const float fundBoostNorm = 1.0f - std::abs ((float) midiNoteNumber - 60.0f) / 48.0f;
|
|
const float fundBoostClamped = juce::jmax (0.0f, fundBoostNorm);
|
|
s.fundResonatorGain = juce::jlimit (0.02f, 0.15f, 0.10f * fundBoostClamped);
|
|
|
|
// Reset filter state
|
|
s.fundResonatorState1 = 0.0f;
|
|
s.fundResonatorState2 = 0.0f;
|
|
}
|
|
else
|
|
{
|
|
// Other strings don't use these (only string 0's state is used for output boost)
|
|
s.fundResonatorCoeff = 0.0f;
|
|
s.fundResonatorGain = 0.0f;
|
|
s.fundResonatorState1 = 0.0f;
|
|
s.fundResonatorState2 = 0.0f;
|
|
}
|
|
}
|
|
|
|
// Option 5: Initialize anti-swell envelope - slow decay for high notes only
// Decay rate: 0 for notes at or below C5 (72), rising quadratically to ~0.08 dB/sec at C8 (108)
|
|
antiSwellEnv = 1.0f;
|
|
if (DebugToggles::kEnablePm2AntiSwell)
|
|
{
|
|
const float antiSwellNoteNorm = DebugToggles::kEnablePm2AntiSwellTreblePivot
|
|
? juce::jlimit (0.0f, 1.0f, ((float) midiNoteNumber - 72.0f) / 48.0f)
|
|
: 0.0f;
|
|
// Convert dB/sec to a per-sample linear multiplier; the decay strengthens gently with note number
|
|
const float dbPerSecMax = 0.15f;
|
|
const float dbPerSec = dbPerSecMax * antiSwellNoteNorm * antiSwellNoteNorm; // Quadratic curve
|
|
const float dbPerSample = dbPerSec / (float) sampleRate;
|
|
antiSwellDecayPerSample = std::pow (10.0f, -dbPerSample / 20.0f); // Convert to linear multiplier
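// e.g. at the 0.15 dB/sec maximum and 48 kHz: dbPerSample ~= 3.1e-6, giving a multiplier of ~0.9999996 per sample.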
|
|
}
|
|
else
|
|
{
|
|
antiSwellDecayPerSample = 1.0f;
|
|
}
|
|
|
|
adsr.noteOn();
|
|
active = true;
|
|
}
|
|
|
|
void Pm2StringBank::hardRetrigger (int midiNoteNumber, float velocity)
|
|
{
|
|
resetForHardRetrigger();
|
|
noteOn (midiNoteNumber, velocity);
|
|
}
|
|
|
|
void Pm2StringBank::noteOff()
|
|
{
|
|
if (minNoteOffRemaining > 0)
|
|
{
|
|
pendingNoteOff = true;
|
|
return;
|
|
}
|
|
applyNoteOffInternal();
|
|
}
|
|
|
|
void Pm2StringBank::applyNoteOffInternal()
|
|
{
|
|
keyHeld = false;
|
|
if (sustainPedalDown && ! stealInProgress)
|
|
return; // wait for pedal release to start tail decay
|
|
|
|
const float releaseSec = 0.002f; // 2 ms key-release ramp (same value with and without physics defaults)
keyReleaseSamplesTotal = juce::jmax (1, (int) std::round (releaseSec * sampleRate));
keyReleaseSamplesRemaining = keyReleaseSamplesTotal;
const float fadeSec = 0.001f; // 1 ms key-off fade
keyOffFadeSamplesTotal = juce::jmax (1, (int) std::round (fadeSec * sampleRate));
|
|
keyOffFadeSamplesRemaining = keyOffFadeSamplesTotal;
|
|
useReleaseLoopGain = true;
|
|
releaseDelaySamples = 0;
|
|
damperDelaySamples = PhysicsToggles::kUsePhysicsDefaults ? (int) std::round (0.001f * sampleRate) : 0; // 1ms damper delay - nearly instant
|
|
adsr.noteOff();
|
|
postLpfEnv.noteOff();
|
|
}
|
|
|
|
void Pm2StringBank::forceSilence()
|
|
{
|
|
adsr.reset();
|
|
postLpfEnv.reset();
|
|
active = false;
|
|
currentMidiNote = -1;
|
|
pedalChangeSamplesRemaining = 0;
|
|
pedalChangeFade = 1.0f;
|
|
minNoteOffRemaining = 0;
|
|
pendingNoteOff = false;
|
|
for (auto& s : strings)
|
|
{
|
|
std::fill (s.delay.begin(), s.delay.end(), 0.0f);
|
|
s.lpState = 0.0f;
|
|
s.damperSoftenState = 0.0f;
|
|
s.damperLossSmoothed = damper.lossOff;
|
|
s.dc = {};
|
|
}
|
|
lastEnv = 0.0f;
|
|
lastOutL = 0.0f;
|
|
lastOutR = 0.0f;
|
|
postLpfStateL = 0.0f;
|
|
postLpfStateR = 0.0f;
|
|
loopEnergySmoothed = 0.0f;
|
|
// FIX #3: Only reset steal fade if we're NOT in a steal operation.
|
|
// When voice stealing, we want the fade to complete to avoid clicks.
|
|
if (! stealInProgress)
|
|
stealFadeRemaining = 0;
|
|
stealInProgress = false; // Clear the flag after use
|
|
}
|
|
|
|
void Pm2StringBank::resetForHardRetrigger()
|
|
{
|
|
adsr.reset();
|
|
postLpfEnv.reset();
|
|
active = false;
|
|
keyHeld = false;
|
|
useReleaseLoopGain = false;
|
|
keyReleaseSamplesRemaining = 0;
|
|
keyOffFadeSamplesRemaining = 0;
|
|
noteFadeSamplesRemaining = 0;
|
|
noteFadeSamplesTotal = 0;
|
|
lowVelSkip = false;
|
|
minNoteOffRemaining = 0;
|
|
pendingNoteOff = false;
|
|
stealFadeRemaining = 0;
|
|
stealFadeSamples = 0;
|
|
stealInProgress = false;
|
|
pedalChangeSamplesRemaining = 0;
|
|
pedalChangeFade = 1.0f;
|
|
antiSwellEnv = 1.0f;
|
|
antiSwellDecayPerSample = 1.0f;
|
|
loopEnergySmoothed = 0.0f;
|
|
lastEnv = 0.0f;
|
|
lastOutL = 0.0f;
|
|
lastOutR = 0.0f;
|
|
postLpfStateL = 0.0f;
|
|
postLpfStateR = 0.0f;
|
|
for (auto& s : strings)
|
|
{
|
|
std::fill (s.delay.begin(), s.delay.end(), 0.0f);
|
|
s.writePos = 0;
|
|
s.lpState = 0.0f;
|
|
s.damperSoftenState = 0.0f;
|
|
s.damperLossSmoothed = damper.lossOff;
|
|
s.damperLossPrev = damper.lossOff;
|
|
s.interpZ1 = 0.0f;
|
|
s.hammer = {};
|
|
s.duplex.buf.clear();
|
|
s.duplex.write = 0;
|
|
s.duplex.feedback = 0.0f;
|
|
s.duplex.gain = 0.0f;
|
|
s.duplex.inputGain = 0.0f;
|
|
if (sampleRate > 0.0)
|
|
s.dc.reset (sampleRate);
|
|
else
|
|
s.dc = {};
|
|
s.energyCalibSamplesLeft = 0;
|
|
s.energyCalibComplete = false;
|
|
s.energyPeak = 0.0f;
|
|
s.energySmoothed = 0.0f;
|
|
s.energyGainSmoothed = 1.0f;
|
|
}
|
|
}
|
|
|
|
float Pm2StringBank::randomUniform()
|
|
{
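// 32-bit linear congruential generator (Numerical Recipes constants);
// the top 24 bits of the updated state are scaled into [0, 1).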
|
|
rng = 1664525u * rng + 1013904223u;
|
|
return (rng >> 8) * (1.0f / 16777216.0f);
|
|
}
|
|
|
|
void Pm2StringBank::render (juce::AudioBuffer<float>& buffer, int startSample, int numSamples, int startSampleInBlock)
|
|
{
|
|
if (! active && ! adsr.isActive())
|
|
return;
|
|
|
|
const float dt = (float) (1.0 / juce::jmax (20.0, sampleRate));
|
|
auto* L = buffer.getWritePointer (0, startSample);
|
|
auto* R = buffer.getNumChannels() > 1 ? buffer.getWritePointer (1, startSample) : nullptr;
|
|
float blockAbsMax = 0.0f;
|
|
const bool undampedSend = keyHeld || sustainPedalDown;
|
|
const bool damperEnabled = DebugToggles::kEnablePm2Damper;
|
|
const bool couplingEnabled = DebugToggles::kEnableCoupling;
|
|
const bool stringFiltersEnabled = DebugToggles::kEnablePm2StringFilters;
|
|
|
|
const float couplingGainEff = (economyMode || ! couplingEnabled) ? 0.0f : couplingGain * polyphonyScale;
|
|
const float sympGainEff = (economyMode || ! couplingEnabled) ? 0.0f : sympGain * polyphonyScale;
|
|
const bool skipBodyNoise = economyMode;
|
|
const bool skipDuplex = false;
|
|
const float softGainScale = softPedalDown ? juce::jlimit (0.0f, 1.0f, unaCorda.gainScale) : 1.0f;
|
|
|
|
for (int i = 0; i < numSamples; ++i)
|
|
{
|
|
if (minNoteOffRemaining > 0)
|
|
{
|
|
--minNoteOffRemaining;
|
|
if (minNoteOffRemaining == 0 && pendingNoteOff)
|
|
{
|
|
pendingNoteOff = false;
|
|
applyNoteOffInternal();
|
|
}
|
|
}
|
|
if (pedalChangeSamplesRemaining > 0 && pedalChangeSamplesTotal > 0)
|
|
{
|
|
float t = 1.0f - (float) pedalChangeSamplesRemaining / (float) pedalChangeSamplesTotal;
|
|
t = juce::jlimit (0.0f, 1.0f, t);
|
|
pedalChangeFade = t * t * (3.0f - 2.0f * t);
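// Smoothstep easing: t*t*(3 - 2*t) rises from 0 to 1 with zero slope at both ends, avoiding fade clicks.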
|
|
--pedalChangeSamplesRemaining;
|
|
}
|
|
else
|
|
{
|
|
pedalChangeFade = 1.0f;
|
|
}
|
|
|
|
float stealGain = 1.0f;
|
|
if (stealFadeRemaining > 0 && stealFadeSamples > 0)
|
|
{
|
|
stealGain = (float) stealFadeRemaining / (float) stealFadeSamples;
|
|
--stealFadeRemaining;
|
|
}
|
|
|
|
float sumL = 0.0f, sumR = 0.0f;
|
|
float loopEnergySample = 0.0f;
|
|
for (int sIdx = 0; sIdx < currentNumStrings; ++sIdx)
|
|
{
|
|
auto& s = strings[(size_t) sIdx];
|
|
if (s.delay.empty()) continue;
|
|
|
|
const int len = (int) s.delay.size();
|
|
|
|
float y = 0.0f;
|
|
// Use cheaper linear interpolation for economy mode and treble notes
|
|
if (economyMode || ! DebugToggles::kEnablePm2FracDelayInterp || currentMidiNote >= 84)
|
|
{
|
|
// Cheaper linear interpolation
|
|
double readPos = (double) s.writePos - s.delaySamples;
|
|
while (readPos < 0.0) readPos += (double) len;
|
|
while (readPos >= (double) len) readPos -= (double) len;
|
|
int i1 = (int) std::floor (readPos);
|
|
float frac = (float) (readPos - (double) i1);
|
|
int i0 = i1;
|
|
int i2 = (i1 + 1) % len;
|
|
float x1 = s.delay[(size_t) i0];
|
|
float x2 = s.delay[(size_t) i2];
|
|
y = x1 + frac * (x2 - x1);
|
|
}
|
|
else
|
|
{
|
|
// Thiran first-order allpass interpolation for fractional delay
|
|
// Much cheaper than Lagrange (1 read + state vs 4 reads + 15 FMAs)
|
|
// while maintaining flat magnitude response (ideal for physical modelling)
|
|
//
|
|
// Formula (transposed direct form II):
|
|
// y = alpha * x + z1
|
|
// z1_new = x - alpha * y
|
|
// where alpha = (1-d)/(1+d), d = fractional delay
|
|
|
|
// Read from integer delay position
|
|
const int intDelay = (int) std::floor (s.delaySamples);
|
|
int readIdx = s.writePos - intDelay;
|
|
while (readIdx < 0) readIdx += len;
|
|
const float x = s.delay[(size_t) readIdx];
|
|
|
|
// Apply allpass for fractional delay
|
|
const float alpha = s.interpAlpha;
|
|
y = alpha * x + s.interpZ1;
|
|
s.interpZ1 = x - alpha * y;
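// e.g. a fractional delay of d = 0.5 gives alpha = (1 - 0.5) / (1 + 0.5) = 1/3.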
|
|
}
|
|
|
|
// dispersion allpass chain
|
|
for (int k = 0; k < s.apStages; ++k)
|
|
y = s.ap[(size_t) k].process (y);
|
|
|
|
// loop loss (scalar) + frequency-dependent loss via one-pole LP
|
|
float damperLossTarget = 1.0f;
|
|
if (damperEnabled)
|
|
{
|
|
// Smooth damper lift changes to avoid zipper noise under rapid pedal motion.
|
|
damperLiftSmoothed += damperLiftSmoothCoeff * (damperLiftTarget - damperLiftSmoothed);
|
|
// Treat the damper as fully lifted while the key (or sustain pedal) is down.
|
|
// When the key is released, ramp the damper transition over a few ms to avoid clicks.
|
|
float releaseBlend = 0.0f;
|
|
if (keyHeld || sustainPedalDown)
|
|
{
|
|
releaseBlend = 1.0f;
|
|
}
|
|
else if (PhysicsToggles::kUsePhysicsDefaults && damperDelaySamples > 0)
|
|
{
|
|
--damperDelaySamples;
|
|
releaseBlend = 1.0f;
|
|
}
|
|
else if (keyReleaseSamplesRemaining > 0 && keyReleaseSamplesTotal > 0)
|
|
{
|
|
releaseBlend = (float) keyReleaseSamplesRemaining / (float) keyReleaseSamplesTotal;
|
|
--keyReleaseSamplesRemaining;
|
|
}
|
|
const float damperLiftEffective = damperLiftSmoothed + (1.0f - damperLiftSmoothed) * releaseBlend;
|
|
|
|
damperLossTarget = (damperLiftEffective >= 0.999f)
|
|
? damper.lossOff
|
|
: mixLinear (damper.lossDamped, damper.lossHalf, damperLiftEffective);
|
|
const float damperCoeff = DebugToggles::kEnablePm2ExtraDamperSmoothing
|
|
? (damperSmoothCoeff * 0.5f)
|
|
: damperSmoothCoeff;
|
|
s.damperLossSmoothed += damperCoeff * (damperLossTarget - s.damperLossSmoothed);
|
|
}
|
|
else
|
|
{
|
|
damperLiftSmoothed = 1.0f;
|
|
s.damperLossSmoothed = 1.0f;
|
|
s.damperLossPrev = 1.0f;
|
|
}
|
|
if (releaseDelaySamples > 0)
|
|
--releaseDelaySamples;
|
|
if (releaseDelaySamples == 0 && ! useReleaseLoopGain && ! keyHeld && ! sustainPedalDown)
|
|
useReleaseLoopGain = true;
|
|
float loopTarget = (useReleaseLoopGain ? s.loopGainRelease : s.loopGainBase)
|
|
* (damperEnabled ? s.damperLossSmoothed : 1.0f);
|
|
if (DebugToggles::kEnablePm2FreqDependentLoss)
|
|
loopTarget *= freqLossScalar;
|
|
const float swellScale = PhysicsToggles::kUsePhysicsDefaults ? 0.6f : 1.0f;
|
|
if (DebugToggles::kEnablePm2HighNoteLoopDamping)
|
|
{
|
|
if (sustainPedalDown)
|
|
{
|
|
// Gentle swell prevention for high notes with pedal down
|
|
const float noteNorm = juce::jlimit (0.0f, 1.0f, ((float) currentMidiNote - 68.0f) / 36.0f);//1st value is midi note it starts at 2nd value is notespan of big reduction
|
|
const float sustainSwellDamp = 1.0f - (0.0008f * swellScale) * noteNorm;
|
|
loopTarget *= sustainSwellDamp;
|
|
}
|
|
else
|
|
{
|
|
// Extra high-note damping without sustain to prevent runaway
|
|
const float noteNorm = juce::jlimit (0.0f, 1.0f, ((float) currentMidiNote - 84.0f) / 40.0f); // ~C6 to C9
|
|
const float noPedalHighDamp = 1.0f - (0.0020f * swellScale) * noteNorm;
|
|
loopTarget *= noPedalHighDamp;
|
|
}
|
|
}
|
|
const float loopCoeff = DebugToggles::kEnablePm2ExtraLoopGainSmoothing
|
|
? (damperSmoothCoeff * 0.5f)
|
|
: damperSmoothCoeff;
|
|
s.loopGainSmoothed += loopCoeff * (loopTarget - s.loopGainSmoothed);
|
|
if (damperEnabled)
|
|
{
|
|
if (damperLossTarget < s.damperLossPrev - 1.0e-4f)
|
|
s.damperSoftenCountdown = damperSoftenSamples;
|
|
s.damperLossPrev = damperLossTarget;
|
|
}
|
|
|
|
y *= s.loopGainSmoothed;
|
|
if (stringFiltersEnabled)
|
|
{
|
|
float lp = s.lpState + s.lpCoeff * (y - s.lpState);
|
|
s.lpState = lp;
|
|
y = lp;
|
|
}
|
|
loopEnergySample += y * y;
|
|
|
|
// Option 1: Per-string energy limiter to prevent swell
// During calibration window, capture peak energy level
// After calibration, soft-limit once energy exceeds the calibrated peak plus its headroom (~125-135%)
|
|
if (! economyMode && DebugToggles::kEnablePm2EnergyLimiter && ! PhysicsToggles::kUsePhysicsDefaults)
|
|
{
|
|
const float absY = std::abs (y);
|
|
// Smooth energy tracking (fast attack, slow release)
|
|
const float noteNormHigh = juce::jlimit (0.0f, 1.0f, ((float) currentMidiNote - 76.0f) / 32.0f); // E5 to G7
|
|
const float energyAttack = mixLinear (0.095f, 0.13f, noteNormHigh); // Faster attack for treble
|
|
const float energyRelease = 0.001f; // Slow release
|
|
const float energyCoeff = (absY > s.energySmoothed) ? energyAttack : energyRelease;
|
|
s.energySmoothed += energyCoeff * (absY - s.energySmoothed);
|
|
|
|
if (! s.energyCalibComplete)
|
|
{
|
|
// During calibration: track peak energy
|
|
if (s.energySmoothed > s.energyPeak)
|
|
s.energyPeak = s.energySmoothed;
|
|
--s.energyCalibSamplesLeft;
|
|
if (s.energyCalibSamplesLeft <= 0)
|
|
{
|
|
s.energyCalibComplete = true;
|
|
// Add 25-35% headroom (pitch-dependent) to avoid limiting normal dynamics
|
|
const float headroom = mixLinear (1.35f, 1.25f, noteNormHigh);
|
|
s.energyPeak *= headroom;
|
|
// Minimum floor to avoid division issues on very quiet notes
|
|
s.energyPeak = juce::jmax (s.energyPeak, 0.003f);
|
|
}
|
|
}
|
|
else if (s.energyPeak > 0.0f)
|
|
{
|
|
// After calibration: soft-limit if energy exceeds reference
|
|
const float ratio = s.energySmoothed / s.energyPeak;
|
|
float targetGain = 1.0f;
|
|
if (ratio > 1.0f)
|
|
{
|
|
// Soft knee limiter: gentle compression above threshold
|
|
const float excess = ratio - 1.0f;
|
|
const float compRatio = mixLinear (1.15f, 1.45f, noteNormHigh);
|
|
targetGain = 1.0f / (1.0f + excess * compRatio);
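// e.g. energy 50% over the reference with compRatio = 1.15 gives targetGain = 1 / (1 + 0.5 * 1.15) ~= 0.63.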
|
|
}
|
|
targetGain = juce::jmax (0.6f, targetGain);
|
|
const float gainAttack = mixLinear (0.10f, 0.14f, noteNormHigh);
|
|
const float gainRelease = 0.006f;
|
|
const float gainCoeff = (targetGain < s.energyGainSmoothed) ? gainAttack : gainRelease;
|
|
s.energyGainSmoothed += gainCoeff * (targetGain - s.energyGainSmoothed);
|
|
y *= s.energyGainSmoothed;
|
|
}
|
|
}
|
|
|
|
if (damperEnabled && s.damperSoftenCountdown > 0)
|
|
{
|
|
const float softenMix = 1.0f - damperSoftenA;
|
|
s.damperSoftenState = s.damperSoftenState + softenMix * (y - s.damperSoftenState);
|
|
y = s.damperSoftenState;
|
|
--s.damperSoftenCountdown;
|
|
}
|
|
|
|
// dc block
|
|
if (DebugToggles::kEnablePm2DcBlock)
|
|
y = s.dc.process (y);
|
|
|
|
// Continuous hammer-string interaction (adds excitation during attack)
|
|
float excitation = 0.0f;
|
|
if (DebugToggles::kEnablePm2HammerExcitation && s.hammer.active && s.hammer.samplesLeft > 0)
|
|
{
|
|
auto& h = s.hammer;
|
|
float pen = h.pos - y;
|
|
pen = juce::jlimit (-h.maxPen, h.maxPen, pen);
|
|
float contact = juce::jmax (0.0f, pen + h.preload);
|
|
// CPU OPTIMIZATION: Use fastPow for hammer force calculation
|
|
float force = h.k * FastMath::fastPow (contact, h.exp) - h.damping * h.vel;
|
|
force = softClip (force, 1200.0f); // softer clamp to avoid discontinuities
|
|
float accel = force / juce::jmax (0.001f, h.mass);
|
|
h.vel += accel * dt;
|
|
h.pos += h.vel * dt;
|
|
h.pen = pen;
|
|
float shaped = h.toneState + (1.0f - h.toneAlpha) * (force - h.toneState);
|
|
h.toneState = shaped;
|
|
// FIX: Scale excitation based on remaining interaction time for smoother decay
|
|
// This prevents the abrupt cutoff when samplesLeft reaches 0
|
|
const float fadeStart = 200.0f; // Start fading in last 200 samples
|
|
float fadeGain = (h.samplesLeft > fadeStart) ? 1.0f
|
|
: (float) h.samplesLeft / fadeStart;
|
|
// Slight fade-in to soften the initial spike
|
|
const float fadeInSamples = 120.0f;
|
|
const float fadeInGain = (h.samplesElapsed < fadeInSamples)
|
|
? (float) h.samplesElapsed / fadeInSamples
|
|
: 1.0f;
|
|
float hammerGain = h.gain;
|
|
if (DebugToggles::kEnablePm2HammerGainRamp)
|
|
{
|
|
const float gainCoeff = 0.02f;
|
|
h.gainSmoothed += gainCoeff * (hammerGain - h.gainSmoothed);
|
|
hammerGain = h.gainSmoothed;
|
|
}
|
|
excitation = softClip (shaped * hammerGain * fadeGain * fadeInGain, 3.0f);
|
|
if (PhysicsToggles::kUsePhysicsDefaults && s.toneInjectSamplesLeft > 0)
|
|
{
|
|
excitation += FastMath::fastSin (s.toneInjectPhase) * s.toneInjectGain;
|
|
s.toneInjectPhase += s.toneInjectPhaseDelta;
|
|
if (s.toneInjectPhase >= juce::MathConstants<float>::twoPi)
|
|
s.toneInjectPhase -= juce::MathConstants<float>::twoPi;
|
|
--s.toneInjectSamplesLeft;
|
|
}
|
|
--h.samplesLeft;
|
|
++h.samplesElapsed;
|
|
if (! std::isfinite (h.pos) || ! std::isfinite (h.vel) || h.samplesLeft <= 0 || (h.pos <= 0.0f && h.vel <= 0.0f))
|
|
h.active = false;
|
|
}
|
|
|
|
// write back
|
|
float writeSample = softClip (y + excitation, 4.0f);
|
|
s.delay[(size_t) s.writePos] = writeSample;
|
|
s.writePos = (s.writePos + 1) % len;
|
|
|
|
float mono = y * s.baseGain * softGainScale;
|
|
if (PhysicsToggles::kUsePhysicsDefaults && s.toneInjectSamplesLeft > 0)
|
|
{
|
|
mono += FastMath::fastSin (s.toneInjectPhase) * s.toneInjectGain;
|
|
s.toneInjectPhase += s.toneInjectPhaseDelta;
|
|
if (s.toneInjectPhase >= juce::MathConstants<float>::twoPi)
|
|
s.toneInjectPhase -= juce::MathConstants<float>::twoPi;
|
|
--s.toneInjectSamplesLeft;
|
|
}
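// Note: while the hammer is still active, the same tone-inject oscillator also runs in the
// excitation branch above, so its phase advances (and samplesLeft decrements) twice in that sample.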
|
|
sumL += mono * s.panGainL;
|
|
sumR += mono * s.panGainR;
|
|
|
|
// duplex tap (short afterlength)
|
|
// Runs only when the duplex buffer was allocated in noteOn (skipDuplex is currently always false)
|
|
if (! skipDuplex && ! s.duplex.buf.empty())
|
|
{
|
|
float prev = s.duplex.buf[(size_t) s.duplex.write];
|
|
float input = y * s.duplex.inputGain;
|
|
s.duplex.buf[(size_t) s.duplex.write] = input + s.duplex.feedback * prev;
|
|
s.duplex.write = (s.duplex.write + 1) % (int) s.duplex.buf.size();
|
|
float duplexMono = prev * s.duplex.gain;
|
|
sumL += duplexMono * s.panGainL;
|
|
sumR += duplexMono * s.panGainR;
|
|
}
|
|
}
|
|
|
|
// IMPROVEMENT 3: Add stochastic body resonance (inter-harmonic fill)
|
|
// Simulates soundboard and cabinet broadband resonance that fills gaps between harmonics
|
|
// Modulated by loop energy so it follows the note's amplitude envelope
|
|
// CPU OPTIMIZATION: Skipped entirely in economy mode
|
|
if (! skipBodyNoise)
|
|
{
|
|
const float loopEnergy = FastMath::fastSqrt (juce::jmax (0.0f, loopEnergySmoothed));
|
|
const float noiseLevel = loopEnergy * 0.003f; // Reduced from 0.006f to lower noise floor
|
|
const bool allowBodyNoise = adsr.isActive() && loopEnergySmoothed > 1.0e-4f;
|
|
|
|
if (allowBodyNoise && noiseLevel > 1.0e-6f)
|
|
{
|
|
// Generate filtered noise (bandpass ~80-2500 Hz range)
|
|
bodyNoiseRng = 1664525u * bodyNoiseRng + 1013904223u;
|
|
float rawNoise = ((bodyNoiseRng >> 8) * (1.0f / 16777216.0f)) * 2.0f - 1.0f;
|
|
|
|
// Two-pole lowpass - reduced cutoff for less HF content
|
|
const float lpCoeff = 0.08f; // Was 0.12f - lower cutoff ~2kHz
|
|
bodyNoiseLp1 += lpCoeff * (rawNoise - bodyNoiseLp1);
|
|
bodyNoiseLp2 += lpCoeff * (bodyNoiseLp1 - bodyNoiseLp2);
|
|
|
|
// One-pole highpass at ~80Hz to remove rumble
|
|
const float hpCoeff = 0.995f;
|
|
float filtered = bodyNoiseLp2 - bodyNoiseHp;
|
|
bodyNoiseHp = bodyNoiseLp2 * (1.0f - hpCoeff) + bodyNoiseHp * hpCoeff;
|
|
|
|
// Apply energy modulation
|
|
float bodyNoise = filtered * noiseLevel;
|
|
|
|
// Slight stereo decorrelation for natural width
|
|
bodyNoiseRng = 1664525u * bodyNoiseRng + 1013904223u;
|
|
float stereoOffset = ((bodyNoiseRng >> 16) * (1.0f / 65536.0f)) * 0.3f;
|
|
|
|
sumL += bodyNoise * (0.5f + stereoOffset);
|
|
sumR += bodyNoise * (0.5f - stereoOffset);
|
|
}
|
|
}
|
|
|
|
// IMPROVEMENT 2: Output-stage fundamental boost using lowpass extraction
|
|
// Extract low frequencies (fundamental region) and mix back for warmer tone
|
|
// This is applied to the output sum, NOT inside the waveguide loop
|
|
{
|
|
// Use the first string's resonator state for the summed output
|
|
// Simple one-pole lowpass to extract fundamental region (~200Hz cutoff)
|
|
auto& s = strings[0];
|
|
const float lpAlpha = s.fundResonatorCoeff; // Pre-calculated in noteOn based on fundamental
|
|
if (s.fundResonatorGain > 0.001f && lpAlpha > 0.0f)
|
|
{
|
|
// Extract low frequency content
|
|
s.fundResonatorState1 += (1.0f - lpAlpha) * (sumL - s.fundResonatorState1);
|
|
s.fundResonatorState2 += (1.0f - lpAlpha) * (sumR - s.fundResonatorState2);
|
|
|
|
// Mix extracted lows back in for fundamental boost
|
|
sumL += s.fundResonatorState1 * s.fundResonatorGain;
|
|
sumR += s.fundResonatorState2 * s.fundResonatorGain;
|
|
}
|
|
}
|
|
|
|
loopEnergySmoothed += loopEnergySmoothCoeff * (loopEnergySample - loopEnergySmoothed);
|
|
lastEnv = loopEnergySmoothed;
|
|
// FIXED: Apply stringGainNorm to prevent level buildup from multi-string summing
|
|
float outL = sumL * pitchLoudnessGain * stringGainNorm;
|
|
float outR = sumR * pitchLoudnessGain * stringGainNorm;
|
|
if (! std::isfinite (outL)) outL = lastOutL * 0.98f;
|
|
if (! std::isfinite (outR)) outR = lastOutR * 0.98f;
|
|
const bool noteHpfEnabled = DebugToggles::kEnablePm2NoteHpf;
|
|
float hpL = noteHpfEnabled ? noteHpf.processSample (0, outL) : outL;
|
|
float hpR = noteHpfEnabled ? noteHpf.processSample (noteHpfNumChannels > 1 ? 1 : 0, outR) : outR;
|
|
if (DebugToggles::kEnablePm2PostLpfEnv)
|
|
{
|
|
const float env = postLpfEnv.getNextSample();
|
|
const float cutoff = juce::jlimit (400.0f, 16000.0f,
|
|
postLpfMinHz + (postLpfMaxHz - postLpfMinHz) * env);
|
|
const float a = std::exp (-2.0f * juce::MathConstants<float>::pi * cutoff
|
|
/ (float) juce::jmax (20.0, sampleRate));
|
|
postLpfStateL = a * postLpfStateL + (1.0f - a) * hpL;
|
|
postLpfStateR = a * postLpfStateR + (1.0f - a) * hpR;
|
|
hpL = postLpfStateL;
|
|
hpR = postLpfStateR;
|
|
}
|
|
// FIX #1 & #4: Use instance bus pointers with block-relative indices
|
|
// Weak coupling/sympathetic returns from other notes
|
|
const int busIdx = startSampleInBlock + i; // Block-relative index for shared buses
|
|
const auto coupleIn = (couplingEnabled && couplingBus) ? couplingBus->read (busIdx) : std::make_pair (0.0f, 0.0f);
|
|
const auto sympIn = (couplingEnabled && sympBus) ? sympBus->read (busIdx) : std::make_pair (0.0f, 0.0f);
|
|
|
|
// Option 2: Reduce coupling and sympathetic return for high notes to prevent swell
|
|
// High notes are more prone to energy accumulation from these feedback paths
|
|
const float highNoteNorm = DebugToggles::kEnablePm2HighNoteCouplingTilt
|
|
? juce::jlimit (0.0f, 1.0f, ((float) currentMidiNote - 72.0f) / 48.0f)
|
|
: 0.0f;
|
|
const float coupleTilt = 1.0f - 0.6f * highNoteNorm * highNoteNorm; // Quadratic reduction, lighter and spread wider
|
|
const float sympTilt = 1.0f - sympHighDamp * highNoteNorm - 0.25f * highNoteNorm * highNoteNorm; // Extra quadratic term
|
|
|
|
const float pedalFade = pedalChangeFade;
|
|
const float coupleGain = couplingGainEff * pedalFade;
|
|
const float sympGainScaled = sympGainEff * pedalFade;
|
|
float coupleL = (coupleGain > 1.0e-4f) ? couplingBpL.processSample (0, coupleIn.first) * coupleGain * coupleTilt : 0.0f;
|
|
float coupleR = (coupleGain > 1.0e-4f) ? couplingBpR.processSample (noteHpfNumChannels > 1 ? 1 : 0, coupleIn.second) * coupleGain * coupleTilt : 0.0f;
|
|
float sympL = (sympGainScaled > 1.0e-4f) ? sympBpL.processSample (0, sympIn.first) * (sympGainScaled * sympTilt) : 0.0f;
|
|
float sympR = (sympGainScaled > 1.0e-4f) ? sympBpR.processSample (noteHpfNumChannels > 1 ? 1 : 0, sympIn.second) * (sympGainScaled * sympTilt) : 0.0f;
|
|
hpL += coupleL + sympL;
|
|
hpR += coupleR + sympR;
|
|
hpL *= stealGain * velocityGain;
|
|
hpR *= stealGain * velocityGain;
|
|
if (noteFadeSamplesRemaining > 0 && noteFadeSamplesTotal > 0)
|
|
{
|
|
const float fadeT = 1.0f - (float) noteFadeSamplesRemaining / (float) noteFadeSamplesTotal;
|
|
hpL *= fadeT;
|
|
hpR *= fadeT;
|
|
--noteFadeSamplesRemaining;
|
|
}
|
|
if (! keyHeld && ! sustainPedalDown && keyOffFadeSamplesRemaining > 0 && keyOffFadeSamplesTotal > 0)
|
|
{
|
|
const float fadeT = 1.0f - (float) keyOffFadeSamplesRemaining / (float) keyOffFadeSamplesTotal;
|
|
hpL *= fadeT;
|
|
hpR *= fadeT;
|
|
--keyOffFadeSamplesRemaining;
|
|
}
|
|
|
|
// Option 5: Apply anti-swell envelope - very slow decay for high notes
|
|
// This counteracts gradual energy accumulation that causes swell
|
|
if (DebugToggles::kEnablePm2AntiSwell)
|
|
{
|
|
hpL *= antiSwellEnv;
|
|
hpR *= antiSwellEnv;
|
|
// Decay the envelope (only when key is held to prevent affecting release)
|
|
if (keyHeld || sustainPedalDown)
|
|
antiSwellEnv *= antiSwellDecayPerSample;
|
|
// Floor to prevent envelope from going to zero over very long holds
|
|
antiSwellEnv = juce::jmax (antiSwellEnv, 0.9f);
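// A floor of 0.9 linear is roughly -0.9 dB, so the anti-swell trim never exceeds about 1 dB.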
|
|
}
|
|
|
|
if (R)
|
|
{
|
|
L[i] += hpL;
|
|
R[i] += hpR;
|
|
}
|
|
else
|
|
{
|
|
L[i] += 0.5f * (hpL + hpR);
|
|
}
|
|
lastOutL = hpL;
|
|
lastOutR = hpR;
|
|
blockAbsMax = std::max (blockAbsMax, std::max (std::abs (hpL), std::abs (hpR)));
|
|
// FIX #1 & #4: Use instance bus pointers with block-relative indices
|
|
if (couplingEnabled && couplingBus && couplingGainEff > 1.0e-4f)
|
|
couplingBus->add (busIdx, hpL * pedalChangeFade, hpR * pedalChangeFade);
|
|
if (couplingEnabled && undampedSend && sympBus && ! lowVelSkip && sympGainEff > 1.0e-4f)
|
|
sympBus->add (busIdx, hpL * pedalChangeFade, hpR * pedalChangeFade);
|
|
|
|
if (stealFadeSamples > 0 && stealFadeRemaining == 0)
|
|
{
|
|
forceSilence();
|
|
return;
|
|
}
|
|
}
|
|
|
|
noteLifeSamples += numSamples;
|
|
// CPU OPTIMIZATION: Very aggressive thresholds for faster voice termination
|
|
// Notes that have decayed below audibility are terminated quickly to free CPU
|
|
// These thresholds are 4-5x more aggressive than previous values
|
|
const float energyThresholdBase = PhysicsToggles::kUsePhysicsDefaults ? 5.0e-4f : 8.0e-4f;
|
|
const float blockThresholdBase = PhysicsToggles::kUsePhysicsDefaults ? 2.0e-4f : 3.0e-4f;
|
|
const int minLifeBase = PhysicsToggles::kUsePhysicsDefaults ? (int) std::round (0.003 * sampleRate) : 0; // 3ms minimum
|
|
const float stopNoteNorm = juce::jlimit (0.0f, 1.0f, ((float) currentMidiNote - 72.0f) / 36.0f);
|
|
const float stopScale = 1.0f - 0.85f * stopNoteNorm; // More aggressive scaling for high notes
|
|
const float energyThreshold = energyThresholdBase * juce::jmax (0.2f, stopScale);
|
|
const float blockThreshold = blockThresholdBase * juce::jmax (0.2f, stopScale);
|
|
const int minLife = minLifeBase + (int) std::round (stopNoteNorm * 0.010 * sampleRate); // 10ms extra for high notes
|
|
if (! keyHeld && ! sustainPedalDown
|
|
&& loopEnergySmoothed < energyThreshold && blockAbsMax < blockThreshold
|
|
&& noteLifeSamples > minLife)
|
|
{
|
|
adsr.reset();
|
|
active = false;
|
|
}
|
|
}
|
|
|
|
void Pm2Synth::preallocateVoiceForNote (int midiNoteNumber)
|
|
{
|
|
juce::ignoreUnused (midiNoteNumber);
|
|
|
|
int active = 0;
|
|
for (int i = 0; i < getNumVoices(); ++i)
|
|
if (auto* v = getVoice (i))
|
|
if (v->isVoiceActive())
|
|
++active;
|
|
|
|
if (active < getNumVoices())
|
|
return; // at least one free voice, no need to steal
|
|
|
|
// Voice stealing: pick the quietest voice by loop energy
|
|
juce::SynthesiserVoice* best = nullptr;
|
|
float bestEnergy = std::numeric_limits<float>::max();
|
|
|
|
for (int i = 0; i < getNumVoices(); ++i)
|
|
{
|
|
auto* v = getVoice (i);
|
|
if (v == nullptr)
|
|
continue;
|
|
|
|
if (auto* pv = dynamic_cast<Pm2Voice*> (v))
|
|
{
|
|
float energy = pv->getLoopEnergy();
|
|
if (! std::isfinite (energy))
|
|
energy = std::numeric_limits<float>::max();
|
|
if (energy < bestEnergy)
|
|
{
|
|
bestEnergy = energy;
|
|
best = v;
|
|
}
|
|
}
|
|
}
|
|
|
|
if (auto* pv = dynamic_cast<Pm2Voice*> (best))
|
|
{
|
|
pv->prepareForSteal();
|
|
pv->stopNote (0.0f, true); // allow tail-off for smoother steal
|
|
requestDeclick (64);
|
|
}
|
|
}
|
|
|
|
// FIX: Preallocate multiple voices for a chord - ensures all notes in a chord get voices
|
|
// without JUCE's internal voice stealing interfering with our age-based priority
|
|
void Pm2Synth::preallocateVoicesForChord (int numNotesNeeded)
|
|
{
|
|
if (numNotesNeeded <= 0)
|
|
return;
|
|
|
|
// Count currently active voices
|
|
int activeCount = 0;
|
|
for (int i = 0; i < getNumVoices(); ++i)
|
|
if (auto* v = getVoice (i))
|
|
if (v->isVoiceActive())
|
|
++activeCount;
|
|
|
|
// Calculate how many voices we need to free
|
|
const int totalVoices = getNumVoices();
|
|
const int freeVoices = totalVoices - activeCount;
|
|
int voicesToSteal = numNotesNeeded - freeVoices;
|
|
|
|
// Steal voices one at a time, always picking the quietest (lowest loop energy)
|
|
while (voicesToSteal > 0)
|
|
{
|
|
// Find the quietest note to steal by loop energy
|
|
juce::SynthesiserVoice* best = nullptr;
|
|
float bestEnergy = std::numeric_limits<float>::max();
|
|
|
|
for (int i = 0; i < getNumVoices(); ++i)
|
|
{
|
|
auto* v = getVoice (i);
|
|
if (v == nullptr || ! v->isVoiceActive())
|
|
continue;
|
|
|
|
if (auto* pv = dynamic_cast<Pm2Voice*> (v))
|
|
{
|
|
float energy = pv->getLoopEnergy();
|
|
if (! std::isfinite (energy))
|
|
energy = std::numeric_limits<float>::max();
|
|
if (energy < bestEnergy)
|
|
{
|
|
bestEnergy = energy;
|
|
best = v;
|
|
}
|
|
}
|
|
}
|
|
|
|
if (auto* pv = dynamic_cast<Pm2Voice*> (best))
|
|
{
|
|
pv->prepareForSteal();
|
|
pv->stopNote (0.0f, true); // allow tail-off for smoother steal
|
|
requestDeclick (64);
|
|
--voicesToSteal;
|
|
}
|
|
else
|
|
{
|
|
// No more voices to steal, break to avoid infinite loop
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
bool Pm2Synth::hasActiveVoiceForNote (int midiNoteNumber) const
|
|
{
|
|
for (int i = 0; i < getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (getVoice (i)))
|
|
if (v->isActive() && v->getCurrentMidiNote() == midiNoteNumber)
|
|
return true;
|
|
return false;
|
|
}
|
|
|
|
bool Pm2Synth::hardRetriggerActiveVoice (int midiNoteNumber, float velocity)
|
|
{
|
|
Pm2Voice* best = nullptr;
|
|
uint64_t bestAge = 0;
|
|
for (int i = 0; i < getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (getVoice (i)))
|
|
if (v->isActive() && v->getCurrentMidiNote() == midiNoteNumber)
|
|
{
|
|
const uint64_t age = v->getNoteAge();
|
|
if (best == nullptr || age > bestAge)
|
|
{
|
|
best = v;
|
|
bestAge = age;
|
|
}
|
|
}
|
|
if (best == nullptr)
|
|
return false;
|
|
best->hardRetrigger (midiNoteNumber, velocity);
|
|
return true;
|
|
}
|
|
|
|
//==============================================================================
|
|
// Processor ctor
|
|
FluteSynthAudioProcessor::FluteSynthAudioProcessor()
|
|
#ifndef JucePlugin_PreferredChannelConfigurations
|
|
: AudioProcessor (BusesProperties().withOutput ("Output", juce::AudioChannelSet::stereo(), true))
|
|
#endif
|
|
, apvts (*this, nullptr, "Params", createParameterLayout())
|
|
{
|
|
// CPU OPTIMIZATION: Reduced polyphony from 18 to 15 voices
|
|
for (int i = 0; i < 15; ++i)
|
|
synth.addVoice (new FluteVoice (apvts));
|
|
synth.addSound (new SimpleSound());
|
|
|
|
for (int i = 0; i < 15; ++i)
|
|
pmSynth.addVoice (new PmVoice());
|
|
pmSynth.addSound (new PmSound());
|
|
|
|
for (int i = 0; i < 15; ++i)
|
|
pm2Synth.addVoice (new Pm2Voice());
|
|
pm2Synth.addSound (new Pm2Sound());
|
|
|
|
// FIX #1: Set shared bus pointers on all PM2 voices
|
|
pm2Synth.setSharedBuses (&couplingBus, &sympBus);
|
|
|
|
applyMasterTuneToVoices();
|
|
loadEmbeddedPreset();
|
|
}
|
|
|
|
void FluteSynthAudioProcessor::prepareToPlay (double sr, int samplesPerBlock)
|
|
{
|
|
// CPU OPTIMIZATION: Initialize fast math lookup tables (only done once)
|
|
FastMath::initTables();
|
|
|
|
lastSampleRate = sr;
|
|
prepared = true;
|
|
applyMasterTuneToVoices();
|
|
{
|
|
const double smoothTimeSec = 0.020; // ~20ms smoothing for click reduction
|
|
auto resetSmooth = [sr, smoothTimeSec] (juce::SmoothedValue<float, juce::ValueSmoothingTypes::Linear>& s, float value)
|
|
{
|
|
s.reset (sr, smoothTimeSec);
|
|
s.setCurrentAndTargetValue (value);
|
|
};
|
|
resetSmooth (pm2GainLinSmoothed, pm2GainLin);
|
|
resetSmooth (outputGainLinSmoothed, outputGainLin);
|
|
resetSmooth (postCutoffHzSmoothed, postCutoffHz);
|
|
resetSmooth (postQSmoothed, postQ);
|
|
resetSmooth (postTiltDbSmoothed, postTiltDb);
|
|
resetSmooth (outputLpfCutoffSmoothed, outputLpfCutoff);
|
|
resetSmooth (outputLpfQSmoothed, outputLpfQ);
|
|
resetSmooth (sustainGainLinSmoothed, 1.0f);
|
|
resetSmooth (sustainReleaseScaleSmoothed, 1.0f);
|
|
resetSmooth (sustainValueSmoothed, 0.0f);
|
|
const float noteTerm = brightnessNoteSlopeDb * ((float) lastMidiNote - 60.0f) * (1.0f / 24.0f);
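// brightnessNoteSlopeDb is expressed per 24 semitones (two octaves) relative to middle C (MIDI 60).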
|
|
const float initialBrightness = juce::jlimit (-12.0f, brightnessMaxDb,
|
|
brightnessBaseDb + lastVelocityNorm * brightnessVelSlopeDb + noteTerm);
|
|
resetSmooth (brightnessDbSmoothed, initialBrightness);
|
|
brightnessCurrentDb = initialBrightness;
|
|
}
|
|
|
|
// VA synth
|
|
synth.setCurrentPlaybackSampleRate (sr);
|
|
for (int i = 0; i < synth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<FluteVoice*> (synth.getVoice (i)))
|
|
{
|
|
v->prepare (sr, samplesPerBlock, getTotalNumOutputChannels());
|
|
v->setNoteOffsets (noteOffsetsCents);
|
|
}
|
|
|
|
pmSynth.setCurrentPlaybackSampleRate (sr);
|
|
for (int i = 0; i < pmSynth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<PmVoice*> (pmSynth.getVoice (i)))
|
|
{
|
|
v->prepare (sr, samplesPerBlock, getTotalNumOutputChannels());
|
|
v->setNoteOffsets (noteOffsetsCents);
|
|
}
|
|
|
|
pm2Synth.setCurrentPlaybackSampleRate (sr);
|
|
for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
|
|
{
|
|
v->prepare (sr, samplesPerBlock, getTotalNumOutputChannels());
|
|
v->setNoteOffsets (noteOffsetsCents);
|
|
}
|
|
|
|
// Sync PM/PM2 voice params to current state on prepare
|
|
const float a = apvts.getRawParameterValue (ParamIDs::attack)->load();
|
|
const float d = apvts.getRawParameterValue (ParamIDs::decay)->load();
|
|
const float s = apvts.getRawParameterValue (ParamIDs::sustain)->load();
|
|
const float r = apvts.getRawParameterValue (ParamIDs::release)->load();
|
|
baseRelease = r;
|
|
for (int i = 0; i < pmSynth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<PmVoice*> (pmSynth.getVoice (i)))
|
|
{
|
|
v->setEnvParams (a, d, s, r);
|
|
v->setReleaseScale (baseRelease, 1.0f);
|
|
}
|
|
for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
|
|
{
|
|
v->setEnvParams (a, d, s, r);
|
|
v->setParams (pmString);
|
|
v->setHammerParams (pmHammer);
|
|
v->setFeltParams (pmFelt);
|
|
v->setDuplexParams (duplexCfg);
|
|
v->setWdfParams (wdfCfg);
|
|
v->setSoftPedal (softPedalDown, unaCfg);
|
|
v->setReleaseScale (baseRelease, 1.0f);
|
|
v->setReleaseExtension (releaseExtension);
|
|
v->setSustainPedalDown (sustainPedalDown);
|
|
v->setDamperParams (damperCfg);
|
|
v->setDamperLift (damperLift);
|
|
v->setEconomyMode (pm2EconomyMode);
|
|
v->setHighPolyMode (pm2EconomyMode);
|
|
v->setCouplingParams (couplingCfg);
|
|
v->setDispersionCurve (dispersionCfg);
|
|
}
|
|
|
|
// FIX #1: Ensure shared bus pointers are set on all PM2 voices
|
|
pm2Synth.setSharedBuses (&couplingBus, &sympBus);
|
|
|
|
// Breath + formants
|
|
juce::dsp::ProcessSpec spec;
|
|
spec.sampleRate = sr;
|
|
spec.maximumBlockSize = (juce::uint32) samplesPerBlock;
|
|
spec.numChannels = (juce::uint32) juce::jmax (1, getTotalNumOutputChannels());
|
|
mainSpec = spec;
|
|
|
|
auto allocScratch = [] (juce::AudioBuffer<float>& buf, int channels, int samples)
|
|
{
|
|
buf.setSize (channels, samples, false, false, true);
|
|
buf.clear();
|
|
};
|
|
const int scratchCh = (int) spec.numChannels;
|
|
allocScratch (hybridVaBuf, scratchCh, samplesPerBlock);
|
|
allocScratch (hybridPmBuf, scratchCh, samplesPerBlock);
|
|
allocScratch (hybridPm2Buf, scratchCh, samplesPerBlock);
|
|
allocScratch (micScratch, scratchCh, samplesPerBlock);
|
|
allocScratch (breathScratch, 1, samplesPerBlock);
|
|
allocScratch (formantScratch, scratchCh, samplesPerBlock);
|
|
allocScratch (pedalScratch, scratchCh, samplesPerBlock);
|
|
allocScratch (sympScratch, scratchCh, samplesPerBlock);
|
|
allocScratch (modalScratch, scratchCh, samplesPerBlock);
|
|
allocScratch (soundboardScratch, scratchCh, samplesPerBlock);
|
|
if (DebugToggles::kSoundboardConvolutionDownsample > 1)
|
|
{
|
|
const int dsSamples = (samplesPerBlock + DebugToggles::kSoundboardConvolutionDownsample - 1)
|
|
/ DebugToggles::kSoundboardConvolutionDownsample;
|
|
allocScratch (soundboardScratchDs, scratchCh, dsSamples);
|
|
}
|
|
soundboardConvolution.reset();
|
|
soundboardConvolution.prepare (spec);
|
|
if (DebugToggles::kSoundboardConvolutionDownsample > 1)
|
|
{
|
|
juce::dsp::ProcessSpec dsSpec = spec;
|
|
dsSpec.sampleRate = spec.sampleRate / (double) DebugToggles::kSoundboardConvolutionDownsample;
|
|
dsSpec.maximumBlockSize = (juce::uint32)
|
|
((spec.maximumBlockSize + DebugToggles::kSoundboardConvolutionDownsample - 1)
|
|
/ DebugToggles::kSoundboardConvolutionDownsample);
|
|
soundboardConvolutionDs.reset();
|
|
soundboardConvolutionDs.prepare (dsSpec);
|
|
}
|
|
soundboardIrDirty = true;
|
|
soundboardIrLastT60 = 0.0f;
|
|
soundboardIrLastDamp = 0.0f;
|
|
postReverb.reset();
|
|
postReverbParamsValid = false;
|
|
|
|
breathBp.reset(); breathBp.prepare (spec);
|
|
breathBp.setType (juce::dsp::StateVariableTPTFilterType::bandpass);
|
|
breathBp.setCutoffFrequency (breathBpFreqStored);
|
|
breathBp.setResonance (breathBpQStored);
|
|
|
|
for (int i = 0; i < 2; ++i)
|
|
{
|
|
formant[i].f.reset();
|
|
formant[i].f.prepare (spec);
|
|
formant[i].f.setType (juce::dsp::StateVariableTPTFilterType::bandpass);
|
|
formant[i].gainLin = 1.0f;
|
|
formant[i].enabled = false;
|
|
}
|
|
|
|
// Post tone controls (per-engine key-tracked LPF cascaded + shared tilt shelves)
|
|
auto prepLP = [&spec] (juce::dsp::StateVariableTPTFilter<float>& f)
|
|
{
|
|
f.reset();
|
|
f.prepare (spec);
|
|
f.setType (juce::dsp::StateVariableTPTFilterType::lowpass);
|
|
};
|
|
prepLP (postVaLp1); prepLP (postVaLp2);
|
|
prepLP (postPmLp1); prepLP (postPmLp2);
|
|
prepLP (postPm2Lp1); prepLP (postPm2Lp2);
|
|
|
|
tiltLow.reset(); tiltHigh.reset();
|
|
tiltLow.prepare (spec); tiltHigh.prepare (spec);
|
|
tiltNumChannels = (int) spec.numChannels;
|
|
tiltReady = false;
|
|
|
|
auto prepSendHpf = [this] (decltype (pedalSendHpf)& f)
|
|
{
|
|
f.reset();
|
|
f.prepare (mainSpec);
|
|
auto coeffs = juce::dsp::IIR::Coefficients<float>::makeHighPass (mainSpec.sampleRate,
|
|
sendHpfCutoff,
|
|
0.707f);
|
|
if (f.state == nullptr)
|
|
f.state = coeffs;
|
|
else
|
|
*f.state = *coeffs;
|
|
};
|
|
prepSendHpf (pedalSendHpf);
|
|
prepSendHpf (sympSendHpf);
|
|
prepSendHpf (soundboardSendHpf);
|
|
{
|
|
soundboardReturnHpf.reset();
|
|
soundboardReturnHpf.prepare (mainSpec);
|
|
auto coeffs = juce::dsp::IIR::Coefficients<float>::makeHighPass (mainSpec.sampleRate,
|
|
soundboardReturnHpfCutoff,
|
|
0.707f);
|
|
if (soundboardReturnHpf.state == nullptr)
|
|
soundboardReturnHpf.state = coeffs;
|
|
else
|
|
*soundboardReturnHpf.state = *coeffs;
|
|
}
|
|
{
|
|
juce::dsp::ProcessSpec monoSpec = mainSpec;
|
|
monoSpec.numChannels = 1;
|
|
modalSendHpf.reset();
|
|
modalSendHpf.prepare (monoSpec);
|
|
modalSendHpf.coefficients = juce::dsp::IIR::Coefficients<float>::makeHighPass (monoSpec.sampleRate,
|
|
sendHpfCutoff,
|
|
0.707f);
|
|
}
|
|
sendHpfNumChannels = (int) mainSpec.numChannels;
|
|
|
|
// Final output LPF
|
|
outputLpf.reset();
|
|
outputLpf.prepare (spec);
|
|
outputLpf.setType (juce::dsp::StateVariableTPTFilterType::lowpass);
|
|
outputLpfNumChannels = (int) spec.numChannels;
|
|
updateOutputLpf();
|
|
for (auto& f : outputEqFilters)
|
|
{
|
|
f.reset();
|
|
f.prepare (spec);
|
|
}
|
|
outputEqNumChannels = (int) spec.numChannels;
|
|
updateOutputEq();
|
|
// Output HPF for rumble control
|
|
outputHpf.reset();
|
|
outputHpf.prepare (spec);
|
|
{
|
|
auto coeffs = juce::dsp::IIR::Coefficients<float>::makeHighPass (spec.sampleRate,
|
|
outputHpfCutoff,
|
|
0.707f);
|
|
if (outputHpf.state == nullptr)
|
|
outputHpf.state = coeffs;
|
|
else
|
|
*outputHpf.state = *coeffs;
|
|
}
|
|
outputHpfNumChannels = (int) spec.numChannels;
|
|
|
|
// Lookahead final limiter
|
|
{
|
|
const int la = juce::jmax (1, (int) std::round (sr * (limiterLookaheadMs * 0.001f)));
|
|
limiterLookaheadSamples = la;
|
|
limiterDelayBufferSize = la + samplesPerBlock + 1;
|
|
limiterDelayBuffer.setSize ((int) spec.numChannels, limiterDelayBufferSize, false, false, true);
|
|
limiterDelayBuffer.clear();
|
|
limiterWritePos = 0;
|
|
limiterGain = 1.0f;
|
|
auto msToCoeff = [sr] (float ms)
|
|
{
|
|
const double sec = juce::jmax (0.0001, (double) ms * 0.001);
|
|
return (float) std::exp (-1.0 / (sec * sr));
|
|
};
|
|
limiterAttackCoeff = msToCoeff (limiterAttackMs);
|
|
limiterReleaseCoeff = msToCoeff (limiterReleaseMs);
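// One-pole ballistics: coeff = exp (-1 / (sec * sr)); e.g. 5 ms at 48 kHz gives exp (-1/240) ~= 0.9958.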
|
|
setLatencySamples (limiterLookaheadSamples);
|
|
}
|
|
|
|
prepareBrightnessFilters();
|
|
for (int ch = 0; ch < juce::jmin (2, (int) spec.numChannels); ++ch)
|
|
outputDcBlock[(size_t) ch].reset (sr);
|
|
outputDcNumChannels = (int) spec.numChannels;
|
|
|
|
// Mic mixer setup
|
|
updateMicProcessors();
|
|
|
|
// Hammer HP
|
|
hammerHP.reset();
|
|
hammerHP.prepare (juce::dsp::ProcessSpec{ sr, (juce::uint32) samplesPerBlock, 1u });
|
|
hammerHP.setType (juce::dsp::StateVariableTPTFilterType::highpass);
|
|
hammerHP.setCutoffFrequency (hammerHpHz);
|
|
// Recalculate hammer decay coefficient now that sample rate is valid
|
|
// (may have been skipped during construction when lastSampleRate was 0)
|
|
if (hammerDecaySec > 0.0005f)
|
|
{
|
|
const double tau = std::max (0.0005, (double) hammerDecaySec);
|
|
hammerDecayCoeff = (float) std::exp (-1.0 / (tau * sr));
|
|
}
|
|
else
|
|
{
|
|
hammerDecayCoeff = 0.0f;
|
|
}
|
|
|
|
// Action noises
|
|
auto decayToCoeff = [sr] (float sec)
|
|
{
|
|
if (sec <= 0.0005f) return 0.0f;
|
|
const double tau = std::max (0.0005, (double) sec);
|
|
return (float) std::exp (-1.0 / (tau * sr));
|
|
};
|
|
|
|
keyOffHP.reset();
|
|
keyOffHP.prepare (juce::dsp::ProcessSpec{ sr, (juce::uint32) samplesPerBlock, 1u });
|
|
keyOffHP.setType (juce::dsp::StateVariableTPTFilterType::highpass);
|
|
keyOffHP.setCutoffFrequency (keyOffHpHz);
|
|
keyOffDecayCoeff = decayToCoeff (keyOffDecaySec);
|
|
|
|
pedalThumpLP.reset();
|
|
pedalThumpLP.prepare (juce::dsp::ProcessSpec{ sr, (juce::uint32) samplesPerBlock, 1u });
|
|
pedalThumpLP.setType (juce::dsp::StateVariableTPTFilterType::lowpass);
|
|
pedalThumpLP.setCutoffFrequency (pedalThumpLpHz);
|
|
pedalThumpDecayCoeff = decayToCoeff (pedalThumpDecaySec);
|
|
|
|
releaseThumpLP.reset();
|
|
releaseThumpLP.prepare (juce::dsp::ProcessSpec{ sr, (juce::uint32) samplesPerBlock, 1u });
|
|
releaseThumpLP.setType (juce::dsp::StateVariableTPTFilterType::lowpass);
|
|
releaseThumpLP.setCutoffFrequency (releaseThumpLpHz);
|
|
releaseThudHP.reset();
|
|
releaseThudHP.prepare (juce::dsp::ProcessSpec{ sr, (juce::uint32) samplesPerBlock, 1u });
|
|
releaseThudHP.setType (juce::dsp::StateVariableTPTFilterType::highpass);
|
|
releaseThudHP.setCutoffFrequency (releaseThudHpHz);
|
|
releaseThumpDecayCoeff = decayToCoeff (releaseThumpDecaySec);
|
|
|
|
updateDamperCoeffs();
|
|
|
|
// Soundboard
|
|
soundboardReverb.reset();
|
|
soundboardConvolution.reset();
|
|
soundboardConvolutionDs.reset();
|
|
pedalReverb.reset();
|
|
pedalReverbParamsValid = false;
|
|
sympParamsValid = false;
|
|
soundboardParamsValid = false;
|
|
postReverb.reset();
|
|
postReverbParamsValid = false;
|
|
modalChannels = juce::jmax (1, juce::jmin (2, getTotalNumOutputChannels()));
|
|
const int maxPredelaySamples = (int) std::ceil (sr * 0.020); // pmPredelayMs clamps to 20 ms
|
|
predelayCapacitySamples = juce::jmax (256, maxPredelaySamples + samplesPerBlock + 2);
|
|
predelayBuf.assign ((size_t) predelayCapacitySamples, 0.0f);
|
|
predelayWrite = 0;
|
|
modalDirty = true;
|
|
|
|
// Apply current APVTS values to runtime state after DSP is prepared.
|
|
syncExtendedParamsFromAPVTS();
|
|
updateSoundboardConvolution (true);
|
|
|
|
updatePostFiltersForNote (lastMidiNote);
|
|
}
|
|
|
|
void FluteSynthAudioProcessor::releaseResources() { prepared = false; tiltReady = false; }
|
|
|
|
juce::AudioProcessorEditor* FluteSynthAudioProcessor::createEditor()
|
|
{
|
|
return new FluteSynthAudioProcessorEditor (*this);
|
|
}
|
|
|
|
#ifndef JucePlugin_PreferredChannelConfigurations
|
|
bool FluteSynthAudioProcessor::isBusesLayoutSupported (const BusesLayout& layouts) const
|
|
{
|
|
auto out = layouts.getMainOutputChannelSet();
|
|
return out == juce::AudioChannelSet::mono() || out == juce::AudioChannelSet::stereo();
|
|
}
|
|
#endif
|
|
|
|
static inline float fastRand01(uint32_t& s)
|
|
{
|
|
s = 1664525u * s + 1013904223u;
|
|
return (float)((s >> 8) * (1.0 / 16777216.0)); // ~[0..1)
|
|
}
|
|
|
|
static juce::dsp::IIR::Coefficients<float>::Ptr makeBandPass (double sr, float f, float q)
|
|
{
|
|
f = juce::jlimit (60.0f, 5000.0f, f);
|
|
q = juce::jlimit (0.7f, 8.0f, q);
|
|
return juce::dsp::IIR::Coefficients<float>::makeBandPass (sr, f, q);
|
|
}
|
|
|
|
void FluteSynthAudioProcessor::updateSoundboardConvolution (bool force)
|
|
{
|
|
if (! DebugToggles::kEnableSoundboardConvolution)
|
|
return;
|
|
if (! prepared || lastSampleRate <= 0.0)
|
|
return;
|
|
if (! force && ! soundboardIrDirty)
|
|
return;
|
|
|
|
auto ir = buildSoundboardIr (lastSampleRate, soundboardT60Sec, soundboardDampParam);
|
|
const size_t irSamples = (size_t) ir.getNumSamples();
|
|
if (irSamples > 0)
|
|
{
|
|
soundboardConvolution.loadImpulseResponse (std::move (ir),
lastSampleRate,
juce::dsp::Convolution::Stereo::no,
juce::dsp::Convolution::Trim::no,
juce::dsp::Convolution::Normalise::no);
|
|
}
|
|
if (DebugToggles::kSoundboardConvolutionDownsample > 1)
|
|
{
|
|
const double dsRate = lastSampleRate / (double) DebugToggles::kSoundboardConvolutionDownsample;
|
|
auto irDs = buildSoundboardIr (dsRate, soundboardT60Sec, soundboardDampParam);
|
|
if (irDs.getNumSamples() > 0)
|
|
{
|
|
soundboardConvolutionDs.loadImpulseResponse (std::move (irDs),
dsRate,
juce::dsp::Convolution::Stereo::no,
juce::dsp::Convolution::Trim::no,
juce::dsp::Convolution::Normalise::no);
|
|
}
|
|
}
|
|
soundboardIrLastT60 = soundboardT60Sec;
|
|
soundboardIrLastDamp = soundboardDampParam;
|
|
soundboardIrDirty = false;
|
|
}
|
|
|
|
juce::AudioBuffer<float> FluteSynthAudioProcessor::buildSoundboardIr (double sampleRate, float t60Sec, float damp) const
|
|
{
|
|
t60Sec = juce::jlimit (0.9f, 2.4f, t60Sec);
|
|
damp = juce::jlimit (0.0f, 1.0f, damp);
|
|
|
|
const float lengthSec = juce::jlimit (0.50f, 1.60f, 0.50f + 0.45f * t60Sec);
|
|
const int numSamples = juce::jmax (64, (int) std::round (lengthSec * sampleRate));
|
|
juce::AudioBuffer<float> ir (1, numSamples);
|
|
ir.clear();
|
|
|
|
struct Mode
|
|
{
|
|
float phase { 0.0f };
|
|
float phaseInc { 0.0f };
|
|
float amp { 0.0f };
|
|
float decay { 0.999f };
|
|
};
|
|
|
|
const int numModes = 48;
|
|
const float fMin = 190.0f;
|
|
const float fMax = 2200.0f;
|
|
const float twoPi = juce::MathConstants<float>::twoPi;
|
|
uint32_t seed = 0x1F2E3D4Cu;
|
|
|
|
std::vector<Mode> modes;
|
|
modes.reserve ((size_t) numModes);
|
|
|
|
for (int i = 0; i < numModes; ++i)
|
|
{
|
|
float r = (float) (i + 1) / (float) (numModes + 1);
|
|
r = r * r * r; // cluster modes toward low-mid for more tonal body
|
|
r += (fastRand01 (seed) - 0.5f) * 0.025f;
|
|
r = juce::jlimit (0.0f, 1.0f, r);
|
|
const float freq = fMin * std::pow (fMax / fMin, r);
|
|
const float freqNorm = freq / fMax;
|
|
float modeT60 = t60Sec / (1.0f + damp * (0.9f + 3.5f * freqNorm));
|
|
modeT60 = juce::jlimit (0.10f, t60Sec, modeT60);
|
|
const double tau = modeT60 / std::log (1000.0);
|
|
const float decay = (float) std::exp (-1.0 / (tau * sampleRate));
|
|
const float amp = 0.08f * std::pow (1.0f - r, 1.2f) / std::sqrt (freq / fMin);
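// T60 relation: a 60 dB drop is an amplitude factor of 1000, so tau = T60 / ln(1000) and the
// per-sample decay is exp (-1 / (tau * sampleRate)). Mode amplitude also rolls off roughly as
// 1/sqrt(freq), keeping the low-mid modes dominant.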
|
|
const float phase = fastRand01 (seed) * twoPi;
|
|
const float phaseInc = twoPi * freq / (float) sampleRate;
|
|
modes.push_back ({ phase, phaseInc, amp, decay });
|
|
}
|
|
|
|
float noiseEnv = 1.0f;
|
|
const float noiseTau = 0.003f + 0.004f * (1.0f - damp);
|
|
const float noiseDecay = (float) std::exp (-1.0 / (noiseTau * sampleRate));
|
|
const float noiseAmp = 0.0015f;
|
|
const float lpCutoff = juce::jlimit (900.0f, 2400.0f, 2400.0f - damp * 1200.0f);
|
|
const float lpA = std::exp (-2.0f * juce::MathConstants<float>::pi * lpCutoff / (float) sampleRate);
|
|
float lpState = 0.0f;
|
|
|
|
float* out = ir.getWritePointer (0);
|
|
for (int n = 0; n < numSamples; ++n)
|
|
{
|
|
float sum = 0.0f;
|
|
for (auto& m : modes)
|
|
{
|
|
sum += m.amp * std::sin (m.phase);
|
|
m.phase += m.phaseInc;
|
|
if (m.phase > twoPi)
|
|
m.phase -= twoPi;
|
|
m.amp *= m.decay;
|
|
}
|
|
const float noise = (fastRand01 (seed) * 2.0f - 1.0f) * noiseAmp * noiseEnv;
|
|
noiseEnv *= noiseDecay;
|
|
const float raw = sum + noise;
|
|
lpState = (1.0f - lpA) * raw + lpA * lpState;
|
|
out[n] = lpState;
|
|
}
|
|
|
|
// Gentle tail fade to avoid abrupt truncation.
|
|
const int fadeSamples = juce::jmax (8, (int) std::round (numSamples * 0.12f));
|
|
for (int i = 0; i < fadeSamples; ++i)
|
|
{
|
|
const float t = (float) i / (float) (fadeSamples - 1);
|
|
const float g = 1.0f - t;
|
|
out[numSamples - fadeSamples + i] *= g;
|
|
}
|
|
|
|
float maxAbs = 0.0f;
|
|
for (int i = 0; i < numSamples; ++i)
|
|
maxAbs = juce::jmax (maxAbs, std::abs (out[i]));
|
|
if (maxAbs > 0.0f)
|
|
ir.applyGain (0, numSamples, 0.65f / maxAbs);
|
|
|
|
return ir;
|
|
}
|
|
|
|
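// masterTuneFactor converts a cent offset into a frequency ratio: 2^(cents / 1200),
// so +100 cents is exactly one equal-tempered semitone up.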
void FluteSynthAudioProcessor::applyMasterTuneToVoices()
{
    masterTuneFactor = std::pow (2.0f, masterTuneCents * (1.0f / 1200.0f));

    for (int i = 0; i < synth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<FluteVoice*> (synth.getVoice (i)))
            v->setMasterTuneFactor (masterTuneFactor);

    for (int i = 0; i < pmSynth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<PmVoice*> (pmSynth.getVoice (i)))
            v->setMasterTune (masterTuneFactor);

    for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
            v->setMasterTune (masterTuneFactor);
}

bool FluteSynthAudioProcessor::anyVoiceActive() const
{
    for (int i = 0; i < synth.getNumVoices(); ++i)
        if (auto* v = synth.getVoice (i))
            if (v->isVoiceActive())
                return true;
    for (int i = 0; i < pmSynth.getNumVoices(); ++i)
        if (auto* v = pmSynth.getVoice (i))
            if (v->isVoiceActive())
                return true;
    for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
            if (v->isActive())
                return true;
    return false;
}

void FluteSynthAudioProcessor::syncExtendedParamsFromAPVTS()
{
    auto get = [this] (const char* id) { return apvts.getRawParameterValue (id)->load(); };
    auto getB = [this] (const char* id) { return apvts.getRawParameterValue (id)->load() >= 0.5f; };

    // Formants
    auto setFormant = [this, get, getB] (int idx,
                                         const char* enId,
                                         const char* fId,
                                         const char* qId,
                                         const char* gId)
    {
        const bool enabled = getB (enId);
        const float freq = PresetModel::clamp (get (fId), 300.0f, 12000.0f);
        const float q = PresetModel::clamp (get (qId), 0.5f, 6.0f);
        const float gDb = PresetModel::clamp (get (gId), -9.0f, 9.0f);

        formant[idx].enabled = enabled;
        formant[idx].f.setCutoffFrequency (freq);
        formant[idx].f.setResonance (q);
        formant[idx].gainLin = juce::Decibels::decibelsToGain (gDb);
    };

    setFormant (0, ParamIDs::formant1Enable, ParamIDs::formant1Freq, ParamIDs::formant1Q, ParamIDs::formant1GainDb);
    setFormant (1, ParamIDs::formant2Enable, ParamIDs::formant2Freq, ParamIDs::formant2Q, ParamIDs::formant2GainDb);

    // Amp envelope (sync to voices so GUI changes are audible)
    const float a = get (ParamIDs::attack);
    const float d = get (ParamIDs::decay);
    const float s = get (ParamIDs::sustain);
    const float r = get (ParamIDs::release);
    baseRelease = r;
    const float releaseExtScale = juce::jlimit (0.7f, 2.0f,
                                                juce::jmap (r, 0.03f, 7.0f, 0.7f, 2.0f));
    const float releaseExt = releaseExtension * releaseExtScale;
    for (int i = 0; i < pmSynth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<PmVoice*> (pmSynth.getVoice (i)))
            v->setEnvParams (a, d, s, r);
    for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
        {
            v->setEnvParams (a, d, s, r);
            v->setReleaseExtension (releaseExt);
        }

    const int velocityChoice = (int) std::round (get (ParamIDs::velocityCurve));
    float velocityScale = 1.0f;
    velocityFixed = (velocityChoice == 3);
    if (velocityChoice == 0)
        velocityScale = 0.90f;
    else if (velocityChoice == 2)
        velocityScale = 1.15f;
    velocityGamma = juce::jlimit (0.1f, 3.0f, velocityGammaBase * velocityScale);

    // Soundboard
    soundboardEnabled = getB (ParamIDs::soundboardEnable);
    soundboardMix = juce::jlimit (0.0f, 1.0f, get (ParamIDs::soundboardMix));
    soundboardT60Sec = juce::jlimit (1.2f, 2.2f, get (ParamIDs::soundboardT60));
    soundboardDampParam = juce::jlimit (0.0f, 1.0f, get (ParamIDs::soundboardDamp));
    if (std::abs (soundboardIrLastT60 - soundboardT60Sec) > 1.0e-4f
        || std::abs (soundboardIrLastDamp - soundboardDampParam) > 1.0e-4f)
        soundboardIrDirty = true;
    soundboardParams.roomSize = juce::jlimit (0.0f, 1.0f, soundboardT60Sec / 3.0f);
    soundboardParams.damping = soundboardDampParam;
    soundboardParams.width = 0.6f;
    soundboardParams.wetLevel = 1.0f;
    soundboardParams.dryLevel = 0.0f;
    postRoomMix = juce::jlimit (0.0f, 1.0f, get (ParamIDs::postRoomMix));
    postRoomEnabled = getB (ParamIDs::postRoomEnable);

    // Felt / duplex (pm2)
    pmFelt.preload = juce::jlimit (0.0f, 0.6f, get (ParamIDs::feltPreload));
    pmFelt.stiffness = juce::jlimit (1.0f, 5.0f, get (ParamIDs::feltStiffness));
    pmFelt.hysteresis = juce::jlimit (0.0f, 0.6f, get (ParamIDs::feltHysteresis));
    pmFelt.maxAmp = juce::jlimit (0.4f, 4.0f, get (ParamIDs::feltMax));

    duplexCfg.ratio = juce::jlimit (1.1f, 4.0f, get (ParamIDs::duplexRatio));
    duplexCfg.gainDb = juce::jlimit (-20.0f, -6.0f, get (ParamIDs::duplexGainDb));
    duplexCfg.decayMs = juce::jlimit (10.0f, 400.0f, get (ParamIDs::duplexDecayMs));
    duplexCfg.sympSend = juce::jlimit (0.0f, 1.0f, get (ParamIDs::duplexSympSend));
    duplexCfg.sympMix = juce::jlimit (0.0f, 1.0f, get (ParamIDs::duplexSympMix));

    pm2GainDb = juce::jlimit (-24.0f, 42.0f, get (ParamIDs::pm2GainDb));
    pm2GainLin = juce::Decibels::decibelsToGain (pm2GainDb);
    pm2GainLinSmoothed.setTargetValue (pm2GainLin);

    sympParams.roomSize = juce::jlimit (0.0f, 1.0f, duplexCfg.decayMs / 400.0f);
    sympParams.damping = 0.4f;
    sympParams.wetLevel = 1.0f;
    sympParams.dryLevel = 0.0f;

    for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
        {
            v->setFeltParams (pmFelt);
            v->setDuplexParams (duplexCfg);
        }

    const int temperamentChoice = (int) std::round (get (ParamIDs::temperament));
    if (temperamentChoice == 0)
        noteOffsetsCents = presetNoteOffsetsCents;
    else
        noteOffsetsCents = expandPitchClassOffsets (getTemperamentOffsetsByChoice (temperamentChoice));

    for (int i = 0; i < synth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<FluteVoice*> (synth.getVoice (i)))
            v->setNoteOffsets (noteOffsetsCents);
    for (int i = 0; i < pmSynth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<PmVoice*> (pmSynth.getVoice (i)))
            v->setNoteOffsets (noteOffsetsCents);
    for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
            v->setNoteOffsets (noteOffsetsCents);

    outputLpfEnabled = getB (ParamIDs::outputLpfEnable);
    outputLpfCutoff = get (ParamIDs::outputLpfCutoff);
    outputLpfQ = get (ParamIDs::outputLpfQ);
    outputLpfCutoffSmoothed.setTargetValue (outputLpfCutoff);
    outputLpfQSmoothed.setTargetValue (outputLpfQ);
    updateOutputLpf();
}

void FluteSynthAudioProcessor::updatePostFiltersForNote (int midiNote)
{
    if (midiNote > 0)
        lastMidiNote = midiNote;

    updatePostFiltersSmoothed();
}

void FluteSynthAudioProcessor::updatePostFiltersSmoothed()
{
    if (lastSampleRate <= 0.0)
        return;

    const float note = (float) juce::jlimit (0, 127, lastMidiNote);
    const float kt = juce::jlimit (0.0f, 1.0f, postKeytrack);
    const float baseCutoff = postCutoffHzSmoothed.getCurrentValue();
    const float baseQ = postQSmoothed.getCurrentValue();
    float cutoff = baseCutoff * std::pow (2.0f, (note - 60.0f) * kt * (1.0f / 12.0f));
    cutoff = juce::jlimit (300.0f, 12000.0f, cutoff);

    auto setLP = [cutoff, q = baseQ] (juce::dsp::StateVariableTPTFilter<float>& f1,
                                      juce::dsp::StateVariableTPTFilter<float>& f2)
    {
        f1.setCutoffFrequency (cutoff);
        f2.setCutoffFrequency (cutoff);
        f1.setResonance (q);
        f2.setResonance (q);
    };
    setLP (postVaLp1, postVaLp2);
    setLP (postPmLp1, postPmLp2);
    setLP (postPm2Lp1, postPm2Lp2);

    const float tilt = juce::jlimit (-6.0f, 6.0f, postTiltDbSmoothed.getCurrentValue());
    const float pivot = 1000.0f;
    const float gHigh = juce::Decibels::decibelsToGain (tilt);
    const float gLow = juce::Decibels::decibelsToGain (-tilt);
    tiltLow.coefficients = juce::dsp::IIR::Coefficients<float>::makeLowShelf (lastSampleRate, pivot, 0.7071f, gLow);
    tiltHigh.coefficients = juce::dsp::IIR::Coefficients<float>::makeHighShelf (lastSampleRate, pivot, 0.7071f, gHigh);
    tiltReady = (tiltLow.coefficients != nullptr && tiltHigh.coefficients != nullptr);
}

void FluteSynthAudioProcessor::updateOutputLpf()
{
    const float cutoff = juce::jlimit (200.0f, 20000.0f, outputLpfCutoffSmoothed.getCurrentValue());
    const float q = juce::jlimit (0.2f, 4.0f, outputLpfQSmoothed.getCurrentValue());
    outputLpf.setCutoffFrequency (cutoff);
    outputLpf.setResonance (q);
}

void FluteSynthAudioProcessor::updateOutputEq()
{
    if (lastSampleRate <= 0.0)
        return;

    auto clamp = PresetModel::clamp;
    for (size_t i = 0; i < outputEqCfg.bands.size(); ++i)
    {
        const auto& b = outputEqCfg.bands[i];
        const float freq = clamp (b.freq, 40.0f, 16000.0f);
        const float q = clamp (b.q, 0.3f, 6.0f);
        const float gainDb = clamp (b.gainDb, -18.0f, 18.0f);
        outputEqFilters[i].coefficients = juce::dsp::IIR::Coefficients<float>::makePeakFilter (
            lastSampleRate, freq, q, juce::Decibels::decibelsToGain (gainDb));
    }
}

void FluteSynthAudioProcessor::prepareBrightnessFilters()
{
    const int numCh = juce::jmax (1, getTotalNumOutputChannels());
    brightnessFilters.resize ((size_t) numCh);
    juce::dsp::ProcessSpec spec;
    spec.sampleRate = lastSampleRate;
    spec.maximumBlockSize = mainSpec.maximumBlockSize;
    spec.numChannels = (juce::uint32) numCh;
    for (auto& f : brightnessFilters)
    {
        f.reset();
        f.prepare (spec);
    }
    brightnessNumChannels = numCh;
    updateBrightnessFilters (0.0f);
}

void FluteSynthAudioProcessor::updateBrightnessFilters (float targetDb)
{
    if (lastSampleRate <= 0.0 || brightnessFilters.empty())
        return;

    const float limitedDb = juce::jlimit (-12.0f, brightnessMaxDb, targetDb);
    auto coeff = juce::dsp::IIR::Coefficients<float>::makeHighShelf (
        lastSampleRate,
        juce::jlimit (800.0f, 12000.0f, brightnessCutoffHz),
        juce::jlimit (0.2f, 4.0f, brightnessQ),
        juce::Decibels::decibelsToGain (limitedDb));

    for (auto& f : brightnessFilters)
        f.coefficients = coeff;
    brightnessCurrentDb = limitedDb;
}

void FluteSynthAudioProcessor::updateDamperCoeffs()
{
    if (lastSampleRate <= 0.0) return;
    float tauSamples = (float) (damperCfg.smoothMs * 0.001 * lastSampleRate);
    damperSmoothCoeff = tauSamples > 1.0f ? (1.0f - std::exp (-1.0f / juce::jmax (1.0f, tauSamples))) : 1.0f;
    damperSoftenSamples = (int) std::round (damperCfg.softenMs * 0.001 * lastSampleRate);
    damperSoftenA = std::exp (-2.0f * juce::MathConstants<float>::pi * juce::jlimit (40.0f, 8000.0f, damperCfg.softenHz) / (float) juce::jmax (20.0, lastSampleRate));
    for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
            v->setDamperParams (damperCfg);
}

void FluteSynthAudioProcessor::updateMicProcessors()
{
    if (! prepared || lastSampleRate <= 0.0)
        return;

    micSpec.sampleRate = lastSampleRate;
    micSpec.maximumBlockSize = mainSpec.maximumBlockSize;
    micSpec.numChannels = (juce::uint32) juce::jmax (1, getTotalNumOutputChannels());
    micMaxDelaySamples = (int) std::round (lastSampleRate * 0.05); // up to ~50 ms

    for (auto& st : micState)
    {
        for (int ch = 0; ch < 2; ++ch)
        {
            st.delay[ch].reset();
            st.delay[ch].prepare (micSpec);
            st.delay[ch].setMaximumDelayInSamples ((size_t) juce::jmax (8, micMaxDelaySamples));
            st.delay[ch].setDelay (0.0f);
            st.lowShelf[ch].reset();
            st.lowShelf[ch].prepare (micSpec);
            st.highShelf[ch].reset();
            st.highShelf[ch].prepare (micSpec);
        }
        st.delaySamples = 0.0f;
        st.gainLin = 1.0f;
    }
    micReady = true;
}

void FluteSynthAudioProcessor::applyMicMix (juce::AudioBuffer<float>& buffer)
{
    if (! micReady || buffer.getNumChannels() == 0)
        return;

    const int numCh = buffer.getNumChannels();
    const int n = buffer.getNumSamples();
    const float sr = (float) lastSampleRate;
    if (sr <= 0.0f)
        return;

    auto micConfigs = std::array<PresetModel::Mic,3>{ micCfg.close, micCfg.player, micCfg.room };
    auto blend = micCfg.blend;
    float bsum = blend[0] + blend[1] + blend[2];
    if (bsum <= 1.0e-6f)
        blend = { 1.0f, 0.0f, 0.0f };
    else
        for (float& b : blend) b /= bsum;

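    // The makeup gain below is the inverse of the blend-weighted sum of each mic's linear gain:
    //   makeup = 1 / ( sum_i  blend[i] * 10^(gainDb[i] / 20) )
    // so a blend dominated by mics with negative gainDb does not drop the overall level.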
    // Calculate makeup gain to compensate for mic gain attenuation
    // Without this, blending mics with negative gain_db values causes overall level loss
    float totalLinGain = 0.0f;
    for (int micIdx = 0; micIdx < 3; ++micIdx)
    {
        const float w = blend[(size_t) micIdx];
        if (w > 1.0e-4f)
            totalLinGain += juce::Decibels::decibelsToGain (micConfigs[(size_t) micIdx].gainDb) * w;
    }
    const float micMakeupGain = (totalLinGain > 1.0e-4f) ? (1.0f / totalLinGain) : 1.0f;

    // Fast path: close-only, no delay/EQ
    const auto& closeCfg = micConfigs[0];
    if (blend[0] > 0.999f && blend[1] < 1.0e-4f && blend[2] < 1.0e-4f
        && std::abs (closeCfg.delayMs) < 1.0e-4f
        && std::abs (closeCfg.lowShelfDb) < 1.0e-4f
        && std::abs (closeCfg.highShelfDb) < 1.0e-4f)
    {
        // Close-only: micMakeupGain is the inverse of the close mic's gain, so the product below is unity.
        buffer.applyGain (juce::Decibels::decibelsToGain (closeCfg.gainDb) * micMakeupGain);
        return;
    }

    if (micScratch.getNumChannels() != numCh || micScratch.getNumSamples() != n)
        micScratch.setSize (numCh, n, false, false, true);
    micScratch.clear();

    for (int micIdx = 0; micIdx < 3; ++micIdx)
    {
        const float w = blend[(size_t) micIdx];
        if (w <= 1.0e-4f)
            continue;

        const auto& cfg = micConfigs[(size_t) micIdx];
        auto& st = micState[(size_t) micIdx];

        st.delaySamples = juce::jlimit (0.0f, (float) micMaxDelaySamples, cfg.delayMs * 0.001f * sr);
        for (int ch = 0; ch < juce::jmin (numCh, 2); ++ch)
            st.delay[ch].setDelay (st.delaySamples);

        const float lsFreq = juce::jlimit (100.0f, 8000.0f, cfg.shelfFreq);
        const float hsFreq = juce::jlimit (500.0f, 12000.0f, cfg.shelfFreq);
        auto lsCoeff = juce::dsp::IIR::Coefficients<float>::makeLowShelf (lastSampleRate, lsFreq, 0.7071f,
                                                                          juce::Decibels::decibelsToGain (cfg.lowShelfDb));
        auto hsCoeff = juce::dsp::IIR::Coefficients<float>::makeHighShelf (lastSampleRate, hsFreq, 0.7071f,
                                                                           juce::Decibels::decibelsToGain (cfg.highShelfDb));
        for (int ch = 0; ch < juce::jmin (numCh, 2); ++ch)
        {
            st.lowShelf[ch].coefficients = lsCoeff;
            st.highShelf[ch].coefficients = hsCoeff;
        }
        st.gainLin = juce::Decibels::decibelsToGain (cfg.gainDb) * w;

        for (int ch = 0; ch < numCh; ++ch)
        {
            const int stateIdx = juce::jmin (ch, 1);
            auto* dst = micScratch.getWritePointer (ch);
            const auto* src = buffer.getReadPointer (ch);
            for (int i = 0; i < n; ++i)
            {
                float s = st.delay[stateIdx].popSample (0);
                st.delay[stateIdx].pushSample (0, src[i]);
                if (st.lowShelf[stateIdx].coefficients != nullptr)
                    s = st.lowShelf[stateIdx].processSample (s);
                if (st.highShelf[stateIdx].coefficients != nullptr)
                    s = st.highShelf[stateIdx].processSample (s);
                dst[i] += s * st.gainLin;
            }
        }
    }

    buffer.makeCopyOf (micScratch, true);

    // Apply makeup gain to compensate for mic blend attenuation
    buffer.applyGain (micMakeupGain);
}

void FluteSynthAudioProcessor::processBlock (juce::AudioBuffer<float>& buffer, juce::MidiBuffer& midi)
{
    juce::ScopedNoDenormals noDenormals;

    // Guard against hosts calling before prepareToPlay.
    if (! prepared || lastSampleRate <= 0.0)
    {
        buffer.clear();
        return;
    }

    const int numCh = juce::jmax (1, buffer.getNumChannels());
    const int numSamples = buffer.getNumSamples();
    auto ensureScratch = [numSamples] (juce::AudioBuffer<float>& buf, int channels)
    {
        if (buf.getNumChannels() != channels || buf.getNumSamples() != numSamples)
            buf.setSize (channels, numSamples, false, false, true);
        buf.clear();
    };
const int maxPredelaySamples = (int) std::ceil (lastSampleRate * 0.020);
|
|
const int minPredelayCapacity = maxPredelaySamples + numSamples + 2;
|
|
if (predelayCapacitySamples < minPredelayCapacity)
|
|
{
|
|
predelayCapacitySamples = minPredelayCapacity;
|
|
predelayBuf.assign ((size_t) predelayCapacitySamples, 0.0f);
|
|
predelayWrite = 0;
|
|
}
|
|
|
|
auto reverbParamsEqual = [] (const juce::Reverb::Parameters& a, const juce::Reverb::Parameters& b)
|
|
{
|
|
const float eps = 1.0e-4f;
|
|
return std::abs (a.roomSize - b.roomSize) < eps
|
|
&& std::abs (a.damping - b.damping) < eps
|
|
&& std::abs (a.wetLevel - b.wetLevel) < eps
|
|
&& std::abs (a.dryLevel - b.dryLevel) < eps
|
|
&& std::abs (a.width - b.width) < eps
|
|
&& std::abs (a.freezeMode - b.freezeMode) < eps;
|
|
};
|
|
auto resetPostState = [this]()
|
|
{
|
|
postVaLp1.reset(); postVaLp2.reset();
|
|
postPmLp1.reset(); postPmLp2.reset();
|
|
postPm2Lp1.reset(); postPm2Lp2.reset();
|
|
tiltLow.reset(); tiltHigh.reset();
|
|
outputLpf.reset();
|
|
for (auto& f : outputEqFilters)
|
|
f.reset();
|
|
breathBp.reset();
|
|
for (int i = 0; i < 2; ++i) formant[i].f.reset();
|
|
soundboardReverb.reset();
|
|
soundboardConvolution.reset();
|
|
soundboardConvolutionDs.reset();
|
|
soundboardIrDirty = true;
|
|
soundboardIrLastT60 = 0.0f;
|
|
soundboardIrLastDamp = 0.0f;
|
|
pedalReverb.reset();
|
|
sympReverb.reset();
|
|
postReverb.reset();
|
|
soundboardParamsValid = false;
|
|
pedalReverbParamsValid = false;
|
|
sympParamsValid = false;
|
|
postReverbParamsValid = false;
|
|
std::fill (predelayBuf.begin(), predelayBuf.end(), 0.0f);
|
|
tiltReady = (tiltLow.coefficients != nullptr && tiltHigh.coefficients != nullptr);
|
|
};
|
|
|
|
if (pendingStateReset && ! anyVoiceActive())
|
|
{
|
|
resetPostState();
|
|
pendingStateReset = false;
|
|
}
|
|
|
|
if (auto* vol = apvts.getRawParameterValue (ParamIDs::masterVolume))
|
|
masterVolumeLin = juce::jlimit (0.0f, 2.0f, vol->load());
|
|
// Minimum note duration: pitch-dependent, fixed milliseconds (tempo-independent).
|
|
// FIX: Drastically reduced minimum durations to allow very short staccato notes
|
|
// Previous values (240-960ms) forced notes to play much longer than intended
|
|
const double minLowMs = 0.0; // Was 240.0 - no forced minimum
|
|
const double minMidLowMs = 0.0; // Was 480.0 - no forced minimum
|
|
const double minMidHighMs = 0.0; // Was 720.0 - no forced minimum
|
|
const double minHighMs = 0.0; // Was 960.0 - no forced minimum
|
|
const int minNoteLow = (int) std::round (minLowMs * 0.001 * lastSampleRate);
|
|
const int minNoteMidLow = (int) std::round (minMidLowMs * 0.001 * lastSampleRate);
|
|
const int minNoteMidHigh = (int) std::round (minMidHighMs * 0.001 * lastSampleRate);
|
|
const int minNoteHigh = (int) std::round (minHighMs * 0.001 * lastSampleRate);
|
|
const int split1 = 36; // C2 and below
|
|
const int split2 = 48; // C3 and below
|
|
const int split3 = DebugToggles::kEnablePm2MinDurationC4Split ? 60 : 127; // C4 split optional
|
|
pm2Synth.setMinNoteDurationRanges (minNoteLow, minNoteMidLow, minNoteMidHigh, minNoteHigh,
|
|
split1, split2, split3);
|
|
|
|
// Apply any pending preset reset on the audio thread to avoid GUI/DSP races.
|
|
if (const int pendingPreset = pendingEmbeddedPresetIndex.exchange (-1, std::memory_order_acq_rel);
|
|
pendingPreset >= 0)
|
|
{
|
|
if (! embeddedPresetLoaded.load())
|
|
loadEmbeddedPresetModel();
|
|
|
|
const int numPresets = (int) embeddedPresets.size();
|
|
if (embeddedPresetLoaded.load() && numPresets > 0)
|
|
{
|
|
const int presetIdx = juce::jlimit (0, numPresets - 1, pendingPreset);
|
|
activeEmbeddedPresetIndex.store (presetIdx, std::memory_order_release);
|
|
|
|
// Stop current voices to avoid artifacts when parameters jump.
|
|
synth.allNotesOff (0, true);
|
|
pmSynth.allNotesOff (0, true);
|
|
pm2Synth.allNotesOff (0, true);
|
|
|
|
applyPresetToParameters (embeddedPresets[(size_t) presetIdx].model);
|
|
pendingStateReset = true;
|
|
if (! anyVoiceActive())
|
|
{
|
|
resetPostState();
|
|
pendingStateReset = false;
|
|
}
|
|
}
|
|
}
|
|
|
|
buffer.clear();
|
|
syncExtendedParamsFromAPVTS();
|
|
const float outputGainStart = outputGainLinSmoothed.getCurrentValue();
|
|
if (numSamples > 0)
|
|
{
|
|
pm2GainLinSmoothed.skip (numSamples);
|
|
outputGainLinSmoothed.skip (numSamples);
|
|
postCutoffHzSmoothed.skip (numSamples);
|
|
postQSmoothed.skip (numSamples);
|
|
postTiltDbSmoothed.skip (numSamples);
|
|
outputLpfCutoffSmoothed.skip (numSamples);
|
|
outputLpfQSmoothed.skip (numSamples);
|
|
}
|
|
const float outputGainEnd = outputGainLinSmoothed.getCurrentValue();
|
|
pm2GainLin = pm2GainLinSmoothed.getCurrentValue();
|
|
outputGainLin = outputGainEnd;
|
|
updatePostFiltersSmoothed();
|
|
updateOutputLpf();
|
|
|
|
// FIX #1 & #4: Clear shared buses once at block start (not per segment)
|
|
// This ensures all voices can read/write with consistent block-relative indices
|
|
couplingBus.begin (numSamples);
|
|
sympBus.begin (numSamples);
|
|
|
|
// detect note-on/off (for hammer trigger + filter keytracking + pedal state) and apply velocity curve shaping
|
|
struct MidiEvent { juce::MidiMessage msg; int pos { 0 }; };
|
|
std::vector<MidiEvent> events;
|
|
events.reserve ((size_t) midi.getNumEvents());
|
|
std::vector<int> splitPoints;
|
|
splitPoints.reserve ((size_t) midi.getNumEvents() + 2);
|
|
splitPoints.push_back (0);
|
|
splitPoints.push_back (numSamples);
|
|
|
|
int noteOnCount = 0;
|
|
int noteOffCount = 0;
|
|
int cc64Count = 0;
|
|
int otherCount = 0;
|
|
int firstEventPos = -1;
|
|
int lastEventPos = -1;
|
|
for (const auto meta : midi)
|
|
{
|
|
auto m = meta.getMessage();
|
|
if (m.isNoteOn())
|
|
{
|
|
// FIX: getVelocity() returns 0-127 (uint8), NOT 0.0-1.0!
|
|
// Must normalize by dividing by 127
|
|
float vel = juce::jlimit<float> (0.0f, 1.0f, (float) m.getVelocity() / 127.0f);
|
|
if (velocityFixed)
|
|
vel = 1.0f;
|
|
else
|
|
vel = std::pow (vel, juce::jmax (0.1f, velocityGamma));
|
|
vel = juce::jlimit (0.0f, 1.0f, vel);
|
|
m = juce::MidiMessage::noteOn (m.getChannel(), m.getNoteNumber(), vel);
|
|
++noteOnCount;
|
|
}
|
|
else if (m.isNoteOff())
|
|
{
|
|
++noteOffCount;
|
|
}
|
|
else if (m.isController())
|
|
{
|
|
if (m.getControllerNumber() == 64)
|
|
++cc64Count;
|
|
else
|
|
++otherCount;
|
|
}
|
|
else
|
|
{
|
|
++otherCount;
|
|
}
|
|
const int clampedPos = juce::jlimit (0, juce::jmax (0, numSamples - 1), meta.samplePosition);
|
|
if (firstEventPos < 0)
|
|
firstEventPos = clampedPos;
|
|
lastEventPos = clampedPos;
|
|
events.push_back (MidiEvent { m, clampedPos });
|
|
splitPoints.push_back (clampedPos);
|
|
}
|
|
|
|
std::stable_sort (events.begin(), events.end(),
|
|
[] (const MidiEvent& a, const MidiEvent& b) { return a.pos < b.pos; });
|
|
std::sort (splitPoints.begin(), splitPoints.end());
|
|
splitPoints.erase (std::unique (splitPoints.begin(), splitPoints.end()), splitPoints.end());
|
|
|
|
    auto smoothstep = [] (float t)
    {
        t = juce::jlimit (0.0f, 1.0f, t);
        return t * t * (3.0f - 2.0f * t);
    };

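    // Sustain pedal hysteresis: the pedal switches "down" at sustainThresh but only switches
    // back "up" once the smoothed CC64 value falls below (sustainThresh - 0.08), clamped to at
    // least halfThresh. This avoids on/off chatter when the controller hovers near the threshold.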
    const float sustainHysteresis = 0.08f;
    const float sustainOnThresh = juce::jlimit (0.0f, 1.0f, pedalCfg.sustainThresh);
    const float sustainOffThresh = juce::jmax (pedalCfg.halfThresh, sustainOnThresh - sustainHysteresis);

const bool usePM = (currentEngine == "pm") || (currentEngine == "hybrid");
|
|
const bool usePM2 = (currentEngine == "pm2") || (currentEngine == "hybrid");
|
|
const bool useVA = (currentEngine == "va") || (currentEngine == "hybrid");
|
|
if (currentEngine == "hybrid")
|
|
{
|
|
if (useVA) ensureScratch (hybridVaBuf, numCh);
|
|
if (usePM) ensureScratch (hybridPmBuf, numCh);
|
|
if (usePM2) ensureScratch (hybridPm2Buf, numCh);
|
|
}
|
|
|
|
auto applyPostLpfSegment = [] (juce::AudioBuffer<float>& buf,
|
|
juce::dsp::StateVariableTPTFilter<float>& f1,
|
|
juce::dsp::StateVariableTPTFilter<float>& f2,
|
|
int start, int num)
|
|
{
|
|
if (num <= 0)
|
|
return;
|
|
juce::dsp::AudioBlock<float> block (buf);
|
|
auto sub = block.getSubBlock ((size_t) start, (size_t) num);
|
|
juce::dsp::ProcessContextReplacing<float> ctx (sub);
|
|
f1.process (ctx);
|
|
f2.process (ctx);
|
|
};
|
|
|
|
auto clearSegment = [] (juce::AudioBuffer<float>& buf, int start, int num)
|
|
{
|
|
for (int ch = 0; ch < buf.getNumChannels(); ++ch)
|
|
buf.clear (ch, start, num);
|
|
};
|
|
auto applyGainSegment = [] (juce::AudioBuffer<float>& buf, float gain, int start, int num)
|
|
{
|
|
if (std::abs (gain - 1.0f) < 1.0e-6f || num <= 0)
|
|
return;
|
|
for (int ch = 0; ch < buf.getNumChannels(); ++ch)
|
|
buf.applyGain (ch, start, num, gain);
|
|
};
|
|
auto applyGainRampSegment = [] (juce::AudioBuffer<float>& buf, float startGain, float endGain, int start, int num)
|
|
{
|
|
if (num <= 0)
|
|
return;
|
|
if (std::abs (startGain - endGain) < 1.0e-6f)
|
|
{
|
|
if (std::abs (startGain - 1.0f) < 1.0e-6f)
|
|
return;
|
|
for (int ch = 0; ch < buf.getNumChannels(); ++ch)
|
|
buf.applyGain (ch, start, num, startGain);
|
|
return;
|
|
}
|
|
for (int ch = 0; ch < buf.getNumChannels(); ++ch)
|
|
buf.applyGainRamp (ch, start, num, startGain, endGain);
|
|
};
|
|
    auto applySoftClipSegment = [] (juce::AudioBuffer<float>& buf, int start, int num, float drive)
    {
        if (num <= 0)
            return;
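        // tanh soft clip, rescaled by 1/tanh(k) so a full-scale input still maps to +/-1.
        // Note this gives small signals a slight boost of k / tanh(k); only peaks are compressed.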
        const float k = juce::jlimit (0.5f, 3.0f, drive);
        const float norm = 1.0f / std::tanh (k);
        for (int ch = 0; ch < buf.getNumChannels(); ++ch)
        {
            auto* x = buf.getWritePointer (ch, start);
            for (int i = 0; i < num; ++i)
                x[i] = std::tanh (k * x[i]) * norm;
        }
    };
auto applyDeclickSegment = [&smoothstep] (juce::AudioBuffer<float>& buf, int start, int num, int declickSamples)
|
|
{
|
|
if (declickSamples <= 0 || num <= 0)
|
|
return;
|
|
const int rampLen = juce::jmin (num, declickSamples);
|
|
for (int ch = 0; ch < buf.getNumChannels(); ++ch)
|
|
{
|
|
auto* x = buf.getWritePointer (ch, start);
|
|
for (int i = 0; i < rampLen; ++i)
|
|
{
|
|
const float t = (float) (i + 1) / (float) rampLen;
|
|
const float g = smoothstep (t);
|
|
x[i] *= g;
|
|
}
|
|
}
|
|
};
|
|
auto applyDeclickOutSegment = [&smoothstep] (juce::AudioBuffer<float>& buf, int start, int num, int declickSamples)
|
|
{
|
|
if (declickSamples <= 0 || num <= 0)
|
|
return;
|
|
const int rampLen = juce::jmin (num, declickSamples);
|
|
for (int ch = 0; ch < buf.getNumChannels(); ++ch)
|
|
{
|
|
auto* x = buf.getWritePointer (ch, start);
|
|
for (int i = 0; i < rampLen; ++i)
|
|
{
|
|
const float t = (float) (i + 1) / (float) rampLen;
|
|
const float g = smoothstep (1.0f - t);
|
|
x[i] *= g;
|
|
}
|
|
}
|
|
};
|
|
|
|
auto countActivePm2Voices = [&]()
|
|
{
|
|
int active = 0;
|
|
for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
|
|
if (v->isActive())
|
|
++active;
|
|
return active;
|
|
};
|
|
|
|
std::size_t eventIdx = 0;
|
|
for (size_t s = 0; s + 1 < splitPoints.size(); ++s)
|
|
{
|
|
const int segStart = splitPoints[s];
|
|
const int segEnd = splitPoints[s + 1];
|
|
const int segLen = segEnd - segStart;
|
|
if (segLen <= 0)
|
|
continue;
|
|
|
|
// CPU optimisation: earlier economy mode activation under high polyphony
|
|
const int activeVoicesNow = countActivePm2Voices();
|
|
const bool highPolyMode = activeVoicesNow >= 4;
|
|
if (pm2EconomyMode != highPolyMode)
|
|
{
|
|
pm2EconomyMode = highPolyMode;
|
|
for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
|
|
{
|
|
v->setEconomyMode (pm2EconomyMode);
|
|
v->setHighPolyMode (pm2EconomyMode);
|
|
}
|
|
}
|
|
{
|
|
const int maxVoices = 12;
|
|
const float minScale = 0.6f;
|
|
float polyScale = 1.0f;
|
|
if (activeVoicesNow > 4)
|
|
{
|
|
const float t = juce::jlimit (0.0f, 1.0f, (float) (activeVoicesNow - 4) / (float) (maxVoices - 4));
|
|
polyScale = 1.0f + t * (minScale - 1.0f);
|
|
}
|
|
for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
|
|
v->setPolyphonyScale (polyScale);
|
|
}
|
|
|
|
bool sustainChanged = false;
|
|
bool sustainUpdatePending = false;
|
|
float keyOffVelAcc = 0.0f;
|
|
float releaseThumpAcc = 0.0f;
|
|
bool sawNoteOn = false;
|
|
int noteNumber = 0;
|
|
float noteOnVel = 0.0f;
|
|
juce::MidiBuffer midiSegment;
|
|
juce::MidiBuffer midiSegmentPm2;
|
|
|
|
while (eventIdx < events.size() && events[eventIdx].pos < segStart)
|
|
++eventIdx;
|
|
|
|
// FIX: Count note-ons at this position FIRST, then preallocate all voices at once
|
|
// This prevents JUCE's internal voice stealing from interfering with chords
|
|
// Also include repeated same-note note-ons so we allow overlap instead of hard retrigger.
|
|
{
|
|
int noteOnCount = 0;
|
|
if (usePM2)
|
|
{
|
|
size_t peekIdx = eventIdx;
|
|
while (peekIdx < events.size() && events[peekIdx].pos == segStart)
|
|
{
|
|
const auto& msg = events[peekIdx].msg;
|
|
if (msg.isNoteOn())
|
|
++noteOnCount;
|
|
++peekIdx;
|
|
}
|
|
}
|
|
if (noteOnCount > 0)
|
|
pm2Synth.preallocateVoicesForChord (noteOnCount);
|
|
}
|
|
|
|
while (eventIdx < events.size() && events[eventIdx].pos == segStart)
|
|
{
|
|
const int eventPos = events[eventIdx].pos;
|
|
auto m = events[eventIdx].msg;
|
|
const bool isAllowedController = m.isController()
|
|
&& (m.getControllerNumber() == 64 || m.getControllerNumber() == 67);
|
|
const bool allowToPass = m.isNoteOn() || m.isNoteOff() || isAllowedController;
|
|
if (m.isController())
|
|
{
|
|
if (m.getControllerNumber() == 64) // sustain
|
|
{
|
|
sustainValue = juce::jlimit (0.0f, 1.0f, m.getControllerValue() / 127.0f);
|
|
sustainUpdatePending = true;
|
|
}
|
|
else if (m.getControllerNumber() == 67) // soft / una corda
|
|
{
|
|
softPedalDown = m.getControllerValue() >= 32;
|
|
for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
|
|
v->setSoftPedal (softPedalDown, unaCfg);
|
|
}
|
|
}
|
|
if (m.isNoteOn())
|
|
{
|
|
sawNoteOn = true;
|
|
noteNumber = m.getNoteNumber();
|
|
// FIX: getVelocity() returns 0-127, must normalize by dividing by 127
|
|
noteOnVel = juce::jlimit (0.0f, 1.0f, (float) m.getVelocity() / 127.0f);
|
|
// Note: preallocateVoicesForChord was already called above for all note-ons
|
|
updatePostFiltersForNote (noteNumber);
|
|
}
|
|
else if (m.isNoteOff())
|
|
{
|
|
// FIX: getVelocity() returns 0-127, must normalize by dividing by 127
|
|
float relVel = juce::jlimit (0.0f, 1.0f, (float) m.getVelocity() / 127.0f);
|
|
if (keyOffEnabled)
|
|
{
|
|
float amt = keyOffLevel * (keyOffVelScale ? juce::jlimit (0.2f, 1.0f, relVel) : 1.0f);
|
|
keyOffVelAcc = juce::jlimit (0.0f, 4.0f, keyOffVelAcc + amt);
|
|
}
|
|
if (releaseThumpEnabled && ! sustainPedalDown)
|
|
{
|
|
float amt = releaseThumpLevel * (keyOffVelScale ? juce::jlimit (0.2f, 1.0f, relVel) : 1.0f);
|
|
releaseThumpAcc = juce::jlimit (0.0f, 4.0f, releaseThumpAcc + amt);
|
|
}
|
|
}
|
|
|
|
if (allowToPass)
|
|
{
|
|
if (usePM2)
|
|
midiSegmentPm2.addEvent (m, eventPos);
|
|
midiSegment.addEvent (m, eventPos);
|
|
}
|
|
++eventIdx;
|
|
}
|
|
|
|
if (sawNoteOn)
|
|
{
|
|
lastVelocityNorm = noteOnVel;
|
|
if (hammerEnabled)
|
|
{
|
|
hammerActive = true;
|
|
hammerEnv = hammerLevel;
|
|
}
|
|
}
|
|
|
|
if (keyOffVelAcc > 0.0f)
|
|
keyOffEnv = juce::jlimit (0.0f, 4.0f, keyOffEnv + keyOffVelAcc);
|
|
if (releaseThumpAcc > 0.0f)
|
|
releaseThumpEnv = juce::jlimit (0.0f, 4.0f, releaseThumpEnv + releaseThumpAcc);
|
|
|
|
float sustainValuePrev = sustainValueSmoothed.getCurrentValue();
|
|
sustainValueSmoothed.setTargetValue (sustainValue);
|
|
if (segLen > 0)
|
|
sustainValueSmoothed.skip (segLen);
|
|
float sustainValueSmooth = sustainValueSmoothed.getCurrentValue();
|
|
if (DebugToggles::kDisableSustainPedal)
|
|
{
|
|
sustainValue = 0.0f;
|
|
sustainValuePrev = 0.0f;
|
|
sustainValueSmooth = 0.0f;
|
|
sustainValueSmoothed.setTargetValue (0.0f);
|
|
sustainPedalDown = false;
|
|
}
|
|
{
|
|
const bool newDown = sustainPedalDown ? (sustainValueSmooth >= sustainOffThresh)
|
|
: (sustainValueSmooth >= sustainOnThresh);
|
|
if (newDown != sustainPedalDown)
|
|
{
|
|
sustainPedalDown = newDown;
|
|
sustainChanged = true;
|
|
const int activeVoices = countActivePm2Voices();
|
|
const int declickSamples = juce::jlimit (96, 384, 96 + activeVoices * 24);
|
|
pm2Synth.requestDeclickOut (declickSamples);
|
|
pm2Synth.requestDeclick (declickSamples);
|
|
if (pedalThumpEnabled)
|
|
{
|
|
const float changeAmt = std::abs (sustainValueSmooth - sustainValuePrev);
|
|
const float scale = juce::jlimit (0.2f, 1.0f, changeAmt > 0.0001f ? changeAmt : (newDown ? sustainValueSmooth : sustainValuePrev));
|
|
pedalThumpEnv = juce::jlimit (0.0f, 4.0f, pedalThumpEnv + pedalThumpLevel * scale);
|
|
}
|
|
}
|
|
else if (sustainUpdatePending && pedalThumpEnabled)
|
|
{
|
|
const float changeAmt = std::abs (sustainValueSmooth - sustainValuePrev);
|
|
const float scale = juce::jlimit (0.1f, 0.6f, changeAmt);
|
|
if (scale > 1.0e-4f)
|
|
pedalThumpEnv = juce::jlimit (0.0f, 4.0f, pedalThumpEnv + pedalThumpLevel * scale);
|
|
}
|
|
}
|
|
|
|
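        // Map the smoothed CC64 value to a continuous damper lift: fully lifted when the pedal is
        // down, fully seated at or below halfThresh, and smoothstep-interpolated in between so
        // half-pedalling shades the damping instead of switching it.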
        float damperLiftBlock = damperLift;
        if (sustainPedalDown) damperLiftBlock = 1.0f;
        else if (sustainValueSmooth <= pedalCfg.halfThresh) damperLiftBlock = 0.0f;
        else
        {
            const float span = juce::jmax (0.001f, pedalCfg.sustainThresh - pedalCfg.halfThresh);
            const float t = (sustainValueSmooth - pedalCfg.halfThresh) / span;
            damperLiftBlock = smoothstep (t);
        }

const float sustainReleaseScale = juce::jlimit (1.0f, 4.0f, pedalCfg.sustainReleaseScale);
|
|
const float releaseScaleTarget = sustainPedalDown ? sustainReleaseScale
|
|
: ((sustainValueSmooth >= pedalCfg.halfThresh) ? halfReleaseScale : 1.0f);
|
|
sustainReleaseScaleSmoothed.setTargetValue (releaseScaleTarget);
|
|
if (segLen > 0)
|
|
sustainReleaseScaleSmoothed.skip (segLen);
|
|
const float releaseScale = sustainReleaseScaleSmoothed.getCurrentValue();
|
|
for (int i = 0; i < pmSynth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<PmVoice*> (pmSynth.getVoice (i)))
|
|
v->setReleaseScale (baseRelease, releaseScale);
|
|
for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
|
|
v->setReleaseScale (baseRelease, releaseScale);
|
|
if (sustainChanged)
|
|
{
|
|
for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
|
|
v->setSustainPedalDown (sustainPedalDown);
|
|
}
|
|
damperLift = damperLiftBlock;
|
|
for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
|
|
v->setDamperLift (damperLiftBlock);
|
|
|
|
if (currentEngine == "pm")
|
|
{
|
|
pmSynth.renderNextBlock (buffer, midiSegment, segStart, segLen);
|
|
if (DebugToggles::kEnableGlobalFilters)
|
|
applyPostLpfSegment (buffer, postPmLp1, postPmLp2, segStart, segLen);
|
|
}
|
|
else if (currentEngine == "pm2")
|
|
{
|
|
// FIX #4: Buses are now cleared once at block start, not per segment
|
|
pm2Synth.renderNextBlock (buffer, midiSegmentPm2, segStart, segLen);
|
|
applyDeclickOutSegment (buffer, segStart, segLen, pm2Synth.consumeDeclickOutSamples());
|
|
applyDeclickSegment (buffer, segStart, segLen, pm2Synth.consumeDeclickSamples());
|
|
applyGainSegment (buffer, pm2GainLin, segStart, segLen);
|
|
            // FIX #2: Smoothed polyphony compensation to prevent gain jumps
            {
                int activeVoices = 0;
                for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
                    if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
                        if (v->isActive())
                            ++activeVoices;
                // Gentle linear compensation: unity for a single voice, easing down by roughly
                // 0.7% per extra voice and clamped to a 0.95 floor, so dense chords are only
                // slightly attenuated.
                const float rawComp = (activeVoices > 1) ? (1.0f - ((float) (activeVoices - 1)) / 140.0f) : 1.0f;
                polyCompTarget = juce::jmax (0.95f, rawComp);
                // Smooth towards target over ~5-10ms to avoid clicks (closed-form per segment)
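                // One-pole smoothing collapsed over the whole segment: with per-sample coefficient a,
                // after segLen samples the state is  target + (state - target) * (1 - a)^segLen.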
                if (segLen > 0)
                {
                    const float decay = std::pow (1.0f - polyCompSmoothCoeff, (float) segLen);
                    polyCompSmoothed = polyCompTarget + (polyCompSmoothed - polyCompTarget) * decay;
                }
                if (std::abs (polyCompSmoothed - 1.0f) > 1.0e-4f)
                    applyGainSegment (buffer, polyCompSmoothed, segStart, segLen);
            }
{
|
|
float targetGain = 1.0f;
|
|
if (sustainPedalDown && pedalCfg.sustainGainDb > 0.01f)
|
|
{
|
|
const float sustainGainLin = juce::Decibels::decibelsToGain (pedalCfg.sustainGainDb);
|
|
targetGain = mixLinear (1.0f, sustainGainLin, sustainValueSmooth);
|
|
}
|
|
sustainGainLinSmoothed.setTargetValue (targetGain);
|
|
const float startGain = sustainGainLinSmoothed.getCurrentValue();
|
|
sustainGainLinSmoothed.skip (segLen);
|
|
const float endGain = sustainGainLinSmoothed.getCurrentValue();
|
|
if (std::abs (startGain - 1.0f) > 1.0e-4f || std::abs (endGain - 1.0f) > 1.0e-4f)
|
|
applyGainRampSegment (buffer, startGain, endGain, segStart, segLen);
|
|
}
|
|
if (pm2GainDb > 0.01f)
|
|
applySoftClipSegment (buffer, segStart, segLen, 1.6f);
|
|
if (DebugToggles::kEnableGlobalFilters)
|
|
applyPostLpfSegment (buffer, postPm2Lp1, postPm2Lp2, segStart, segLen);
|
|
}
|
|
else if (currentEngine == "va")
|
|
{
|
|
synth.renderNextBlock (buffer, midiSegment, segStart, segLen);
|
|
if (DebugToggles::kEnableGlobalFilters)
|
|
applyPostLpfSegment (buffer, postVaLp1, postVaLp2, segStart, segLen);
|
|
}
|
|
else // hybrid
|
|
{
|
|
if (useVA)
|
|
{
|
|
clearSegment (hybridVaBuf, segStart, segLen);
|
|
synth.renderNextBlock (hybridVaBuf, midiSegment, segStart, segLen);
|
|
if (DebugToggles::kEnableGlobalFilters)
|
|
applyPostLpfSegment (hybridVaBuf, postVaLp1, postVaLp2, segStart, segLen);
|
|
}
|
|
|
|
if (usePM)
|
|
{
|
|
clearSegment (hybridPmBuf, segStart, segLen);
|
|
pmSynth.renderNextBlock (hybridPmBuf, midiSegment, segStart, segLen);
|
|
if (DebugToggles::kEnableGlobalFilters)
|
|
applyPostLpfSegment (hybridPmBuf, postPmLp1, postPmLp2, segStart, segLen);
|
|
}
|
|
|
|
if (usePM2)
|
|
{
|
|
clearSegment (hybridPm2Buf, segStart, segLen);
|
|
// FIX #4: Buses are now cleared once at block start, not per segment
|
|
pm2Synth.renderNextBlock (hybridPm2Buf, midiSegmentPm2, segStart, segLen);
|
|
applyDeclickOutSegment (hybridPm2Buf, segStart, segLen, pm2Synth.consumeDeclickOutSamples());
|
|
applyDeclickSegment (hybridPm2Buf, segStart, segLen, pm2Synth.consumeDeclickSamples());
|
|
applyGainSegment (hybridPm2Buf, pm2GainLin, segStart, segLen);
|
|
// FIX #2: Smoothed polyphony compensation to prevent gain jumps
|
|
{
|
|
int activeVoices = 0;
|
|
for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
|
|
if (v->isActive())
|
|
++activeVoices;
|
|
                    // Gentle linear compensation: unity for a single voice, easing down by roughly
                    // 0.7% per extra voice and clamped to a 0.95 floor (same curve as the pm2-only path).
                    const float rawComp = (activeVoices > 1) ? (1.0f - ((float) (activeVoices - 1)) / 140.0f) : 1.0f;
                    polyCompTarget = juce::jmax (0.95f, rawComp);
if (segLen > 0)
|
|
{
|
|
const float decay = std::pow (1.0f - polyCompSmoothCoeff, (float) segLen);
|
|
polyCompSmoothed = polyCompTarget + (polyCompSmoothed - polyCompTarget) * decay;
|
|
}
|
|
if (std::abs (polyCompSmoothed - 1.0f) > 1.0e-4f)
|
|
applyGainSegment (hybridPm2Buf, polyCompSmoothed, segStart, segLen);
|
|
}
|
|
{
|
|
float targetGain = 1.0f;
|
|
if (sustainPedalDown && pedalCfg.sustainGainDb > 0.01f)
|
|
{
|
|
const float sustainGainLin = juce::Decibels::decibelsToGain (pedalCfg.sustainGainDb);
|
|
targetGain = mixLinear (1.0f, sustainGainLin, sustainValueSmooth);
|
|
}
|
|
sustainGainLinSmoothed.setTargetValue (targetGain);
|
|
const float startGain = sustainGainLinSmoothed.getCurrentValue();
|
|
sustainGainLinSmoothed.skip (segLen);
|
|
const float endGain = sustainGainLinSmoothed.getCurrentValue();
|
|
if (std::abs (startGain - 1.0f) > 1.0e-4f || std::abs (endGain - 1.0f) > 1.0e-4f)
|
|
applyGainRampSegment (hybridPm2Buf, startGain, endGain, segStart, segLen);
|
|
}
|
|
if (pm2GainDb > 0.01f)
|
|
applySoftClipSegment (hybridPm2Buf, segStart, segLen, 1.6f);
|
|
if (DebugToggles::kEnableGlobalFilters)
|
|
applyPostLpfSegment (hybridPm2Buf, postPm2Lp1, postPm2Lp2, segStart, segLen);
|
|
}
|
|
|
|
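            // Engine blend weights are clamped to [0, 1] and renormalised so they sum to 1,
            // keeping the hybrid sum at a consistent overall level regardless of the mix settings.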
float wVa = juce::jlimit (0.0f, 1.0f, vaMix);
|
|
float wPm = juce::jlimit (0.0f, 1.0f, pmMix);
|
|
float wPm2 = juce::jlimit (0.0f, 1.0f, pm2Mix);
|
|
float sum = juce::jmax (0.0001f, wVa + wPm + wPm2);
|
|
wVa /= sum; wPm /= sum; wPm2 /= sum;
|
|
|
|
for (int chOut = 0; chOut < buffer.getNumChannels(); ++chOut)
|
|
{
|
|
auto* dst = buffer.getWritePointer (chOut, segStart);
|
|
auto* vaP = hybridVaBuf.getReadPointer (juce::jmin (chOut, hybridVaBuf.getNumChannels() - 1), segStart);
|
|
auto* pmP = hybridPmBuf.getReadPointer (juce::jmin (chOut, hybridPmBuf.getNumChannels() - 1), segStart);
|
|
auto* pm2P = hybridPm2Buf.getReadPointer (juce::jmin (chOut, hybridPm2Buf.getNumChannels() - 1), segStart);
|
|
for (int i = 0; i < segLen; ++i)
|
|
dst[i] += vaP[i] * wVa + pmP[i] * wPm + pm2P[i] * wPm2;
|
|
}
|
|
}
|
|
}
|
|
|
|
#if JUCE_DEBUG
|
|
static int dbgBlockCounter = 0;
|
|
++dbgBlockCounter;
|
|
const int eventsCount = (int) events.size();
|
|
const int segmentsCount = (int) splitPoints.size() - 1;
|
|
const bool eventDrift = (eventIdx != events.size());
|
|
const float peak = buffer.getMagnitude (0, buffer.getNumSamples());
|
|
if ((dbgBlockCounter % 200) == 0 || eventsCount > 0 || peak > 1.0e-4f || eventDrift)
|
|
{
|
|
int activePm2 = 0;
|
|
for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
|
|
if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
|
|
if (v->isActive())
|
|
++activePm2;
|
|
|
|
DBG ("[MusPianoVST] blk=" << dbgBlockCounter
|
|
<< " events=" << eventsCount
|
|
<< " noteOn=" << noteOnCount
|
|
<< " noteOff=" << noteOffCount
|
|
<< " cc64=" << cc64Count
|
|
<< " other=" << otherCount
|
|
<< " segments=" << segmentsCount
|
|
<< " pm2Active=" << activePm2
|
|
<< " peak=" << peak
|
|
<< " firstPos=" << firstEventPos
|
|
<< " lastPos=" << lastEventPos
|
|
<< " eventDrift=" << (eventDrift ? "YES" : "no"));
|
|
}
|
|
#endif
|
|
|
|
    // Optional shaper applied to VA-only or hybrid paths
    if (currentEngine == "va" || currentEngine == "hybrid")
    {
        if (shaperEnabled && shaperDrive > 0.001f)
        {
            const float k = juce::jlimit (0.01f, 3.0f, shaperDrive * 3.0f);
            for (int ch = 0; ch < buffer.getNumChannels(); ++ch)
            {
                auto* x = buffer.getWritePointer (ch);
                for (int i = 0; i < buffer.getNumSamples(); ++i)
                    x[i] = std::tanh (k * x[i]);
            }
        }
    }

#if 0
|
|
// --- Hammer transient (additive burst) ---
|
|
if (hammerActive && hammerEnv > 1e-6f)
|
|
{
|
|
const int n = buffer.getNumSamples();
|
|
const int chs = buffer.getNumChannels();
|
|
|
|
for (int i = 0; i < n; ++i)
|
|
{
|
|
float w = fastRand01(hammerRng) * 2.0f - 1.0f; // white ~[-1,1]
|
|
w *= hammerNoise;
|
|
|
|
float shaped = DebugToggles::kEnableHammerFilter ? hammerHP.processSample (0, w) : w;
|
|
float s = shaped * hammerEnv;
|
|
|
|
for (int ch = 0; ch < chs; ++ch)
|
|
{
|
|
auto* dst = buffer.getWritePointer (ch);
|
|
dst[i] += s;
|
|
}
|
|
|
|
hammerEnv *= hammerDecayCoeff;
|
|
if (hammerEnv < 1e-6f)
|
|
{
|
|
hammerEnv = 0.0f;
|
|
hammerActive = false;
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
#endif
|
|
|
|
// --- Key-off noise burst ---
|
|
if (keyOffEnabled && keyOffEnv > 1e-6f)
|
|
{
|
|
const int n = buffer.getNumSamples();
|
|
const int chs = buffer.getNumChannels();
|
|
for (int i = 0; i < n; ++i)
|
|
{
|
|
float w = fastRand01 (hammerRng) * 2.0f - 1.0f;
|
|
float shaped = DebugToggles::kEnableKeyOffFilter ? keyOffHP.processSample (0, w) : w;
|
|
float s = shaped * keyOffEnv;
|
|
for (int ch = 0; ch < chs; ++ch)
|
|
buffer.getWritePointer (ch)[i] += s;
|
|
|
|
keyOffEnv *= keyOffDecayCoeff;
|
|
if (keyOffEnv < 1e-6f) { keyOffEnv = 0.0f; break; }
|
|
}
|
|
}
|
|
|
|
// --- Release thump (dampers hitting strings) ---
|
|
if (releaseThumpEnabled && releaseThumpEnv > 1e-6f)
|
|
{
|
|
const int n = buffer.getNumSamples();
|
|
const int chs = buffer.getNumChannels();
|
|
for (int i = 0; i < n; ++i)
|
|
{
|
|
float w = fastRand01 (hammerRng) * 2.0f - 1.0f;
|
|
float low = DebugToggles::kEnableReleaseThumpFilter ? releaseThumpLP.processSample (0, w) : w;
|
|
float thud = DebugToggles::kEnableReleaseThudFilter ? releaseThudHP.processSample (0, low) : low;
|
|
float s = (low * (1.0f - releaseThudMix) + thud * releaseThudMix) * releaseThumpEnv;
|
|
for (int ch = 0; ch < chs; ++ch)
|
|
buffer.getWritePointer (ch)[i] += s;
|
|
|
|
releaseThumpEnv *= releaseThumpDecayCoeff;
|
|
if (releaseThumpEnv < 1e-6f) { releaseThumpEnv = 0.0f; break; }
|
|
}
|
|
}
|
|
|
|
// --- Pedal thump (CC64 transitions) ---
|
|
if (pedalThumpEnabled && pedalThumpEnv > 1e-6f)
|
|
{
|
|
const int n = buffer.getNumSamples();
|
|
const int chs = buffer.getNumChannels();
|
|
for (int i = 0; i < n; ++i)
|
|
{
|
|
float w = fastRand01 (hammerRng) * 2.0f - 1.0f;
|
|
float shaped = DebugToggles::kEnablePedalThumpFilter ? pedalThumpLP.processSample (0, w) : w;
|
|
float s = shaped * pedalThumpEnv;
|
|
for (int ch = 0; ch < chs; ++ch)
|
|
buffer.getWritePointer (ch)[i] += s;
|
|
|
|
pedalThumpEnv *= pedalThumpDecayCoeff;
|
|
if (pedalThumpEnv < 1e-6f) { pedalThumpEnv = 0.0f; break; }
|
|
}
|
|
}
|
|
|
|
// --- Breath noise (post) ---
|
|
if (breathEnabled && breathGainLin > 0.0f)
|
|
{
|
|
ensureScratch (breathScratch, 1);
|
|
float* t = breathScratch.getWritePointer (0);
|
|
static uint32_t rng = 0xBEEFBEEF;
|
|
for (int i = 0; i < buffer.getNumSamples(); ++i)
|
|
{
|
|
rng = 1664525u * rng + 1013904223u;
|
|
t[i] = ((rng >> 8) * (1.0f / 16777216.0f)) * 2.0f - 1.0f;
|
|
}
|
|
|
|
if (DebugToggles::kEnableBreathFilter)
|
|
{
|
|
juce::dsp::AudioBlock<float> b (breathScratch);
|
|
juce::dsp::ProcessContextReplacing<float> ctx (b);
|
|
breathBp.process (ctx);
|
|
}
|
|
|
|
for (int ch = 0; ch < buffer.getNumChannels(); ++ch)
|
|
{
|
|
auto* dst = buffer.getWritePointer (ch);
|
|
for (int i = 0; i < buffer.getNumSamples(); ++i)
|
|
dst[i] += breathGainLin * t[i];
|
|
}
|
|
}
|
|
|
|
// --- Formants (post) --- PARALLEL RESONANCE PEAKS
|
|
// Instead of replacing the signal with bandpass-filtered content,
|
|
// we add resonance peaks in parallel to preserve the full spectrum.
|
|
if (DebugToggles::kEnableFormant)
|
|
{
|
|
for (int k = 0; k < 2; ++k)
|
|
{
|
|
if (! formant[k].enabled) continue;
|
|
|
|
// Create a copy for wet/resonance signal
|
|
formantScratch.makeCopyOf (buffer, true);
|
|
|
|
juce::dsp::AudioBlock<float> block (formantScratch);
|
|
juce::dsp::ProcessContextReplacing<float> ctx (block);
|
|
formant[k].f.process (ctx);
|
|
|
|
// Mix formant resonance back in additively. 0 dB means neutral.
|
|
const float peakGain = formant[k].gainLin - 1.0f;
|
|
if (std::abs (peakGain) < 1.0e-4f)
|
|
continue;
|
|
for (int ch = 0; ch < buffer.getNumChannels(); ++ch)
|
|
{
|
|
auto* dry = buffer.getWritePointer (ch);
|
|
const auto* w = formantScratch.getReadPointer (ch);
|
|
for (int i = 0; i < buffer.getNumSamples(); ++i)
|
|
dry[i] += w[i] * peakGain;
|
|
}
|
|
}
|
|
}
|
|
|
|
// --- Post tone controls: tilt only (LPFs were applied per-engine pre-sum) ---
|
|
if (DebugToggles::kEnableTilt)
|
|
{
|
|
// If host channel count changed after prepareToPlay, re-prepare tilt filters.
|
|
if (buffer.getNumChannels() > 0 && buffer.getNumChannels() != tiltNumChannels)
|
|
{
|
|
juce::dsp::ProcessSpec spec = mainSpec;
|
|
spec.numChannels = (juce::uint32) juce::jmax (1, buffer.getNumChannels());
|
|
tiltLow.reset(); tiltHigh.reset();
|
|
tiltLow.prepare (spec); tiltHigh.prepare (spec);
|
|
tiltNumChannels = buffer.getNumChannels();
|
|
tiltReady = false;
|
|
updatePostFiltersForNote (lastMidiNote);
|
|
auto prepSendHpfDynamic = [this, &spec] (decltype (pedalSendHpf)& f)
|
|
{
|
|
f.reset();
|
|
f.prepare (spec);
|
|
auto coeffs = juce::dsp::IIR::Coefficients<float>::makeHighPass (spec.sampleRate,
|
|
sendHpfCutoff,
|
|
0.707f);
|
|
if (f.state == nullptr)
|
|
f.state = coeffs;
|
|
else
|
|
*f.state = *coeffs;
|
|
};
|
|
prepSendHpfDynamic (pedalSendHpf);
|
|
prepSendHpfDynamic (sympSendHpf);
|
|
prepSendHpfDynamic (soundboardSendHpf);
|
|
{
|
|
juce::dsp::ProcessSpec monoSpec = spec;
|
|
monoSpec.numChannels = 1;
|
|
modalSendHpf.reset();
|
|
modalSendHpf.prepare (monoSpec);
|
|
modalSendHpf.coefficients = juce::dsp::IIR::Coefficients<float>::makeHighPass (monoSpec.sampleRate,
|
|
sendHpfCutoff,
|
|
0.707f);
|
|
}
|
|
sendHpfNumChannels = buffer.getNumChannels();
|
|
prepareBrightnessFilters();
|
|
}
|
|
|
|
juce::dsp::AudioBlock<float> block (buffer);
|
|
juce::dsp::ProcessContextReplacing<float> ctx (block);
|
|
if (std::abs (postTiltDbSmoothed.getCurrentValue()) > 1.0e-4f)
|
|
{
|
|
if (tiltLow.coefficients == nullptr || tiltHigh.coefficients == nullptr)
|
|
updatePostFiltersForNote (lastMidiNote);
|
|
|
|
if (tiltLow.coefficients != nullptr && tiltHigh.coefficients != nullptr && lastSampleRate > 0.0 && tiltReady)
|
|
{
|
|
tiltLow.process (ctx);
|
|
tiltHigh.process (ctx);
|
|
}
|
|
}
|
|
}
|
|
|
|
// --- Velocity-driven brightness shelf ---
|
|
if (DebugToggles::kEnableBrightness && brightnessEnabled && ! brightnessFilters.empty())
|
|
{
|
|
if (buffer.getNumChannels() != brightnessNumChannels)
|
|
prepareBrightnessFilters();
|
|
|
|
const float noteTerm = brightnessNoteSlopeDb * ((float) lastMidiNote - 60.0f) * (1.0f / 24.0f);
|
|
float targetDb = brightnessBaseDb + lastVelocityNorm * brightnessVelSlopeDb + noteTerm;
|
|
targetDb = juce::jlimit (-12.0f, brightnessMaxDb, targetDb);
|
|
brightnessDbSmoothed.setTargetValue (targetDb);
|
|
if (buffer.getNumSamples() > 0)
|
|
brightnessDbSmoothed.skip (buffer.getNumSamples());
|
|
const float currentDb = brightnessDbSmoothed.getCurrentValue();
|
|
if (std::abs (currentDb - brightnessCurrentDb) > 1.0e-4f)
|
|
updateBrightnessFilters (currentDb);
|
|
|
|
const int chs = buffer.getNumChannels();
|
|
const int n = buffer.getNumSamples();
|
|
for (int chIdx = 0; chIdx < chs; ++chIdx)
|
|
{
|
|
auto* x = buffer.getWritePointer (chIdx);
|
|
auto& f = brightnessFilters[(size_t) juce::jmin (chIdx, (int) brightnessFilters.size() - 1)];
|
|
for (int i = 0; i < n; ++i)
|
|
{
|
|
if (f.coefficients != nullptr)
|
|
x[i] = f.processSample (x[i]);
|
|
}
|
|
}
|
|
}
|
|
|
|
    // CPU optimisation: bypass the heavier post effects early under high polyphony
    // (six or more active pm2 voices).
    const bool highPolyProcessing = countActivePm2Voices() >= 6;

// --- Pedal resonance send/return (subtle body) ---
|
|
if (DebugToggles::kEnablePm2PedalResonance && DebugToggles::kEnableReverb
|
|
&& ! highPolyProcessing && pedalCfg.resonanceMix > 0.0001f && sustainValue >= pedalCfg.halfThresh)
|
|
{
|
|
const float send = pedalCfg.resonanceSend * sustainValue;
|
|
const float mix = pedalCfg.resonanceMix * sustainValue;
|
|
if (send > 0.0001f && mix > 0.0001f)
|
|
{
|
|
pedalScratch.makeCopyOf (buffer, true);
|
|
for (int ch = 0; ch < pedalScratch.getNumChannels(); ++ch)
|
|
{
|
|
auto* x = pedalScratch.getWritePointer (ch);
|
|
for (int i = 0; i < pedalScratch.getNumSamples(); ++i)
|
|
x[i] *= send;
|
|
}
|
|
|
|
{
|
|
juce::dsp::AudioBlock<float> wetBlock (pedalScratch);
|
|
juce::dsp::ProcessContextReplacing<float> wetCtx (wetBlock);
|
|
pedalSendHpf.process (wetCtx);
|
|
}
|
|
|
|
auto p = pedalReverbParams;
|
|
p.wetLevel = 1.0f;
|
|
p.dryLevel = 0.0f;
|
|
p.roomSize = juce::jlimit (0.0f, 0.4f, p.roomSize); // Limit room size for less wash
|
|
if (! pedalReverbParamsValid || ! reverbParamsEqual (p, pedalReverbParamsApplied))
|
|
{
|
|
pedalReverb.setParameters (p);
|
|
pedalReverbParamsApplied = p;
|
|
pedalReverbParamsValid = true;
|
|
}
|
|
|
|
if (pedalScratch.getNumChannels() >= 2)
|
|
pedalReverb.processStereo (pedalScratch.getWritePointer (0), pedalScratch.getWritePointer (1), pedalScratch.getNumSamples());
|
|
else
|
|
pedalReverb.processMono (pedalScratch.getWritePointer (0), pedalScratch.getNumSamples());
|
|
|
|
for (int ch = 0; ch < buffer.getNumChannels(); ++ch)
|
|
{
|
|
auto* dry = buffer.getWritePointer (ch);
|
|
auto* ww = pedalScratch.getWritePointer (ch);
|
|
for (int i = 0; i < buffer.getNumSamples(); ++i)
|
|
dry[i] = dry[i] * (1.0f - mix) + ww[i] * mix;
|
|
}
|
|
}
|
|
}
|
|
|
|
// --- Sympathetic send/return (undamped strings/pedal) ---
|
|
// Allow a scaled amount even without sustain, for controllable sympathetic ringing.
|
|
const float sympPedalScale = sustainPedalDown ? sustainValue : duplexCfg.sympNoPedalScale;
|
|
if (DebugToggles::kEnableReverb && ! highPolyProcessing && duplexCfg.sympMix > 0.0001f && sympPedalScale > 0.0001f)
|
|
{
|
|
const float send = juce::jlimit (0.0f, 1.0f, duplexCfg.sympSend) * sympPedalScale;
|
|
const float mix = juce::jlimit (0.0f, 1.0f, duplexCfg.sympMix) * sympPedalScale;
        if (send > 0.0001f && mix > 0.0001f)
        {
            sympScratch.makeCopyOf (buffer, true);
            for (int ch = 0; ch < sympScratch.getNumChannels(); ++ch)
            {
                auto* x = sympScratch.getWritePointer (ch);
                for (int i = 0; i < sympScratch.getNumSamples(); ++i)
                    x[i] *= send;
            }

            {
                juce::dsp::AudioBlock<float> wetBlock (sympScratch);
                juce::dsp::ProcessContextReplacing<float> wetCtx (wetBlock);
                sympSendHpf.process (wetCtx);
            }

            auto p = sympParams;
            p.wetLevel = 1.0f;
            p.dryLevel = 0.0f;
            p.roomSize = juce::jlimit (0.0f, 0.3f, p.roomSize); // Smaller room for less wash
            if (! sympParamsValid || ! reverbParamsEqual (p, sympParamsApplied))
            {
                sympReverb.setParameters (p);
                sympParamsApplied = p;
                sympParamsValid = true;
            }

            if (sympScratch.getNumChannels() >= 2)
                sympReverb.processStereo (sympScratch.getWritePointer (0), sympScratch.getWritePointer (1), sympScratch.getNumSamples());
            else
                sympReverb.processMono (sympScratch.getWritePointer (0), sympScratch.getNumSamples());

            for (int ch = 0; ch < buffer.getNumChannels(); ++ch)
            {
                auto* dry = buffer.getWritePointer (ch);
                auto* ww = sympScratch.getWritePointer (ch);
                for (int i = 0; i < buffer.getNumSamples(); ++i)
                    dry[i] = dry[i] * (1.0f - mix) + ww[i] * mix;
            }
        }
    }

    // --- Modal soundboard (BPF bank + predelay) ---
    const bool useModal = DebugToggles::kEnablePm2ModalSoundboard
                          && (! pmBoardModes.isEmpty()) && (pmBoardMix > 0.0001f) && (pmBoardSend > 0.0001f);
    if (useModal)
    {
        ensureScratch (modalScratch, numCh);
        if (modalDirty || (int) modalModes.size() != pmBoardModes.size())
        {
            modalModes.clear();
            modalModes.reserve ((size_t) pmBoardModes.size());
            for (const auto& m : pmBoardModes)
            {
                modalModes.emplace_back();
                auto& mm = modalModes.back();
                auto coeff = makeBandPass (lastSampleRate, m.f, m.q);
                for (int ch = 0; ch < modalChannels; ++ch)
                {
                    mm.bp[ch].coefficients = coeff;
                    mm.bp[ch].reset();
                }
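                // Mode gain is stored in dB and converted once here to a linear factor
                // (juce::Decibels::decibelsToGain: 0 dB -> 1.0, -6 dB -> about 0.5), so
                // the per-sample loop below only has to multiply.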
                mm.gainLin = juce::Decibels::decibelsToGain (m.gainDb);
            }
            modalDirty = false;
        }
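        // Predelay: pmPredelayMs is converted to a whole number of samples (e.g. 10 ms at
        // 48 kHz is 480 samples) and clamped so the circular predelayBuf always has room
        // for the delayed read position plus one block of writes.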

        predelaySamples = juce::jlimit (0, juce::jmax (0, (int) predelayBuf.size() - 2),
                                        (int) std::round (pmPredelayMs * 0.001 * lastSampleRate));
        const int needed = predelaySamples + buffer.getNumSamples() + 2;
        if (needed > (int) predelayBuf.size())
            predelaySamples = juce::jlimit (0, juce::jmax (0, (int) predelayBuf.size() - buffer.getNumSamples() - 2),
                                            predelaySamples);

        modalScratch.clear();

        const float send = juce::jlimit (0.0f, 1.0f, pmBoardSend);
        const float mix = juce::jlimit (0.0f, 1.0f, pmBoardMix);

        for (int i = 0; i < buffer.getNumSamples(); ++i)
        {
            float mono = 0.0f;
            for (int ch = 0; ch < buffer.getNumChannels(); ++ch)
                mono += buffer.getReadPointer (ch)[i];
            mono *= (buffer.getNumChannels() > 0) ? (1.0f / (float) buffer.getNumChannels()) : 1.0f;
            mono *= send;
            mono = modalSendHpf.processSample (mono);

            predelayBuf[(size_t) predelayWrite] = mono;
            int readIdx = predelayWrite - predelaySamples;
            if (readIdx < 0) readIdx += (int) predelayBuf.size();
            float delayed = predelayBuf[(size_t) readIdx];
            predelayWrite = (predelayWrite + 1) % (int) predelayBuf.size();

            for (size_t m = 0; m < modalModes.size(); ++m)
            {
                for (int ch = 0; ch < buffer.getNumChannels(); ++ch)
                {
                    auto* f = (ch < modalChannels) ? &modalModes[m].bp[ch] : &modalModes[m].bp[0];
                    if (f->coefficients != nullptr)
                    {
                        float w = f->processSample (delayed) * modalModes[m].gainLin;
                        modalScratch.getWritePointer (ch)[i] += w;
                    }
                }
            }
        }

        for (int ch = 0; ch < buffer.getNumChannels(); ++ch)
        {
            auto* dry = buffer.getWritePointer (ch);
            auto* ww = modalScratch.getWritePointer (ch);
            for (int i = 0; i < buffer.getNumSamples(); ++i)
                dry[i] = dry[i] * (1.0f - mix) + ww[i] * mix;
        }
    }

    // --- Soundboard resonator (procedural IR or JUCE reverb fallback) ---
    if (DebugToggles::kEnableReverb && soundboardEnabled && soundboardMix > 0.0001f)
    {
        soundboardScratch.makeCopyOf (buffer, true);

        {
            juce::dsp::AudioBlock<float> wetBlock (soundboardScratch);
            juce::dsp::ProcessContextReplacing<float> wetCtx (wetBlock);
            soundboardSendHpf.process (wetCtx);
        }

        if (DebugToggles::kEnableSoundboardConvolution)
        {
            updateSoundboardConvolution (false);
            if (DebugToggles::kSoundboardConvolutionDownsample > 1)
            {
                const int dsFactor = DebugToggles::kSoundboardConvolutionDownsample;
                const int numSamples = soundboardScratch.getNumSamples();
                const int numCh = soundboardScratch.getNumChannels();
                const int dsSamples = (numSamples + dsFactor - 1) / dsFactor;
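                // Ceiling division so a partial final group still gets an output slot:
                // e.g. 512 input samples at dsFactor 4 -> 128 slots, and 510 -> 128 as well.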

                if (soundboardScratchDs.getNumChannels() != numCh
                    || soundboardScratchDs.getNumSamples() != dsSamples)
                    soundboardScratchDs.setSize (numCh, dsSamples, false, false, true);
                soundboardScratchDs.clear();

                // Downsample by simple averaging to reduce convolution workload.
                for (int ch = 0; ch < numCh; ++ch)
                {
                    const float* src = soundboardScratch.getReadPointer (ch);
                    float* dst = soundboardScratchDs.getWritePointer (ch);
                    int di = 0;
                    for (int i = 0; i < numSamples; i += dsFactor)
                    {
                        float sum = 0.0f;
                        int count = 0;
                        for (int k = 0; k < dsFactor && i + k < numSamples; ++k)
                        {
                            sum += src[i + k];
                            ++count;
                        }
                        dst[di++] = (count > 0) ? (sum / (float) count) : 0.0f;
                    }
                }

                {
                    juce::dsp::AudioBlock<float> wetBlock (soundboardScratchDs);
                    juce::dsp::ProcessContextReplacing<float> wetCtx (wetBlock);
                    soundboardConvolutionDs.process (wetCtx);
                }

                // Upsample with linear interpolation back to full-rate scratch.
                for (int ch = 0; ch < numCh; ++ch)
                {
                    const float* src = soundboardScratchDs.getReadPointer (ch);
                    float* dst = soundboardScratch.getWritePointer (ch);
                    const int dsCount = soundboardScratchDs.getNumSamples();
                    for (int i = 0; i < numSamples; ++i)
                    {
                        const int idx = i / dsFactor;
                        if (idx >= dsCount - 1)
                        {
                            dst[i] = src[dsCount - 1];
                        }
                        else if ((i % dsFactor) == 0)
                        {
                            dst[i] = src[idx];
                        }
                        else
                        {
                            const float frac = (float) (i % dsFactor) / (float) dsFactor;
                            dst[i] = src[idx] + (src[idx + 1] - src[idx]) * frac;
                        }
                    }
                }
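                // e.g. with dsFactor 4, output sample i = 5 sits between ds samples 1 and 2
                // with frac = 0.25, giving dst[5] = src[1] + 0.25 * (src[2] - src[1]).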

                {
                    juce::dsp::AudioBlock<float> wetBlock (soundboardScratch);
                    juce::dsp::ProcessContextReplacing<float> wetCtx (wetBlock);
                    soundboardReturnHpf.process (wetCtx);
                }
            }
            else
            {
                juce::dsp::AudioBlock<float> wetBlock (soundboardScratch);
                juce::dsp::ProcessContextReplacing<float> wetCtx (wetBlock);
                soundboardConvolution.process (wetCtx);
                soundboardReturnHpf.process (wetCtx);
            }
        }
        else
        {
            auto p = soundboardParams;
            p.wetLevel = 1.0f; // wet only
            p.dryLevel = 0.0f; // no dry inside
            p.roomSize = juce::jlimit (0.0f, 0.5f, p.roomSize); // Limit room size
            if (! soundboardParamsValid || ! reverbParamsEqual (p, soundboardParamsApplied))
            {
                soundboardReverb.setParameters (p);
                soundboardParamsApplied = p;
                soundboardParamsValid = true;
            }

            if (soundboardScratch.getNumChannels() >= 2)
                soundboardReverb.processStereo (soundboardScratch.getWritePointer (0), soundboardScratch.getWritePointer (1), soundboardScratch.getNumSamples());
            else
                soundboardReverb.processMono (soundboardScratch.getWritePointer (0), soundboardScratch.getNumSamples());
            {
                juce::dsp::AudioBlock<float> wetBlock (soundboardScratch);
                juce::dsp::ProcessContextReplacing<float> wetCtx (wetBlock);
                soundboardReturnHpf.process (wetCtx);
            }
        }

        const float mix = juce::jlimit (0.0f, 1.0f, soundboardMix);
        for (int ch = 0; ch < buffer.getNumChannels(); ++ch)
        {
            auto* dry = buffer.getWritePointer (ch);
            auto* ww = soundboardScratch.getWritePointer (ch);
            for (int i = 0; i < buffer.getNumSamples(); ++i)
                dry[i] = dry[i] * (1.0f - mix) + ww[i] * mix;
        }
    }

    // --- Mic perspectives blend (post chain) ---
    if (DebugToggles::kEnableMic)
        applyMicMix (buffer);

    // --- Optional post room/hall reverb (testing toggle) ---
    const float postRoomMixParam = apvts.getRawParameterValue (ParamIDs::postRoomMix)->load();
    const bool postRoomEnableParam = apvts.getRawParameterValue (ParamIDs::postRoomEnable)->load() >= 0.5f;
    if (DebugToggles::kEnablePostRoomReverb && postRoomEnableParam && postRoomMixParam > 0.0001f)
    {
        juce::Reverb::Parameters p;
        if (DebugToggles::kPostRoomIsHall)
        {
            p.roomSize = 0.78f;
            p.damping = 0.45f;
            p.width = 1.0f;
            p.wetLevel = 0.22f * postRoomMixParam;
            p.dryLevel = 1.0f;
        }
        else
        {
            p.roomSize = 0.42f;
            p.damping = 0.35f;
            p.width = 0.9f;
            p.wetLevel = 0.16f * postRoomMixParam;
            p.dryLevel = 1.0f;
        }
        p.freezeMode = 0.0f;
        if (! postReverbParamsValid || ! reverbParamsEqual (p, postReverbParamsApplied))
        {
            postReverb.setParameters (p);
            postReverbParamsApplied = p;
            postReverbParamsValid = true;
        }
        if (buffer.getNumChannels() >= 2)
            postReverb.processStereo (buffer.getWritePointer (0), buffer.getWritePointer (1), buffer.getNumSamples());
        else
            postReverb.processMono (buffer.getWritePointer (0), buffer.getNumSamples());
    }

    // --- Final output HPF to remove sub-bass/rumble ---
    if (DebugToggles::kEnableGlobalFilters)
    {
        if (buffer.getNumChannels() > 0 && buffer.getNumChannels() != outputHpfNumChannels)
        {
            juce::dsp::ProcessSpec spec = mainSpec;
            spec.numChannels = (juce::uint32) juce::jmax (1, buffer.getNumChannels());
            outputHpf.reset();
            outputHpf.prepare (spec);
            auto coeffs = juce::dsp::IIR::Coefficients<float>::makeHighPass (spec.sampleRate,
                                                                             outputHpfCutoff,
                                                                             0.707f);
            if (outputHpf.state == nullptr)
                outputHpf.state = coeffs;
            else
                *outputHpf.state = *coeffs;
            outputHpfNumChannels = buffer.getNumChannels();
        }
        {
            juce::dsp::AudioBlock<float> block (buffer);
            juce::dsp::ProcessContextReplacing<float> ctx (block);
            outputHpf.process (ctx);
        }
    }

    // --- Output padding to ease limiter load ---
    buffer.applyGain (juce::Decibels::decibelsToGain (-3.0f));
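    // (-3 dB is roughly a 0.71x linear gain, leaving some headroom for the limiter below.)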

    // --- Final lookahead limiter (gain riding + delay) ---
    if (DebugToggles::kEnableFinalLimiter)
    {
        const int numCh = buffer.getNumChannels();
        const int numSamples = buffer.getNumSamples();
        const int requiredSize = limiterLookaheadSamples + numSamples + 1;
        if (numCh != limiterDelayBuffer.getNumChannels() || requiredSize > limiterDelayBufferSize)
        {
            limiterDelayBufferSize = requiredSize;
            limiterDelayBuffer.setSize (juce::jmax (1, numCh), limiterDelayBufferSize, false, false, true);
            limiterDelayBuffer.clear();
            limiterWritePos = 0;
            limiterGain = 1.0f;
        }

        for (int i = 0; i < numSamples; ++i)
        {
            float peak = 0.0f;
            for (int ch = 0; ch < numCh; ++ch)
            {
                const float s = buffer.getReadPointer (ch)[i];
                peak = juce::jmax (peak, std::abs (s));
                limiterDelayBuffer.setSample (ch, limiterWritePos, s);
            }
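            // Gain riding: when the instantaneous peak exceeds the threshold, the target
            // gain is threshold / peak (e.g. a threshold of 0.9 against a peak of 1.2 gives
            // about 0.75); a one-pole smoother then walks limiterGain toward that target at
            // the attack rate (or the release rate when recovering). Because the gain is
            // applied to the delayed samples read below, the reduction is already in place
            // by the time the peak reaches the output.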

            const float desiredGain = (peak > limiterThreshold && peak > 0.0f) ? (limiterThreshold / peak) : 1.0f;
            const float coeff = (desiredGain < limiterGain) ? limiterAttackCoeff : limiterReleaseCoeff;
            limiterGain = desiredGain + coeff * (limiterGain - desiredGain);

            int readPos = limiterWritePos - limiterLookaheadSamples;
            if (readPos < 0)
                readPos += limiterDelayBufferSize;
            for (int ch = 0; ch < numCh; ++ch)
            {
                const float delayed = limiterDelayBuffer.getSample (ch, readPos);
                buffer.getWritePointer (ch)[i] = delayed * limiterGain;
            }

            limiterWritePos = (limiterWritePos + 1) % limiterDelayBufferSize;
        }
    }

    // --- Final output LPF (post everything) ---
    if (buffer.getNumChannels() > 0 && buffer.getNumChannels() != outputLpfNumChannels)
    {
        juce::dsp::ProcessSpec spec = mainSpec;
        spec.numChannels = (juce::uint32) juce::jmax (1, buffer.getNumChannels());
        outputLpf.reset();
        outputLpf.prepare (spec);
        outputLpf.setType (juce::dsp::StateVariableTPTFilterType::lowpass);
        outputLpfNumChannels = buffer.getNumChannels();
        updateOutputLpf();
    }

    if (DebugToggles::kEnableGlobalFilters && outputLpfEnabled)
    {
        juce::dsp::AudioBlock<float> block (buffer);
        juce::dsp::ProcessContextReplacing<float> ctx (block);
        outputLpf.process (ctx);
    }

    // --- Output EQ (5-band, post LPF) ---
    if (buffer.getNumChannels() > 0 && buffer.getNumChannels() != outputEqNumChannels)
    {
        juce::dsp::ProcessSpec spec = mainSpec;
        spec.numChannels = (juce::uint32) juce::jmax (1, buffer.getNumChannels());
        for (auto& f : outputEqFilters)
        {
            f.reset();
            f.prepare (spec);
        }
        outputEqNumChannels = buffer.getNumChannels();
        updateOutputEq();
    }

    if (DebugToggles::kEnableEq && outputEqEnabled)
    {
        juce::dsp::AudioBlock<float> block (buffer);
        for (auto& f : outputEqFilters)
        {
            juce::dsp::ProcessContextReplacing<float> ctx (block);
            f.process (ctx);
        }
    }

    // --- Final DC blocker (gentle, post EQ) ---
    if (DebugToggles::kEnableOutputDcBlock)
    {
        if (buffer.getNumChannels() > 0 && buffer.getNumChannels() != outputDcNumChannels)
        {
            const int chs = juce::jmin (2, buffer.getNumChannels());
            for (int ch = 0; ch < chs; ++ch)
                outputDcBlock[(size_t) ch].reset (lastSampleRate);
            outputDcNumChannels = buffer.getNumChannels();
        }
        for (int ch = 0; ch < buffer.getNumChannels(); ++ch)
        {
            auto* x = buffer.getWritePointer (ch);
            auto& dc = outputDcBlock[(size_t) juce::jmin (ch, 1)];
            for (int i = 0; i < buffer.getNumSamples(); ++i)
                x[i] = dc.process (x[i]);
        }
    }

    // --- Master volume (post everything, host-automatable, not preset-controlled) ---
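    // A short linear ramp between the block-start and block-end gain values avoids zipper
    // noise when the host automates the output gain; when the two are effectively equal,
    // a single applyGain is cheaper.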
    if (buffer.getNumSamples() > 0 && std::abs (outputGainEnd - outputGainStart) > 1.0e-6f)
        buffer.applyGainRamp (0, buffer.getNumSamples(),
                              masterVolumeLin * outputGainStart,
                              masterVolumeLin * outputGainEnd);
    else
        buffer.applyGain (masterVolumeLin * outputGainLin);
}

//============================== State =========================================
void FluteSynthAudioProcessor::getStateInformation (juce::MemoryBlock& destData)
{
    auto state = apvts.copyState();
    std::unique_ptr<juce::XmlElement> xml (state.createXml());
    copyXmlToBinary (*xml, destData);
}

void FluteSynthAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
    std::unique_ptr<juce::XmlElement> xml (getXmlFromBinary (data, sizeInBytes));
    if (xml != nullptr && xml->hasTagName (apvts.state.getType()))
    {
        apvts.replaceState (juce::ValueTree::fromXml (*xml));
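        // replaceState only restores the APVTS tree, so the processor's cached copies of
        // those values are refreshed explicitly right after.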
        syncExtendedParamsFromAPVTS();
    }
}

//============================= Parameters (VA) =================================
juce::AudioProcessorValueTreeState::ParameterLayout FluteSynthAudioProcessor::createParameterLayout()
{
    std::vector<std::unique_ptr<juce::RangedAudioParameter>> p;

    auto mk = [] (const juce::String& id, const juce::String& name,
                  float min, float max, float def, float centreSkew = 0.0f)
    {
        auto r = juce::NormalisableRange<float> (min, max);
#if JUCE_MAJOR_VERSION >= 7
        if (centreSkew != 0.0f) r.setSkewForCentre (centreSkew);
#else
        (void) centreSkew;
#endif
        return std::make_unique<juce::AudioParameterFloat> (juce::ParameterID (id, 1), name, r, def);
    };
    auto mkBool = [] (const juce::String& id, const juce::String& name, bool def)
    {
        return std::make_unique<juce::AudioParameterBool> (juce::ParameterID (id, 1), name, def);
    };

    // osc mix
    p.push_back (mk (ParamIDs::oscSine, "Sine", 0.0f, 1.0f, 0.7f));
    p.push_back (mk (ParamIDs::oscSaw, "Saw", 0.0f, 1.0f, 0.3f));
    p.push_back (mk (ParamIDs::oscSquare, "Square", 0.0f, 1.0f, 0.0f));

    // filter
    p.push_back (mk (ParamIDs::cutoff, "Cutoff", 100.0f, 8000.0f, 1800.0f, 1000.0f));
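    // (A centreSkew of 1000 puts the knob's midpoint near 1 kHz instead of the linear
    // midpoint of about 4 kHz, which better matches how cutoff frequency is heard.)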
    p.push_back (mk (ParamIDs::resonance, "Resonance", 0.1f, 1.5f, 0.7f));

    // ADSR - Note: For pm2 engine, Decay controls physical string sustain time
    p.push_back (mk (ParamIDs::attack, "Attack", 0.001f, 3.000f, 0.010f));
    p.push_back (mk (ParamIDs::decay, "Decay (Sustain)", 0.100f, 9.100f, 2.00f, 2.0f));
    p.push_back (mk (ParamIDs::sustain, "Sustain", 0.00f, 1.00f, 0.00f));
    p.push_back (mk (ParamIDs::release, "Release", 0.030f, 7.000f, 1.00f, 1.0f));

    // optional breath/noise pre (voice) gain in dB
    p.push_back (mk (ParamIDs::noiseDb, "Noise (dB)", -62.0f, -12.0f, -48.0f));

    // Formants (post)
    p.push_back (mkBool (ParamIDs::formant1Enable, "Formant 1 Enable", false));
    p.push_back (mk (ParamIDs::formant1Freq, "Formant 1 Freq", 300.0f, 12000.0f, 1800.0f));
    p.push_back (mk (ParamIDs::formant1Q, "Formant 1 Q", 0.5f, 6.0f, 2.0f));
    p.push_back (mk (ParamIDs::formant1GainDb, "Formant 1 Gain", -9.0f, 9.0f, 0.0f));
    p.push_back (mkBool (ParamIDs::formant2Enable, "Formant 2 Enable", false));
    p.push_back (mk (ParamIDs::formant2Freq, "Formant 2 Freq", 300.0f, 12000.0f, 1800.0f));
    p.push_back (mk (ParamIDs::formant2Q, "Formant 2 Q", 0.5f, 6.0f, 2.0f));
    p.push_back (mk (ParamIDs::formant2GainDb, "Formant 2 Gain", -9.0f, 9.0f, 0.0f));

    // Soundboard
    p.push_back (mkBool (ParamIDs::soundboardEnable, "Soundboard Enable", false));
    p.push_back (mk (ParamIDs::soundboardMix, "Soundboard Mix", 0.0f, 0.20f, 0.02f));
    p.push_back (mk (ParamIDs::soundboardT60, "Soundboard T60", 1.6f, 2.8f, 2.2f));
    p.push_back (mk (ParamIDs::soundboardDamp, "Soundboard Damping", 0.0f, 1.0f, 0.40f));
    p.push_back (mk (ParamIDs::postRoomMix, "Post Room Mix", 0.0f, 1.0f, 1.0f));
    p.push_back (mkBool (ParamIDs::postRoomEnable, "Post Room Enable", true));

    // Felt/contact
    p.push_back (mk (ParamIDs::feltPreload, "Felt Preload", 0.0f, 0.6f, 0.08f));
    p.push_back (mk (ParamIDs::feltStiffness, "Felt Stiffness", 1.0f, 5.0f, 2.4f));
    p.push_back (mk (ParamIDs::feltHysteresis, "Felt Hysteresis", 0.0f, 0.6f, 0.15f));
    p.push_back (mk (ParamIDs::feltMax, "Felt Max", 0.4f, 4.0f, 1.4f));

    // Duplex
    p.push_back (mk (ParamIDs::duplexRatio, "Duplex Ratio", 1.1f, 4.0f, 2.2f));
    p.push_back (mk (ParamIDs::duplexGainDb, "Duplex Gain", -20.0f, -6.0f, -12.0f));
    p.push_back (mk (ParamIDs::duplexDecayMs, "Duplex Decay", 10.0f, 400.0f, 120.0f));
    p.push_back (mk (ParamIDs::duplexSympSend, "Sympathetic Send", 0.0f, 1.0f, 0.15f));
    p.push_back (mk (ParamIDs::duplexSympMix, "Sympathetic Mix", 0.0f, 1.0f, 0.20f));

    // PM2 gain trim - FIXED: Changed default from +12dB to 0dB to prevent overwhelming output
    p.push_back (mk (ParamIDs::pm2GainDb, "PM2 Gain (dB)", -24.0f, 42.0f, 0.0f));

    // Final output LPF
    p.push_back (mkBool (ParamIDs::outputLpfEnable, "Output LPF Enable", false));
    p.push_back (mk (ParamIDs::outputLpfCutoff, "Output LPF Cutoff", 0.0f, 18000.0f, 18000.0f, 4000.0f));
    p.push_back (mk (ParamIDs::outputLpfQ, "Output LPF Q", 0.2f, 2.5f, 0.707f));

    // Master volume (linear gain)
    p.push_back (mk (ParamIDs::masterVolume, "Master Volume", 0.0f, 2.0f, 0.9f, 1.0f));
    p.push_back (std::make_unique<juce::AudioParameterChoice> (
        juce::ParameterID (ParamIDs::temperament, 1),
        "Temperament",
        juce::StringArray { "Preset", "12-TET", "Werckmeister", "Kirnberger", "Meantone", "Pythagorean" },
        0));
    p.push_back (std::make_unique<juce::AudioParameterChoice> (
        juce::ParameterID (ParamIDs::velocityCurve, 1),
        "Velocity Curve",
        juce::StringArray { "Light", "Normal", "Heavy", "Fixed" },
        1)); // Default to "Normal"

    return { p.begin(), p.end() };
}

//=========================== JSON preset helpers ===============================
bool FluteSynthAudioProcessor::hasProp (const juce::DynamicObject& o, const juce::Identifier& id)
{ return o.hasProperty (id); }

float FluteSynthAudioProcessor::getFloatProp (const juce::DynamicObject& o, const juce::Identifier& id, float def)
{
    if (! hasProp (o, id)) return def;
    auto v = o.getProperty (id);
    if (v.isDouble() || v.isInt()) return (float) v;
    return def;
}
bool FluteSynthAudioProcessor::getBoolProp (const juce::DynamicObject& o, const juce::Identifier& id, bool def)
{
    if (! hasProp (o, id)) return def;
    auto v = o.getProperty (id);
    if (v.isBool()) return (bool) v;
    if (v.isInt()) return ((int) v) != 0;
    return def;
}
juce::String FluteSynthAudioProcessor::getStringProp (const juce::DynamicObject& o, const juce::Identifier& id, const juce::String& def)
{
    if (! hasProp (o, id)) return def;
    auto v = o.getProperty (id);
    if (v.isString()) return v.toString();
    return def;
}

static std::array<float,12> getTemperamentOffsetsByName (juce::String name)
{
    name = name.trim().toLowerCase();
    if (name == "pythagorean")
        return { { 0.0f, 23.46f, 3.91f, 27.37f, 7.82f, -13.69f, 11.73f, -1.96f, 21.50f, 1.96f, 25.46f, 5.87f } };
    if (name == "meantone" || name == "quarter-comma meantone" || name == "quarter comma meantone")
        return { { 0.0f, 20.51f, 3.42f, 23.94f, 6.84f, -11.73f, 9.78f, -1.95f, 18.57f, 1.71f, 22.24f, 5.13f } };
    if (name == "werckmeister" || name == "werckmeister iii" || name == "werckmeister3")
        return { { 0.0f, 3.91f, 1.96f, 5.87f, -1.96f, 0.0f, 3.91f, -1.96f, 1.96f, -3.91f, 1.96f, -5.87f } };
    if (name == "kirnberger" || name == "kirnberger iii" || name == "kirnberger3")
        return { { 0.0f, 3.91f, 1.96f, 5.87f, -1.96f, 0.0f, 3.91f, -1.96f, 1.96f, -3.91f, 1.96f, -5.87f } };

    return { { 0.0f } }; // 12-TET default
}
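// The offsets above are per pitch class, in cents relative to 12-TET; as the usual
// cents-to-frequency ratio 2^(cents / 1200), +23.46 cents is roughly a 1.0136x ratio.
// expandPitchClassOffsets() below simply repeats the 12 values across all 128 MIDI notes
// (e.g. MIDI 69, A4, maps to pitch class 9).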

static std::array<float,12> getTemperamentOffsetsByChoice (int choice)
{
    switch (choice)
    {
        case 1: return getTemperamentOffsetsByName ("12-TET");
        case 2: return getTemperamentOffsetsByName ("Werckmeister");
        case 3: return getTemperamentOffsetsByName ("Kirnberger");
        case 4: return getTemperamentOffsetsByName ("Meantone");
        case 5: return getTemperamentOffsetsByName ("Pythagorean");
        default: return { { 0.0f } };
    }
}

static std::array<float,128> expandPitchClassOffsets (const std::array<float,12>& offsets)
{
    std::array<float,128> expanded { { 0.0f } };
    for (int i = 0; i < 128; ++i)
        expanded[(size_t) i] = offsets[(size_t) (i % 12)];
    return expanded;
}

PresetModel FluteSynthAudioProcessor::buildPhysicsPresetModel() const
{
    PresetModel p;
    p.engine = "pm2";
    p.engineMixVa = 0.0f;
    p.engineMixPm = 0.0f;
    p.engineMixPm2 = 1.0f;

    const int refMidi = 60;
    p.hammerModel.force = 0.65f;
    p.hammerModel.massKg = PianoPhysics::Hammer::getMass (refMidi);
    p.hammerModel.contactExponent = PianoPhysics::Hammer::getExponent (refMidi);
    p.hammerModel.contactStiffness = mapHammerStiffnessToModel (PianoPhysics::Hammer::getStiffness (refMidi));
    p.hammerModel.contactDamping = juce::jmap (PianoPhysics::Hammer::getHysteresis (refMidi), 0.08f, 0.18f, 4.0f, 8.0f);

    p.feltModel.hysteresis = PianoPhysics::Hammer::getHysteresis (refMidi);

    p.damper.lossDamped = PianoPhysics::Damper::maxDamping;
    p.damper.smoothMs = PianoPhysics::Damper::engageTime_s * 1000.0f;
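    // Note: lossDamped and smoothMs are intentionally overridden further down by the
    // tighter "FIX" values (0.35 and 8 ms) so released notes damp almost immediately.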

    p.soundboard.enabled = true;
    p.soundboard.mix = 0.025f;
    const float sbT60 = PianoPhysics::Soundboard::getT60 (200.0f);
    p.soundboard.t60_s = juce::jlimit (1.6f, 2.8f, sbT60);
    p.soundboard.damp = juce::jlimit (0.0f, 1.0f, PianoPhysics::Soundboard::typicalLossFactor / 0.03f);

    // FIX: Reduced PM2 gain and output gain to prevent hot/clipping output
    p.pm2GainDb = -4.0f; // Was 0.0f - reduce per-voice level
    p.outputGainDb = -9.0f; // Was -6.0f - additional headroom
    p.breathEnabled = false;
    p.noiseDb = -62.0f;
    p.pedal.sustainGainDb = 3.0f;
    p.damper.lossOff = 1.0f;
    p.damper.lossHalf = 0.80f; // Was 0.90f - more damping when half-pedal
    p.damper.lossDamped = 0.35f; // Was 0.65f - VERY aggressive damping for instant note-off
    p.damper.smoothMs = 8.0f; // Fast damper engage time

    // FIX: Very short ADSR release for tight key-tracking
    // Notes should stop almost immediately when you release the key
    p.release = 0.025f; // Was 0.08f - now 25ms for very tight response
    p.releaseExtension = 1.0f; // No extension for tighter response

    // FIX: Enable output LPF to remove high-frequency crackle/noise/aliasing
    // Cutoff at 12kHz removes harsh artifacts while preserving musicality
    p.outputLpf.enabled = true;
    p.outputLpf.cutoff = 12000.0f; // Reduced from 14kHz to catch more aliasing
    p.outputLpf.q = 0.707f; // Butterworth response - smooth rolloff

    return p;
}
|
|
|
|
PresetModel FluteSynthAudioProcessor::parsePresetJson (const juce::var& v)
|
|
{
|
|
PresetModel p; // defaults
|
|
if (! v.isObject()) return p;
|
|
|
|
auto* obj = v.getDynamicObject();
|
|
auto clamp = PresetModel::clamp;
|
|
const int ver = obj->hasProperty ("schema_version") ? (int) obj->getProperty ("schema_version") : 3;
|
|
juce::ignoreUnused (ver);
|
|
|
|
// top-level
|
|
if (hasProp (*obj, "schema_version")) p.schemaVersion = (int) obj->getProperty ("schema_version");
|
|
p.engine = getStringProp (*obj, "engine", p.engine);
|
|
p.masterTuneCents = clamp (getFloatProp (*obj, "master_tune_cents", p.masterTuneCents), -200.0f, 200.0f);
|
|
p.pitchCompOffsetCents = clamp (getFloatProp (*obj, "pitch_comp_offset_cents", p.pitchCompOffsetCents), -50.0f, 50.0f);
|
|
p.pitchCompSlopeCents = clamp (getFloatProp (*obj, "pitch_comp_slope_cents", p.pitchCompSlopeCents), -0.5f, 0.5f);
|
|
// FIXED: Extended range to allow larger negative slopes for realistic treble attenuation
|
|
p.pm2LoudnessSlopeDbPerSemi = clamp (getFloatProp (*obj, "pm2_loudness_slope_db_per_semi", p.pm2LoudnessSlopeDbPerSemi), -0.10f, 0.1f);
|
|
p.noiseDb = clamp (getFloatProp (*obj, "noise_db", p.noiseDb), -62.0f, -12.0f);
|
|
p.outputGainDb = clamp (getFloatProp (*obj, "output_gain_db", p.outputGainDb), -24.0f, 12.0f);
|
|
p.releaseExtension = clamp (getFloatProp (*obj, "release_extension", p.releaseExtension), 1.0f, 4.0f);
|
|
p.velocityGamma = clamp (getFloatProp (*obj, "velocity_curve_gamma", p.velocityGamma), 0.3f, 3.0f);
|
|
p.velocityCurve = getStringProp (*obj, "velocity_curve", p.velocityCurve);
|
|
|
|
if (auto tv = obj->getProperty ("temperament"); tv.isObject())
|
|
{
|
|
if (auto* to = tv.getDynamicObject())
|
|
{
|
|
p.temperamentName = getStringProp (*to, "name", p.temperamentName);
|
|
if (auto ov = to->getProperty ("offsets_cents"); ov.isArray())
|
|
{
|
|
auto* arr = ov.getArray();
|
|
if (arr != nullptr && arr->size() >= 12)
|
|
{
|
|
for (int i = 0; i < 12; ++i)
|
|
p.temperamentOffsetsCents[(size_t) i] = (float) arr->getUnchecked (i);
|
|
p.temperamentUseOffsets = true;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
if (auto ov = obj->getProperty ("per_note_offsets_cents"); ov.isArray())
|
|
{
|
|
auto* arr = ov.getArray();
|
|
if (arr != nullptr && arr->size() > 0)
|
|
{
|
|
if (arr->size() == 12)
|
|
{
|
|
for (int i = 0; i < 128; ++i)
|
|
p.perNoteOffsetsCents[(size_t) i] = (float) arr->getUnchecked (i % 12);
|
|
}
|
|
else
|
|
{
|
|
for (int i = 0; i < 128; ++i)
|
|
{
|
|
const int idx = juce::jmin (i, arr->size() - 1);
|
|
p.perNoteOffsetsCents[(size_t) i] = (float) arr->getUnchecked (idx);
|
|
}
|
|
}
|
|
p.perNoteOffsetsEnabled = true;
|
|
}
|
|
}
|
|
|
|
if (auto bv = obj->getProperty ("brightness"); bv.isObject())
|
|
{
|
|
if (auto* bo = bv.getDynamicObject())
|
|
{
|
|
p.brightnessEnabled = getBoolProp (*bo, "enabled", p.brightnessEnabled);
|
|
p.brightnessBaseDb = clamp (getFloatProp (*bo, "base_db", p.brightnessBaseDb), -12.0f, 12.0f);
|
|
p.brightnessVelSlopeDb = clamp (getFloatProp (*bo, "vel_slope_db", p.brightnessVelSlopeDb), -12.0f, 12.0f);
|
|
p.brightnessNoteSlopeDb = clamp (getFloatProp (*bo, "note_slope_db", p.brightnessNoteSlopeDb), -12.0f, 12.0f);
|
|
p.brightnessMaxDb = clamp (getFloatProp (*bo, "max_db", p.brightnessMaxDb), -12.0f, 18.0f);
|
|
p.brightnessCutoffHz = clamp (getFloatProp (*bo, "cutoff_hz", p.brightnessCutoffHz), 800.0f, 12000.0f);
|
|
p.brightnessQ = clamp (getFloatProp (*bo, "q", p.brightnessQ), 0.2f, 4.0f);
|
|
}
|
|
}
|
|
|
|
if (auto dv = obj->getProperty ("dispersion_curve"); dv.isObject())
|
|
{
|
|
if (auto* d = dv.getDynamicObject())
|
|
{
|
|
p.dispersion.highMult = clamp (getFloatProp (*d, "high_mult", p.dispersion.highMult), 0.8f, 2.5f);
|
|
p.dispersion.pow = clamp (getFloatProp (*d, "pow", p.dispersion.pow), 0.2f, 4.0f);
|
|
}
|
|
}
|
|
|
|
// optional engine_mix
|
|
if (auto mv = obj->getProperty ("engine_mix"); mv.isObject())
|
|
{
|
|
if (auto* mm = mv.getDynamicObject())
|
|
{
|
|
p.engineMixVa = clamp (getFloatProp (*mm, "va", p.engineMixVa), 0.0f, 1.0f);
|
|
p.engineMixPm = clamp (getFloatProp (*mm, "pm", p.engineMixPm), 0.0f, 1.0f);
|
|
p.engineMixPm2 = clamp (getFloatProp (*mm, "pm2", p.engineMixPm2), 0.0f, 1.0f);
|
|
float s = juce::jmax (0.0001f, p.engineMixVa + p.engineMixPm + p.engineMixPm2);
|
|
p.engineMixVa /= s;
|
|
p.engineMixPm /= s;
|
|
p.engineMixPm2 /= s;
|
|
}
|
|
}
|
|
else
|
|
{
|
|
if (p.engine == "va") { p.engineMixVa = 1.0f; p.engineMixPm = 0.0f; p.engineMixPm2 = 0.0f; }
|
|
else if (p.engine == "pm") { p.engineMixVa = 0.0f; p.engineMixPm = 1.0f; p.engineMixPm2 = 0.0f; }
|
|
else if (p.engine == "pm2"){ p.engineMixVa = 0.0f; p.engineMixPm = 0.0f; p.engineMixPm2 = 1.0f; }
|
|
else if (p.engine == "hybrid") { p.engineMixVa = 0.5f; p.engineMixPm = 0.5f; p.engineMixPm2 = 0.0f; }
|
|
}
|
|
|
|
// osc_mix
|
|
if (auto mv = obj->getProperty ("osc_mix"); mv.isObject())
|
|
{
|
|
if (auto* om = mv.getDynamicObject())
|
|
{
|
|
p.oscSine = clamp (getFloatProp (*om, "sine", p.oscSine), 0.0f, 1.0f);
|
|
p.oscSaw = clamp (getFloatProp (*om, "saw", p.oscSaw), 0.0f, 1.0f);
|
|
p.oscSquare = clamp (getFloatProp (*om, "square", p.oscSquare), 0.0f, 1.0f);
|
|
auto sum = std::max (0.0001f, p.oscSine + p.oscSaw + p.oscSquare);
|
|
p.oscSine /= sum; p.oscSaw /= sum; p.oscSquare /= sum;
|
|
}
|
|
}
|
|
|
|
// filter
|
|
if (auto fv = obj->getProperty ("filter"); fv.isObject())
|
|
{
|
|
if (auto* fo = fv.getDynamicObject())
|
|
{
|
|
p.cutoff = clamp (getFloatProp (*fo, "cutoff", p.cutoff), 100.0f, 8000.0f);
|
|
p.q = clamp (getFloatProp (*fo, "q", p.q), 0.1f, 1.5f);
|
|
}
|
|
}
|
|
|
|
// env
|
|
if (auto ev = obj->getProperty ("env"); ev.isObject())
|
|
{
|
|
if (auto* eo = ev.getDynamicObject())
|
|
{
|
|
// Use the same ranges as the parameter layout so the embedded preset
|
|
// loads without being truncated.
|
|
p.attack = clamp (getFloatProp (*eo, "attack", p.attack), 0.001f, 3.000f);
|
|
p.decay = clamp (getFloatProp (*eo, "decay", p.decay), 0.100f, 9.100f);
|
|
p.sustain = clamp (getFloatProp (*eo, "sustain", p.sustain), 0.00f, 1.00f);
|
|
p.release = clamp (getFloatProp (*eo, "release", p.release), 0.030f, 7.000f);
|
|
}
|
|
}
|
|
|
|
// shaper
|
|
if (auto sv = obj->getProperty ("shaper"); sv.isObject())
|
|
{
|
|
if (auto* so = sv.getDynamicObject())
|
|
{
|
|
p.shaperEnabled = getBoolProp (*so, "enabled", p.shaperEnabled);
|
|
p.shaperDrive = clamp (getFloatProp (*so, "drive", p.shaperDrive), 0.0f, 1.0f);
|
|
}
|
|
}
|
|
|
|
// breath
|
|
if (auto bv = obj->getProperty ("breath"); bv.isObject())
|
|
{
|
|
if (auto* bo = bv.getDynamicObject())
|
|
{
|
|
p.breathEnabled = getBoolProp (*bo, "enabled", p.breathEnabled);
|
|
p.breathLevelDb = clamp (getFloatProp (*bo, "level_db", p.breathLevelDb), -60.0f, -20.0f);
|
|
p.breathBpFreq = clamp (getFloatProp (*bo, "bp_freq", p.breathBpFreq), 1000.0f, 12000.0f);
|
|
p.breathBpQ = clamp (getFloatProp (*bo, "bp_q", p.breathBpQ), 0.4f, 3.0f);
|
|
}
|
|
}
|
|
|
|
// formants
|
|
if (auto fv2 = obj->getProperty ("formants"); fv2.isArray())
|
|
{
|
|
auto* arr = fv2.getArray();
|
|
for (int i = 0; i < juce::jmin (2, arr->size()); ++i)
|
|
{
|
|
auto el = arr->getUnchecked (i);
|
|
if (! el.isObject()) continue;
|
|
auto* fo = el.getDynamicObject();
|
|
p.formants[i].enabled = getBoolProp (*fo, "enabled", p.formants[i].enabled);
|
|
p.formants[i].freq = clamp (getFloatProp (*fo, "freq", p.formants[i].freq), 300.0f, 12000.0f);
|
|
p.formants[i].q = clamp (getFloatProp (*fo, "q", p.formants[i].q), 0.5f, 6.0f);
|
|
p.formants[i].gainDb = clamp (getFloatProp (*fo, "gain_db", p.formants[i].gainDb), -9.0f, +9.0f);
|
|
}
|
|
}
|
|
|
|
// hammer
|
|
if (auto hv = obj->getProperty ("hammer"); hv.isObject())
|
|
{
|
|
if (auto* ho = hv.getDynamicObject())
|
|
{
|
|
p.hammer.enabled = getBoolProp (*ho, "enabled", p.hammer.enabled);
|
|
p.hammer.level = clamp (getFloatProp (*ho, "level", p.hammer.level), 0.0f, 1.0f);
|
|
p.hammer.decay_s = clamp (getFloatProp (*ho, "decay_s", p.hammer.decay_s), 0.001f, 0.100f);
|
|
p.hammer.noise = clamp (getFloatProp (*ho, "noise", p.hammer.noise), 0.0f, 1.0f);
|
|
p.hammer.hp_hz = clamp (getFloatProp (*ho, "hp_hz", p.hammer.hp_hz), 200.0f, 12000.0f);
|
|
}
|
|
}
|
|
|
|
// action / mechanical noises
|
|
if (auto av = obj->getProperty ("action"); av.isObject())
|
|
{
|
|
if (auto* ao = av.getDynamicObject())
|
|
{
|
|
p.action.keyOffEnabled = getBoolProp (*ao, "key_off_enabled", p.action.keyOffEnabled);
|
|
p.action.keyOffLevel = clamp (getFloatProp (*ao, "key_off_level", p.action.keyOffLevel), 0.0f, 1.0f);
|
|
p.action.keyOffDecay_s = clamp (getFloatProp (*ao, "key_off_decay_s", p.action.keyOffDecay_s), 0.001f, 0.200f);
|
|
p.action.keyOffVelScale = getBoolProp (*ao, "key_off_vel_scale", p.action.keyOffVelScale);
|
|
p.action.keyOffHp_hz = clamp (getFloatProp (*ao, "key_off_hp_hz", p.action.keyOffHp_hz), 200.0f, 12000.0f);
|
|
|
|
p.action.pedalEnabled = getBoolProp (*ao, "pedal_enabled", p.action.pedalEnabled);
|
|
p.action.pedalLevel = clamp (getFloatProp (*ao, "pedal_level", p.action.pedalLevel), 0.0f, 1.0f);
|
|
p.action.pedalDecay_s = clamp (getFloatProp (*ao, "pedal_decay_s", p.action.pedalDecay_s), 0.001f, 0.300f);
|
|
p.action.pedalLp_hz = clamp (getFloatProp (*ao, "pedal_lp_hz", p.action.pedalLp_hz), 80.0f, 2000.0f);
|
|
|
|
p.action.releaseEnabled = getBoolProp (*ao, "release_enabled", p.action.releaseEnabled);
|
|
p.action.releaseLevel = clamp (getFloatProp (*ao, "release_level", p.action.releaseLevel), 0.0f, 1.0f);
|
|
p.action.releaseDecay_s = clamp (getFloatProp (*ao, "release_decay_s", p.action.releaseDecay_s), 0.001f, 0.400f);
|
|
p.action.releaseLp_hz = clamp (getFloatProp (*ao, "release_lp_hz", p.action.releaseLp_hz), 80.0f, 2000.0f);
|
|
p.action.releaseThudMix = clamp (getFloatProp (*ao, "release_thud_mix",p.action.releaseThudMix), 0.0f, 1.0f);
|
|
p.action.releaseThudHp_hz = clamp (getFloatProp (*ao, "release_thud_hp_hz", p.action.releaseThudHp_hz), 20.0f, 400.0f);
|
|
}
|
|
}
|
|
|
|
// soundboard
|
|
if (auto svb = obj->getProperty ("soundboard"); svb.isObject())
|
|
{
|
|
if (auto* so = svb.getDynamicObject())
|
|
{
|
|
p.soundboard.enabled = getBoolProp (*so, "enabled", p.soundboard.enabled);
|
|
p.soundboard.mix = clamp (getFloatProp (*so, "mix", p.soundboard.mix), 0.0f, 1.0f);
|
|
p.soundboard.t60_s = clamp (getFloatProp (*so, "t60_s", p.soundboard.t60_s), 1.6f, 2.8f);
|
|
p.soundboard.damp = clamp (getFloatProp (*so, "damp", p.soundboard.damp), 0.0f, 1.0f);
|
|
}
|
|
}
|
|
|
|
// pm_string (pm2 scaffolding)
|
|
if (auto psv = obj->getProperty ("pm_string"); psv.isObject())
|
|
{
|
|
if (auto* ps = psv.getDynamicObject())
|
|
{
|
|
p.pmString.numStrings = juce::jlimit (1, 3, (int) getFloatProp (*ps, "num_strings", (float) p.pmString.numStrings));
|
|
|
|
auto clampDetune = [] (float x) { return PresetModel::clamp (x, -5.0f, 5.0f); };
|
|
auto clampGain = [] (float x) { return PresetModel::clamp (x, 0.0f, 1.0f); };
|
|
|
|
if (auto dv = ps->getProperty ("detune_cents"); dv.isArray())
|
|
{
|
|
auto* arr = dv.getArray();
|
|
for (int i = 0; i < juce::jmin ((int) arr->size(), 3); ++i)
|
|
p.pmString.detuneCents[(size_t) i] = clampDetune ((float) arr->getUnchecked (i));
|
|
}
|
|
|
|
if (auto gv = ps->getProperty ("gain"); gv.isArray())
|
|
{
|
|
auto* arr = gv.getArray();
|
|
for (int i = 0; i < juce::jmin ((int) arr->size(), 3); ++i)
|
|
p.pmString.gain[(size_t) i] = clampGain ((float) arr->getUnchecked (i));
|
|
}
|
|
|
|
if (auto pv = ps->getProperty ("pan"); pv.isArray())
|
|
{
|
|
auto* arr = pv.getArray();
|
|
for (int i = 0; i < juce::jmin ((int) arr->size(), 3); ++i)
|
|
p.pmString.pan[(size_t) i] = PresetModel::clamp ((float) arr->getUnchecked (i), -1.0f, 1.0f);
|
|
}
|
|
|
|
p.pmString.stereoWidthLow = clamp (getFloatProp (*ps, "stereo_width_low", p.pmString.stereoWidthLow), 0.0f, 1.5f);
|
|
p.pmString.stereoWidthHigh = clamp (getFloatProp (*ps, "stereo_width_high", p.pmString.stereoWidthHigh), 0.0f, 1.5f);
|
|
p.pmString.stereoWidthNoteLo = clamp (getFloatProp (*ps, "stereo_width_note_lo", p.pmString.stereoWidthNoteLo), 0.0f, 127.0f);
|
|
p.pmString.stereoWidthNoteHi = clamp (getFloatProp (*ps, "stereo_width_note_hi", p.pmString.stereoWidthNoteHi), 0.0f, 127.0f);
|
|
|
|
// normalize gain to sum=1
|
|
float gsum = p.pmString.gain[0] + p.pmString.gain[1] + p.pmString.gain[2];
|
|
if (gsum <= 1e-6f) gsum = 1.0f;
|
|
for (float& g : p.pmString.gain) g = g / gsum;
|
|
for (int i = p.pmString.numStrings; i < 3; ++i)
|
|
{
|
|
p.pmString.detuneCents[(size_t) i] = 0.0f;
|
|
p.pmString.gain[(size_t) i] = 0.0f;
|
|
}
|
|
|
|
p.pmString.dispersionAmt = clamp (getFloatProp (*ps, "dispersion_amt", p.pmString.dispersionAmt), 0.0f, 1.0f);
|
|
p.pmString.apStages = juce::jlimit (1, 4, (int) getFloatProp (*ps, "ap_stages", (float) p.pmString.apStages));
|
|
p.pmString.loss = clamp (getFloatProp (*ps, "loss", p.pmString.loss), 0.0005f, 0.02f);
|
|
p.pmString.dcBlockHz = clamp (getFloatProp (*ps, "dc_block_hz", p.pmString.dcBlockHz), 3.0f, 20.0f);
|
|
}
|
|
}
|
|
|
|
// hammer_model (pm2 excitation)
|
|
if (auto hv = obj->getProperty ("hammer_model"); hv.isObject())
|
|
{
|
|
if (auto* hm = hv.getDynamicObject())
|
|
{
|
|
p.hammerModel.force = clamp (getFloatProp (*hm, "force", p.hammerModel.force), 0.0f, 1.0f);
|
|
p.hammerModel.toneHz = clamp (getFloatProp (*hm, "tone_hz", p.hammerModel.toneHz), 1500.0f, 6000.0f);
|
|
p.hammerModel.attackMs = clamp (getFloatProp (*hm, "attack_ms", p.hammerModel.attackMs), 1.0f, 12.0f);
|
|
p.hammerModel.softclip = getBoolProp (*hm, "softclip", p.hammerModel.softclip);
|
|
p.hammerModel.gamma = clamp (getFloatProp (*hm, "gamma", p.hammerModel.gamma), 0.6f, 2.5f);
|
|
p.hammerModel.massKg = clamp (getFloatProp (*hm, "mass_kg", p.hammerModel.massKg), 0.005f, 0.08f);
|
|
p.hammerModel.contactStiffness = clamp (getFloatProp (*hm, "contact_stiffness", p.hammerModel.contactStiffness), 200.0f, 20000.0f);
|
|
p.hammerModel.contactExponent = clamp (getFloatProp (*hm, "contact_exponent", p.hammerModel.contactExponent), 1.4f, 4.0f);
|
|
p.hammerModel.contactDamping = clamp (getFloatProp (*hm, "contact_damping", p.hammerModel.contactDamping), 0.5f, 40.0f);
|
|
p.hammerModel.maxPenetration = clamp (getFloatProp (*hm, "max_penetration", p.hammerModel.maxPenetration), 0.0005f, 0.03f);
|
|
p.hammerModel.attackWindowMs = clamp (getFloatProp (*hm, "attack_window_ms", p.hammerModel.attackWindowMs), 1.0f, 20.0f);
|
|
p.hammerModel.simplifiedMode = getBoolProp (*hm, "simplified_mode", p.hammerModel.simplifiedMode);
|
|
p.hammerModel.stiffnessVelScale = clamp (getFloatProp (*hm, "stiffness_vel_scale", p.hammerModel.stiffnessVelScale), 0.0f, 3.0f);
|
|
p.hammerModel.toneVelScale = clamp (getFloatProp (*hm, "tone_vel_scale", p.hammerModel.toneVelScale), 0.0f, 3.0f);
|
|
p.hammerModel.preloadVelScale = clamp (getFloatProp (*hm, "preload_vel_scale", p.hammerModel.preloadVelScale), 0.0f, 3.0f);
|
|
p.hammerModel.toneMinHz = clamp (getFloatProp (*hm, "tone_min_hz", p.hammerModel.toneMinHz), 800.0f, 12000.0f);
|
|
p.hammerModel.toneMaxHz = clamp (getFloatProp (*hm, "tone_max_hz", p.hammerModel.toneMaxHz), 2000.0f, 18000.0f);
|
|
}
|
|
}
|
|
|
|
// felt/contact shaping
|
|
if (auto fv = obj->getProperty ("felt"); fv.isObject())
|
|
{
|
|
if (auto* fo = fv.getDynamicObject())
|
|
{
|
|
p.feltModel.preload = clamp (getFloatProp (*fo, "felt_preload", p.feltModel.preload), 0.0f, 0.6f);
|
|
p.feltModel.stiffness = clamp (getFloatProp (*fo, "felt_stiffness", p.feltModel.stiffness), 1.0f, 5.0f);
|
|
p.feltModel.hysteresis = clamp (getFloatProp (*fo, "felt_hysteresis", p.feltModel.hysteresis), 0.0f, 0.6f);
|
|
p.feltModel.maxAmp = clamp (getFloatProp (*fo, "felt_max", p.feltModel.maxAmp), 0.4f, 4.0f);
|
|
}
|
|
}
|
|
else
|
|
{
|
|
// also accept top-level felt_* keys for convenience
|
|
p.feltModel.preload = clamp (getFloatProp (*obj, "felt_preload", p.feltModel.preload), 0.0f, 0.6f);
|
|
p.feltModel.stiffness = clamp (getFloatProp (*obj, "felt_stiffness", p.feltModel.stiffness), 1.0f, 5.0f);
|
|
p.feltModel.hysteresis = clamp (getFloatProp (*obj, "felt_hysteresis", p.feltModel.hysteresis), 0.0f, 0.6f);
|
|
p.feltModel.maxAmp = clamp (getFloatProp (*obj, "felt_max", p.feltModel.maxAmp), 0.4f, 4.0f);
|
|
}
|
|
|
|
// WDF/PH blend
|
|
if (auto wv = obj->getProperty ("wdf"); wv.isObject())
|
|
{
|
|
if (auto* wo = wv.getDynamicObject())
|
|
{
|
|
p.wdf.enabled = getBoolProp (*wo, "enabled", p.wdf.enabled);
|
|
p.wdf.blend = clamp (getFloatProp (*wo, "blend", p.wdf.blend), 0.0f, 1.0f);
|
|
p.wdf.loss = clamp (getFloatProp (*wo, "loss", p.wdf.loss), 0.0f, 0.1f);
|
|
p.wdf.bridgeMass = clamp (getFloatProp (*wo, "bridge_mass", p.wdf.bridgeMass), 0.1f, 10.0f);
|
|
p.wdf.plateStiffness = clamp (getFloatProp (*wo, "plate_stiffness", p.wdf.plateStiffness), 0.1f, 5.0f);
|
|
}
|
|
}
|
|
if (auto cv = obj->getProperty ("coupling"); cv.isObject())
|
|
{
|
|
if (auto* co = cv.getDynamicObject())
|
|
{
|
|
p.coupling.gain = clamp (getFloatProp (*co, "gain", p.coupling.gain), 0.0f, 0.2f);
|
|
p.coupling.q = clamp (getFloatProp (*co, "q", p.coupling.q), 0.2f, 5.0f);
|
|
p.coupling.sympGain = clamp (getFloatProp (*co, "symp_gain", p.coupling.sympGain), 0.0f, 0.3f);
|
|
p.coupling.sympHighDamp= clamp (getFloatProp (*co, "symp_high_damp", p.coupling.sympHighDamp), 0.0f, 1.0f);
|
|
}
|
|
}
|
|
|
|
// board_modes (modal body)
|
|
if (auto bm = obj->getProperty ("board_modes"); bm.isArray())
|
|
{
|
|
p.boardModes.clear();
|
|
auto* arr = bm.getArray();
|
|
const int maxModes = 16;
|
|
for (int i = 0; i < juce::jmin (maxModes, (int) arr->size()); ++i)
|
|
{
|
|
auto el = arr->getUnchecked (i);
|
|
if (! el.isObject()) continue;
|
|
auto* mo = el.getDynamicObject();
|
|
PresetModel::BoardMode m;
|
|
m.f = clamp (getFloatProp (*mo, "f", m.f), 60.0f, 5000.0f);
|
|
m.q = clamp (getFloatProp (*mo, "q", m.q), 0.7f, 8.0f);
|
|
m.gainDb = clamp (getFloatProp (*mo, "gain_db", m.gainDb), -12.0f, 6.0f);
|
|
p.boardModes.add (m);
|
|
}
|
|
        // Defaults for an empty/invalid mode list are applied just below, shared with the
        // case where the preset has no "board_modes" array at all.
    }
    if (p.boardModes.isEmpty())
    {
        p.boardModes.add ({ 110.0f, 1.2f, -2.0f });
        p.boardModes.add ({ 250.0f, 1.4f, -1.5f });
        p.boardModes.add ({ 750.0f, 2.0f, -3.0f });
    }
|
|
|
|
p.boardSend = clamp (getFloatProp (*obj, "board_send", p.boardSend), 0.0f, 1.0f);
|
|
p.boardMix = clamp (getFloatProp (*obj, "board_mix", p.boardMix), 0.0f, 1.0f);
|
|
|
|
// pm_filter
|
|
if (auto pf = obj->getProperty ("pm_filter"); pf.isObject())
|
|
{
|
|
if (auto* pfo = pf.getDynamicObject())
|
|
{
|
|
p.pmFilter.cutoff = clamp (getFloatProp (*pfo, "cutoff", p.pmFilter.cutoff), 300.0f, 12000.0f);
|
|
p.pmFilter.q = clamp (getFloatProp (*pfo, "q", p.pmFilter.q), 0.01f, 1.2f);
|
|
p.pmFilter.keytrack = clamp (getFloatProp (*pfo, "keytrack", p.pmFilter.keytrack), 0.0f, 1.0f);
|
|
}
|
|
}
|
|
|
|
if (auto of = obj->getProperty ("output_lpf"); of.isObject())
|
|
{
|
|
if (auto* oo = of.getDynamicObject())
|
|
{
|
|
p.outputLpf.enabled = getBoolProp (*oo, "enabled", p.outputLpf.enabled);
|
|
p.outputLpf.cutoff = clamp (getFloatProp (*oo, "cutoff", p.outputLpf.cutoff), 0.0f, 18000.0f);
|
|
p.outputLpf.q = clamp (getFloatProp (*oo, "q", p.outputLpf.q), 0.2f, 2.5f);
|
|
}
|
|
}
|
|
|
|
if (auto prv = obj->getProperty ("post_room"); prv.isObject())
|
|
{
|
|
if (auto* pr = prv.getDynamicObject())
|
|
{
|
|
p.postRoomMix = clamp (getFloatProp (*pr, "mix", p.postRoomMix), 0.0f, 1.0f);
|
|
p.postRoomEnabled = getBoolProp (*pr, "enabled", p.postRoomEnabled);
|
|
}
|
|
}
|
|
|
|
if (auto eqv = obj->getProperty ("eq"); eqv.isObject())
|
|
{
|
|
if (auto* eo = eqv.getDynamicObject())
|
|
{
|
|
p.outputEq.enabled = getBoolProp (*eo, "enabled", p.outputEq.enabled);
|
|
if (auto bv = eo->getProperty ("bands"); bv.isArray())
|
|
{
|
|
auto* arr = bv.getArray();
|
|
for (int i = 0; i < juce::jmin (5, (int) arr->size()); ++i)
|
|
{
|
|
auto el = arr->getUnchecked (i);
|
|
if (! el.isObject()) continue;
|
|
auto* bo = el.getDynamicObject();
|
|
p.outputEq.bands[(size_t) i].freq = clamp (getFloatProp (*bo, "freq", p.outputEq.bands[(size_t) i].freq), 40.0f, 16000.0f);
|
|
p.outputEq.bands[(size_t) i].q = clamp (getFloatProp (*bo, "q", p.outputEq.bands[(size_t) i].q), 0.3f, 6.0f);
|
|
p.outputEq.bands[(size_t) i].gainDb = clamp (getFloatProp (*bo, "gain_db", p.outputEq.bands[(size_t) i].gainDb), -18.0f, 18.0f);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
p.tiltDb = clamp (getFloatProp (*obj, "tilt_db", p.tiltDb), -6.0f, 6.0f);
|
|
p.predelayMs = clamp (getFloatProp (*obj, "predelay_ms", p.predelayMs), 0.0f, 20.0f);
|
|
|
|
// Pedal
|
|
if (auto ped = obj->getProperty ("pedal"); ped.isObject())
|
|
{
|
|
if (auto* po = ped.getDynamicObject())
|
|
{
|
|
p.pedal.sustainThresh = clamp (getFloatProp (*po, "sustain_thresh", p.pedal.sustainThresh), 0.0f, 1.0f);
|
|
p.pedal.halfThresh = clamp (getFloatProp (*po, "half_thresh", p.pedal.halfThresh), 0.0f, 1.0f);
|
|
p.pedal.halfReleaseScale = clamp (getFloatProp (*po, "half_release_scale", p.pedal.halfReleaseScale), 0.5f, 4.0f);
|
|
p.pedal.repedalMs = clamp (getFloatProp (*po, "repedal_ms", p.pedal.repedalMs), 10.0f, 400.0f);
|
|
p.pedal.resonanceSend = clamp (getFloatProp (*po, "resonance_send", p.pedal.resonanceSend), 0.0f, 1.0f);
|
|
p.pedal.resonanceMix = clamp (getFloatProp (*po, "resonance_mix", p.pedal.resonanceMix), 0.0f, 1.0f);
|
|
p.pedal.resonanceT60 = clamp (getFloatProp (*po, "resonance_t60", p.pedal.resonanceT60), 0.2f, 4.0f);
|
|
p.pedal.sustainReleaseScale= clamp (getFloatProp (*po, "sustain_release_scale", p.pedal.sustainReleaseScale), 1.0f, 4.0f);
|
|
p.pedal.sustainGainDb = clamp (getFloatProp (*po, "sustain_gain_db", p.pedal.sustainGainDb), 0.0f, 6.0f);
|
|
}
|
|
}
|
|
|
|
// Damper
|
|
if (auto dv = obj->getProperty ("damper"); dv.isObject())
|
|
{
|
|
if (auto* d = dv.getDynamicObject())
|
|
{
|
|
p.damper.lossDamped = clamp (getFloatProp (*d, "loss_damped", p.damper.lossDamped), 0.5f, 1.0f);
|
|
p.damper.lossHalf = clamp (getFloatProp (*d, "loss_half", p.damper.lossHalf), 0.5f, 1.0f);
|
|
p.damper.lossOff = clamp (getFloatProp (*d, "loss_off", p.damper.lossOff), 0.8f, 1.2f);
|
|
p.damper.smoothMs = clamp (getFloatProp (*d, "smooth_ms", p.damper.smoothMs), 1.0f, 120.0f);
|
|
p.damper.softenMs = clamp (getFloatProp (*d, "soften_ms", p.damper.softenMs), 1.0f, 80.0f);
|
|
p.damper.softenHz = clamp (getFloatProp (*d, "soften_hz", p.damper.softenHz), 100.0f, 8000.0f);
|
|
}
|
|
}
|
|
|
|
if (auto una = obj->getProperty ("una_corda"); una.isObject())
|
|
{
|
|
if (auto* uo = una.getDynamicObject())
|
|
{
|
|
p.unaCorda.detuneCents = clamp (getFloatProp (*uo, "detune_cents", p.unaCorda.detuneCents), -12.0f, 12.0f);
|
|
p.unaCorda.gainScale = clamp (getFloatProp (*uo, "gain_scale", p.unaCorda.gainScale), 0.3f, 1.0f);
|
|
}
|
|
}
|
|
|
|
if (auto dv = obj->getProperty ("duplex"); dv.isObject())
|
|
{
|
|
if (auto* du = dv.getDynamicObject())
|
|
{
|
|
p.duplex.ratio = clamp (getFloatProp (*du, "ratio", p.duplex.ratio), 1.1f, 4.0f);
|
|
p.duplex.gainDb = clamp (getFloatProp (*du, "gain_db", p.duplex.gainDb), -20.0f, -6.0f);
|
|
p.duplex.decayMs = clamp (getFloatProp (*du, "decay_ms", p.duplex.decayMs), 10.0f, 400.0f);
|
|
p.duplex.sympSend = clamp (getFloatProp (*du, "symp_send", p.duplex.sympSend), 0.0f, 1.0f);
|
|
p.duplex.sympMix = clamp (getFloatProp (*du, "symp_mix", p.duplex.sympMix), 0.0f, 1.0f);
|
|
p.duplex.sympNoPedalScale = clamp (getFloatProp (*du, "symp_no_pedal_scale", p.duplex.sympNoPedalScale), 0.0f, 1.0f);
|
|
}
|
|
}
|
|
|
|
// Mic perspectives (optional)
|
|
if (auto mv = obj->getProperty ("mics"); mv.isObject())
|
|
{
|
|
if (auto* mo = mv.getDynamicObject())
|
|
{
|
|
auto parseMic = [&clamp] (const juce::DynamicObject* o, PresetModel::Mic& m)
|
|
{
|
|
if (o == nullptr) return;
|
|
m.gainDb = clamp (getFloatProp (*o, "gain_db", m.gainDb), -24.0f, 12.0f);
|
|
m.delayMs = clamp (getFloatProp (*o, "delay_ms", m.delayMs), 0.0f, 30.0f);
|
|
m.lowShelfDb = clamp (getFloatProp (*o, "low_shelf_db", m.lowShelfDb), -12.0f, 12.0f);
|
|
m.highShelfDb = clamp (getFloatProp (*o, "high_shelf_db",m.highShelfDb),-12.0f, 12.0f);
|
|
m.shelfFreq = clamp (getFloatProp (*o, "shelf_freq", m.shelfFreq), 200.0f, 8000.0f);
|
|
};
|
|
|
|
if (auto c = mo->getProperty ("close"); c.isObject()) parseMic (c.getDynamicObject(), p.mics.close);
|
|
if (auto c = mo->getProperty ("player"); c.isObject()) parseMic (c.getDynamicObject(), p.mics.player);
|
|
if (auto c = mo->getProperty ("room"); c.isObject()) parseMic (c.getDynamicObject(), p.mics.room);
|
|
|
|
if (auto bv = mo->getProperty ("blend"); bv.isArray())
|
|
{
|
|
auto* arr = bv.getArray();
|
|
for (int i = 0; i < juce::jmin (3, (int) arr->size()); ++i)
|
|
p.mics.blend[(size_t) i] = clamp ((float) arr->getUnchecked (i), 0.0f, 1.0f);
|
|
}
|
|
}
|
|
}
|
|
{
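        // Normalize the three mic blend weights so they sum to 1; if they are all (near)
        // zero, fall back to the close mic only: {0.5, 0.3, 0.2} is kept as-is, while
        // {0, 0, 0} becomes {1, 0, 0}.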
|
|
float s = p.mics.blend[0] + p.mics.blend[1] + p.mics.blend[2];
|
|
if (s <= 1.0e-6f) { p.mics.blend = { 1.0f, 0.0f, 0.0f }; }
|
|
else { for (float& b : p.mics.blend) b /= s; }
|
|
}
|
|
|
|
// Optional loudness trim for pm2 path (defaults to +32 dB, clamps for safety)
|
|
p.pm2GainDb = clamp (getFloatProp (*obj, "pm2_gain_db", p.pm2GainDb), -24.0f, 42.0f);
|
|
|
|
return p;
|
|
}
|
|
|
|
void FluteSynthAudioProcessor::applyPresetToParameters (const PresetModel& p)
|
|
{
|
|
currentEngine = p.engine;
|
|
|
|
vaMix = juce::jlimit (0.0f, 1.0f, p.engineMixVa);
|
|
pmMix = juce::jlimit (0.0f, 1.0f, p.engineMixPm);
|
|
pm2Mix = juce::jlimit (0.0f, 1.0f, p.engineMixPm2);
|
|
masterTuneCents = p.masterTuneCents;
|
|
pitchCompOffsetCents = p.pitchCompOffsetCents;
|
|
pitchCompSlopeCents = p.pitchCompSlopeCents;
|
|
{
|
|
const auto pcOffsets = p.temperamentUseOffsets ? p.temperamentOffsetsCents
|
|
: getTemperamentOffsetsByName (p.temperamentName);
|
|
presetNoteOffsetsCents = p.perNoteOffsetsEnabled ? p.perNoteOffsetsCents
|
|
: expandPitchClassOffsets (pcOffsets);
|
|
noteOffsetsCents = presetNoteOffsetsCents;
|
|
}
|
|
pm2LoudnessSlopeDbPerSemi = p.pm2LoudnessSlopeDbPerSemi;
|
|
velocityGammaBase = p.velocityGamma;
|
|
velocityGamma = p.velocityGamma;
|
|
velocityCurveName = p.velocityCurve;
|
|
if (velocityCurveName == "soft") velocityGamma = 0.9f * velocityGamma;
|
|
else if (velocityCurveName == "hard") velocityGamma = 1.15f * velocityGamma;
|
|
|
|
brightnessEnabled = p.brightnessEnabled;
|
|
brightnessBaseDb = p.brightnessBaseDb;
|
|
brightnessVelSlopeDb = p.brightnessVelSlopeDb;
|
|
brightnessNoteSlopeDb = p.brightnessNoteSlopeDb;
|
|
brightnessMaxDb = p.brightnessMaxDb;
|
|
brightnessCutoffHz = p.brightnessCutoffHz;
|
|
brightnessQ = p.brightnessQ;
|
|
|
|
if (lastSampleRate > 0.0)
|
|
{
|
|
const float initialDb = brightnessEnabled ? juce::jlimit (-12.0f, brightnessMaxDb,
|
|
brightnessBaseDb + lastVelocityNorm * brightnessVelSlopeDb)
|
|
: 0.0f;
|
|
updateBrightnessFilters (initialDb);
|
|
brightnessDbSmoothed.setTargetValue (initialDb);
|
|
}
|
|
|
|
dispersionCfg = p.dispersion;
|
|
applyMasterTuneToVoices();
|
|
|
|
// APVTS (VA)
|
|
*apvts.getRawParameterValue (ParamIDs::oscSine) = p.oscSine;
|
|
*apvts.getRawParameterValue (ParamIDs::oscSaw) = p.oscSaw;
|
|
*apvts.getRawParameterValue (ParamIDs::oscSquare) = p.oscSquare;
|
|
|
|
*apvts.getRawParameterValue (ParamIDs::cutoff) = p.cutoff;
|
|
*apvts.getRawParameterValue (ParamIDs::resonance) = p.q;
|
|
|
|
*apvts.getRawParameterValue (ParamIDs::attack) = p.attack;
|
|
*apvts.getRawParameterValue (ParamIDs::decay) = p.decay;
|
|
*apvts.getRawParameterValue (ParamIDs::sustain) = p.sustain;
|
|
*apvts.getRawParameterValue (ParamIDs::release) = p.release;
|
|
baseRelease = p.release;
|
|
releaseExtension = p.releaseExtension;
|
|
|
|
*apvts.getRawParameterValue (ParamIDs::noiseDb) = p.noiseDb;
|
|
outputGainLin = juce::Decibels::decibelsToGain (p.outputGainDb);
|
|
outputGainLinSmoothed.setTargetValue (outputGainLin);
|
|
|
|
// Extended controls (post / pm2 scaffolding)
|
|
*apvts.getRawParameterValue (ParamIDs::formant1Enable) = p.formants[0].enabled ? 1.0f : 0.0f;
|
|
*apvts.getRawParameterValue (ParamIDs::formant1Freq) = p.formants[0].freq;
|
|
*apvts.getRawParameterValue (ParamIDs::formant1Q) = p.formants[0].q;
|
|
*apvts.getRawParameterValue (ParamIDs::formant1GainDb) = p.formants[0].gainDb;
|
|
*apvts.getRawParameterValue (ParamIDs::formant2Enable) = p.formants[1].enabled ? 1.0f : 0.0f;
|
|
*apvts.getRawParameterValue (ParamIDs::formant2Freq) = p.formants[1].freq;
|
|
*apvts.getRawParameterValue (ParamIDs::formant2Q) = p.formants[1].q;
|
|
*apvts.getRawParameterValue (ParamIDs::formant2GainDb) = p.formants[1].gainDb;
|
|
|
|
*apvts.getRawParameterValue (ParamIDs::soundboardEnable) = p.soundboard.enabled ? 1.0f : 0.0f;
|
|
*apvts.getRawParameterValue (ParamIDs::soundboardMix) = p.soundboard.mix;
|
|
*apvts.getRawParameterValue (ParamIDs::soundboardT60) = p.soundboard.t60_s;
|
|
*apvts.getRawParameterValue (ParamIDs::soundboardDamp) = p.soundboard.damp;
|
|
*apvts.getRawParameterValue (ParamIDs::postRoomMix) = p.postRoomMix;
|
|
*apvts.getRawParameterValue (ParamIDs::postRoomEnable) = p.postRoomEnabled ? 1.0f : 0.0f;
|
|
|
|
*apvts.getRawParameterValue (ParamIDs::feltPreload) = p.feltModel.preload;
|
|
*apvts.getRawParameterValue (ParamIDs::feltStiffness) = p.feltModel.stiffness;
|
|
*apvts.getRawParameterValue (ParamIDs::feltHysteresis) = p.feltModel.hysteresis;
|
|
*apvts.getRawParameterValue (ParamIDs::feltMax) = p.feltModel.maxAmp;
|
|
|
|
*apvts.getRawParameterValue (ParamIDs::duplexRatio) = p.duplex.ratio;
|
|
*apvts.getRawParameterValue (ParamIDs::duplexGainDb) = p.duplex.gainDb;
|
|
*apvts.getRawParameterValue (ParamIDs::duplexDecayMs) = p.duplex.decayMs;
|
|
*apvts.getRawParameterValue (ParamIDs::duplexSympSend) = p.duplex.sympSend;
|
|
*apvts.getRawParameterValue (ParamIDs::duplexSympMix) = p.duplex.sympMix;
|
|
|
|
*apvts.getRawParameterValue (ParamIDs::pm2GainDb) = p.pm2GainDb;
|
|
*apvts.getRawParameterValue (ParamIDs::outputLpfEnable) = p.outputLpf.enabled ? 1.0f : 0.0f;
|
|
*apvts.getRawParameterValue (ParamIDs::outputLpfCutoff) = p.outputLpf.cutoff;
|
|
*apvts.getRawParameterValue (ParamIDs::outputLpfQ) = p.outputLpf.q;
|
|
if (auto* v = apvts.getRawParameterValue (ParamIDs::temperament))
|
|
*v = 0.0f; // "Preset"
|
|
|
|
// Shaper
|
|
shaperEnabled = p.shaperEnabled;
|
|
shaperDrive = p.shaperDrive;
|
|
|
|
// Breath
|
|
breathEnabled = p.breathEnabled;
|
|
breathGainLin = juce::Decibels::decibelsToGain (p.breathLevelDb);
|
|
breathBpFreqStored = p.breathBpFreq;
|
|
breathBpQStored = p.breathBpQ;
|
|
breathBp.setCutoffFrequency (breathBpFreqStored);
|
|
breathBp.setResonance (breathBpQStored);
|
|
|
|
// Formants
|
|
for (int i = 0; i < 2; ++i)
|
|
{
|
|
formant[i].enabled = p.formants[i].enabled;
|
|
formant[i].f.setCutoffFrequency (p.formants[i].freq);
|
|
formant[i].f.setResonance (p.formants[i].q);
|
|
formant[i].gainLin = juce::Decibels::decibelsToGain (p.formants[i].gainDb);
|
|
}
|
|
|
|
// Hammer
|
|
hammerEnabled = p.hammer.enabled;
|
|
hammerLevel = p.hammer.level;
|
|
hammerNoise = p.hammer.noise;
|
|
hammerActive = false;
|
|
hammerEnv = 0.0f;
|
|
hammerHpHz = p.hammer.hp_hz;
|
|
hammerDecaySec = p.hammer.decay_s; // Store for recalculation in prepareToPlay
|
|
// Calculate decay coefficient (will be recalculated in prepareToPlay if sample rate was 0)
|
|
if (hammerDecaySec <= 0.0005f) hammerDecayCoeff = 0.0f;
|
|
else if (lastSampleRate > 0.0)
|
|
{
|
|
const double tau = std::max (0.0005, (double) hammerDecaySec);
|
|
hammerDecayCoeff = (float) std::exp (-1.0 / (tau * lastSampleRate));
|
|
}
|
|
// else: leave hammerDecayCoeff at default, will be fixed in prepareToPlay
|
|
hammerHP.setCutoffFrequency (hammerHpHz);

    // Action / mechanical noises
    keyOffEnabled = p.action.keyOffEnabled;
    keyOffVelScale = p.action.keyOffVelScale;
    keyOffLevel = p.action.keyOffLevel;
    keyOffEnv = 0.0f;
    keyOffDecaySec = p.action.keyOffDecay_s;
    keyOffHpHz = p.action.keyOffHp_hz;
    if (keyOffDecaySec <= 0.0005f) keyOffDecayCoeff = 0.0f;
    else if (lastSampleRate > 0.0)
    {
        const double tau = std::max (0.0005, (double) keyOffDecaySec);
        keyOffDecayCoeff = (float) std::exp (-1.0 / (tau * lastSampleRate));
    }
    keyOffHP.setCutoffFrequency (keyOffHpHz);

    pedalThumpEnabled = p.action.pedalEnabled;
    pedalThumpLevel = p.action.pedalLevel;
    pedalThumpEnv = 0.0f;
    pedalThumpDecaySec = p.action.pedalDecay_s;
    pedalThumpLpHz = p.action.pedalLp_hz;
    if (pedalThumpDecaySec <= 0.0005f) pedalThumpDecayCoeff = 0.0f;
    else if (lastSampleRate > 0.0)
    {
        const double tau = std::max (0.0005, (double) pedalThumpDecaySec);
        pedalThumpDecayCoeff = (float) std::exp (-1.0 / (tau * lastSampleRate));
    }
    pedalThumpLP.setCutoffFrequency (pedalThumpLpHz);

    releaseThumpEnabled = p.action.releaseEnabled;
    releaseThumpLevel = p.action.releaseLevel;
    releaseThumpEnv = 0.0f;
    releaseThumpDecaySec = p.action.releaseDecay_s;
    releaseThumpLpHz = p.action.releaseLp_hz;
    releaseThudMix = p.action.releaseThudMix;
    releaseThudHpHz = p.action.releaseThudHp_hz;
    if (releaseThumpDecaySec <= 0.0005f) releaseThumpDecayCoeff = 0.0f;
    else if (lastSampleRate > 0.0)
    {
        const double tau = std::max (0.0005, (double) releaseThumpDecaySec);
        releaseThumpDecayCoeff = (float) std::exp (-1.0 / (tau * lastSampleRate));
    }
    releaseThumpLP.setCutoffFrequency (releaseThumpLpHz);
    releaseThudHP.setCutoffFrequency (releaseThudHpHz);
    damperCfg = p.damper;
    updateDamperCoeffs();

    // Soundboard
    soundboardEnabled = p.soundboard.enabled;
    soundboardMix = p.soundboard.mix;

    soundboardParams = {};
    const float room = juce::jlimit (0.0f, 1.0f, p.soundboard.t60_s / 3.0f); // 0..~3s
    soundboardParams.roomSize = room;
    soundboardParams.damping = juce::jlimit (0.0f, 1.0f, p.soundboard.damp);
    soundboardParams.width = 0.6f;
    soundboardParams.wetLevel = 1.0f; // we do wet/dry outside
    soundboardParams.dryLevel = 0.0f;
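
    // The "roomSize" control is unitless (0..1), so the soundboard decay time is
    // mapped linearly: roomSize = clamp (t60_s / 3). For example, t60_s = 1.5 gives
    // roomSize = 0.5, and anything >= 3 s saturates at 1.0. The field names match
    // juce::Reverb::Parameters, but this is a rough perceptual mapping, not a
    // calibrated RT60.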

    // NEW: pass the same ADSR to the PM engine so env.* applies there too
    for (int i = 0; i < synth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<FluteVoice*> (synth.getVoice (i)))
        {
            v->setPitchComp (pitchCompOffsetCents, pitchCompSlopeCents);
            v->setNoteOffsets (noteOffsetsCents);
        }
    for (int i = 0; i < pmSynth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<PmVoice*> (pmSynth.getVoice (i)))
        {
            v->setEnvParams (p.attack, p.decay, p.sustain, p.release);
            v->setReleaseScale (baseRelease, 1.0f);
            v->setPitchComp (pitchCompOffsetCents, pitchCompSlopeCents);
            v->setNoteOffsets (noteOffsetsCents);
        }
    for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
        {
            v->setEnvParams (p.attack, p.decay, p.sustain, p.release);
            v->setReleaseScale (baseRelease, 1.0f);
            v->setWdfParams (sanitizeWdf (p.wdf));
            v->setPitchComp (pitchCompOffsetCents, pitchCompSlopeCents);
            v->setNoteOffsets (noteOffsetsCents);
            v->setLoudnessSlope (pm2LoudnessSlopeDbPerSemi);
            v->setReleaseExtension (p.releaseExtension);
            v->setSustainPedalDown (sustainPedalDown);
            v->setDamperParams (p.damper);
            v->setDamperLift (damperLift);
            v->setCouplingParams (p.coupling);
        }
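
    // The three loops above repeat the same dynamic_cast-and-configure pattern.
    // Assuming synth / pmSynth / pm2Synth are juce::Synthesiser (or derived), a small
    // helper of the following shape could collapse the duplication. This is a sketch
    // only; forEachVoiceOfType does not exist in this codebase:
    //
    //     template <typename VoiceType, typename Fn>
    //     static void forEachVoiceOfType (juce::Synthesiser& s, Fn&& fn)
    //     {
    //         for (int i = 0; i < s.getNumVoices(); ++i)
    //             if (auto* v = dynamic_cast<VoiceType*> (s.getVoice (i)))
    //                 fn (*v);
    //     }
    //
    //     forEachVoiceOfType<Pm2Voice> (pm2Synth, [&] (Pm2Voice& v)
    //     {
    //         v.setEnvParams (p.attack, p.decay, p.sustain, p.release);
    //         // ...remaining per-voice setters as above...
    //     });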

    // pm2 scaffolding (store parsed values; DSP lands in Phase 2)
    pmString = p.pmString;
    pmHammer = p.hammerModel;
    pmFelt = p.feltModel;
    wdfCfg = sanitizeWdf (p.wdf);
    couplingCfg = p.coupling;
    pmBoardModes = p.boardModes;
    pmBoardSend = p.boardSend;
    pmBoardMix = p.boardMix;
    pmToneFilter = p.pmFilter;
    pmTiltDb = p.tiltDb;
    pmPredelayMs = p.predelayMs;
    pedalCfg = p.pedal;
    damperCfg = p.damper;
    unaCfg = p.unaCorda;
    duplexCfg = p.duplex;
    micCfg = p.mics;
    halfReleaseScale = p.pedal.halfReleaseScale;
    pm2GainDb = p.pm2GainDb;
    pm2GainLin = juce::Decibels::decibelsToGain (pm2GainDb);
    postCutoffHz = p.pmFilter.cutoff;
    postQ = p.pmFilter.q;
    postKeytrack = p.pmFilter.keytrack;
    postTiltDb = p.tiltDb;
    outputLpfEnabled = p.outputLpf.enabled && DebugToggles::kEnableOutputLpf;
    outputLpfCutoff = p.outputLpf.cutoff;
    outputLpfQ = p.outputLpf.q;
    postCutoffHzSmoothed.setTargetValue (postCutoffHz);
    postQSmoothed.setTargetValue (postQ);
    postTiltDbSmoothed.setTargetValue (postTiltDb);
    outputLpfCutoffSmoothed.setTargetValue (outputLpfCutoff);
    outputLpfQSmoothed.setTargetValue (outputLpfQ);
    outputEqEnabled = p.outputEq.enabled;
    outputEqCfg = p.outputEq;
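
    // The *Smoothed members behave like juce::SmoothedValue<float> targets: the call
    // to setTargetValue() above only publishes the destination, and the audio path is
    // expected to ramp toward it. A minimal usage sketch (the 50 ms ramp length is an
    // assumed value, not taken from this code):
    //
    //     // prepareToPlay:
    //     postCutoffHzSmoothed.reset (sampleRate, 0.05);   // 50 ms ramp
    //     // processBlock, per sample or per block:
    //     const float cutoffNow = postCutoffHzSmoothed.getNextValue();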

    if (! prepared || ! anyVoiceActive())
    {
        postVaLp1.reset(); postVaLp2.reset();
        postPmLp1.reset(); postPmLp2.reset();
        postPm2Lp1.reset(); postPm2Lp2.reset();
        tiltLow.reset(); tiltHigh.reset();
        outputLpf.reset();
        for (auto& f : outputEqFilters)
            f.reset();
    }
    else
    {
        pendingStateReset = true;
    }
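
    // While voices are still sounding, hard-resetting the post filters here would
    // click mid-note, so the reset is deferred: pendingStateReset marks it to be
    // performed later from the audio thread (presumably at the top of processBlock).
    // Otherwise the filters are cleared immediately while nothing is audible.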

    for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
        {
            v->setParams (p.pmString);
            v->setHammerParams (p.hammerModel);
            v->setFeltParams (p.feltModel);
            v->setDuplexParams (p.duplex);
            v->setSoftPedal (softPedalDown, p.unaCorda);
            v->setEnvParams (p.attack, p.decay, p.sustain, p.release);
        }
    modalDirty = true;
    // Pedal resonance
    if (! prepared || ! anyVoiceActive())
        pedalReverb.reset();
    pedalReverbParams = {};
    pedalReverbParams.roomSize = juce::jlimit (0.0f, 1.0f, p.pedal.resonanceT60 / 3.0f);
    pedalReverbParams.damping = 0.4f;
    pedalReverbParams.wetLevel = 1.0f;
    pedalReverbParams.dryLevel = 0.0f;

    // Sympathetic reverb params (light, short)
    if (! prepared || ! anyVoiceActive())
        sympReverb.reset();
    sympParams = {};
    sympParams.roomSize = juce::jlimit (0.0f, 1.0f, p.duplex.decayMs / 400.0f); // rough tie to decay
    sympParams.damping = 0.4f;
    sympParams.wetLevel = 1.0f;
    sympParams.dryLevel = 0.0f;

    // Ensure runtime state matches APVTS (so host automation works)
    syncExtendedParamsFromAPVTS();

    updatePostFiltersForNote (lastMidiNote);
    updateOutputLpf();
    updateOutputEq();
    tiltReady = (tiltLow.coefficients != nullptr && tiltHigh.coefficients != nullptr);
    updateMicProcessors();
    presetUiSyncPending.store (true, std::memory_order_release);
}

// Public API used by CLI or GUI reset
bool FluteSynthAudioProcessor::loadEmbeddedPreset ()
{
    if (! loadEmbeddedPresetModel())
        return false;

    if (embeddedPresets.empty())
        return false;

    const int presetIdx = juce::jlimit (0, (int) embeddedPresets.size() - 1,
                                        activeEmbeddedPresetIndex.load());

    // If not yet prepared (constructor/startup), apply immediately.
    if (! prepared)
    {
        activeEmbeddedPresetIndex.store (presetIdx, std::memory_order_release);
        applyPresetToParameters (embeddedPresets[(size_t) presetIdx].model);
        return true;
    }

    // Otherwise, schedule application on the audio thread to avoid GUI/DSP races.
    requestEmbeddedPresetApply (presetIdx);
    return true;
}

bool FluteSynthAudioProcessor::loadEmbeddedPresetModel ()
{
    // FIX: Always load presets from JSON file, but apply physics-based parameters
    // This ensures all presets appear in the dropdown menu

    if (BinaryData::preset_jsonSize <= 0)
    {
        // Fallback: create single physics preset if no JSON available
        if (PhysicsToggles::kUsePhysicsDefaults)
        {
            embeddedPresetLoaded.store (false, std::memory_order_release);
            embeddedPresets.clear();
            EmbeddedPreset ep;
            ep.name = "Physics Default";
            ep.model = buildPhysicsPresetModel();
            embeddedPresets.push_back (std::move (ep));
            embeddedPresetLoaded.store (true, std::memory_order_release);
            activeEmbeddedPresetIndex.store (0, std::memory_order_release);
            return true;
        }
        return false;
    }

    embeddedPresetLoaded.store (false, std::memory_order_release);
    embeddedPresets.clear();

    juce::MemoryInputStream in (BinaryData::preset_json,
                                (size_t) BinaryData::preset_jsonSize,
                                false);
    auto text = in.readEntireStreamAsString();

    if (text.startsWith ("```"))
    {
        text = text.fromFirstOccurrenceOf ("```", false, false);
        text = text.fromFirstOccurrenceOf ("\n", false, false);
        text = text.upToLastOccurrenceOf ("```", false, false);
    }
    text = text.replace ("\\_", "_");
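
    // The stripping above tolerates preset JSON that was pasted from a chat or
    // markdown source, e.g. a payload of the form (content shown is illustrative):
    //
    //     ```json
    //     { "presets": [ { "name": "...", ... } ] }
    //     ```
    //
    // The fence is only removed when the text starts with "```", and "\_" is
    // un-escaped back to "_" so markdown-escaped keys still parse.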

    juce::var v = juce::JSON::parse (text);
    if (v.isVoid())
        return false;

    embeddedPresets.clear();

    auto pushPreset = [this] (const juce::var& presetVar, int idx)
    {
        juce::String name = "Preset " + juce::String (idx + 1);
        if (auto* obj = presetVar.getDynamicObject())
            if (obj->hasProperty ("name") && obj->getProperty ("name").isString())
                name = obj->getProperty ("name").toString();

        EmbeddedPreset ep;
        ep.name = name;
        // Parse preset from JSON - the JSON now contains physics-compatible values
        ep.model = parsePresetJson (presetVar);
        embeddedPresets.push_back (std::move (ep));
    };

    if (auto* obj = v.getDynamicObject())
    {
        if (auto presetsVar = obj->getProperty ("presets"); presetsVar.isArray())
        {
            auto* arr = presetsVar.getArray();
            for (int i = 0; i < arr->size(); ++i)
                pushPreset (arr->getReference (i), i);
        }
        else
        {
            pushPreset (v, 0);
        }
    }
    else if (v.isArray())
    {
        auto* arr = v.getArray();
        for (int i = 0; i < arr->size(); ++i)
            pushPreset (arr->getReference (i), i);
    }
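
    // Three top-level JSON shapes are accepted:
    //   1. an object with a "presets" array:  { "presets": [ { ... }, { ... } ] }
    //   2. a single preset object:            { "name": "...", ... }
    //   3. a bare array of presets:           [ { ... }, { ... } ]
    // Entries without a "name" string fall back to "Preset N" (1-based) in the
    // lambda above.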

    if (embeddedPresets.empty())
        return false;

    embeddedPresetLoaded.store (true, std::memory_order_release);
    activeEmbeddedPresetIndex.store (juce::jlimit (0, (int) embeddedPresets.size() - 1,
                                                   activeEmbeddedPresetIndex.load()),
                                     std::memory_order_release);
    return true;
}

void FluteSynthAudioProcessor::resetToEmbeddedPreset()
{
    requestEmbeddedPresetApply (activeEmbeddedPresetIndex.load());
}

bool FluteSynthAudioProcessor::loadPresetFromJson (const juce::File& file)
{
    if (PhysicsToggles::kUsePhysicsDefaults)
    {
        applyPresetToParameters (buildPhysicsPresetModel());
        return true;
    }

    if (! file.existsAsFile()) return false;

    juce::var v;
    {
        juce::FileInputStream in (file);
        if (! in.openedOk()) return false;
        auto text = in.readEntireStreamAsString();

        // tolerate common chat artifacts
        if (text.startsWith ("```"))
        {
            text = text.fromFirstOccurrenceOf ("```", false, false);
            text = text.fromFirstOccurrenceOf ("\n", false, false);
            text = text.upToLastOccurrenceOf ("```", false, false);
        }
        text = text.replace ("\\_", "_");

        v = juce::JSON::parse (text);
        if (v.isVoid()) return false;
    }

    auto model = parsePresetJson (v);
    applyPresetToParameters (model);

    return true;
}

void FluteSynthAudioProcessor::setWdfForTest (const PresetModel::WdfModel& wdfModel)
{
    wdfCfg = sanitizeWdf (wdfModel);
    for (int i = 0; i < pm2Synth.getNumVoices(); ++i)
        if (auto* v = dynamic_cast<Pm2Voice*> (pm2Synth.getVoice (i)))
            v->setWdfParams (wdfCfg);
}

void FluteSynthAudioProcessor::requestEmbeddedPresetApply()
{
    requestEmbeddedPresetApply (activeEmbeddedPresetIndex.load());
}

void FluteSynthAudioProcessor::requestEmbeddedPresetApply (int index)
{
    if (! embeddedPresetLoaded.load())
        loadEmbeddedPresetModel();

    if (embeddedPresets.empty())
        return;

    const int clamped = juce::jlimit (0, (int) embeddedPresets.size() - 1, index);
    activeEmbeddedPresetIndex.store (clamped, std::memory_order_release);

    if (! prepared)
    {
        applyPresetToParameters (embeddedPresets[(size_t) clamped].model);
        return;
    }

    pendingEmbeddedPresetIndex.store (clamped, std::memory_order_release);
}
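
// Once the processor is prepared, the preset is not applied on the calling (GUI /
// message) thread; only the index is published through pendingEmbeddedPresetIndex.
// The audio side is expected to pick it up at a block boundary, roughly like the
// sketch below (the real consumer lives elsewhere in this file; the -1 "nothing
// pending" sentinel is an assumption made for the example):
//
//     // at the top of processBlock:
//     const int pending = pendingEmbeddedPresetIndex.exchange (-1, std::memory_order_acq_rel);
//     if (pending >= 0 && pending < (int) embeddedPresets.size())
//         applyPresetToParameters (embeddedPresets[(size_t) pending].model);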

juce::StringArray FluteSynthAudioProcessor::getEmbeddedPresetNames() const
{
    if (! embeddedPresetLoaded.load())
        const_cast<FluteSynthAudioProcessor*> (this)->loadEmbeddedPresetModel();

    juce::StringArray names;
    for (const auto& p : embeddedPresets)
        names.add (p.name);
    return names;
}

int FluteSynthAudioProcessor::getActiveEmbeddedPresetIndex() const
{
    return activeEmbeddedPresetIndex.load();
}

void FluteSynthAudioProcessor::selectEmbeddedPreset (int index)
{
    requestEmbeddedPresetApply (index);
}

bool FluteSynthAudioProcessor::consumePendingPresetUiSync()
{
    return presetUiSyncPending.exchange (false, std::memory_order_acq_rel);
}
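
// consumePendingPresetUiSync() is a one-shot flag: it returns true at most once per
// preset application, which suits a polling editor. A hypothetical use from an editor
// timer callback (names below are illustrative, not taken from this codebase):
//
//     void FluteSynthAudioProcessorEditor::timerCallback()
//     {
//         if (processorRef.consumePendingPresetUiSync())
//             presetBox.setSelectedItemIndex (processorRef.getActiveEmbeddedPresetIndex(),
//                                             juce::dontSendNotification);
//     }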

// Factory function for JUCE wrappers
juce::AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new FluteSynthAudioProcessor();
}