Upload files to "Source"
Source/PluginProcessor.cpp
@@ -1,23 +1,27 @@
-/*
-  ==============================================================================
-
-    This file contains the basic framework code for a JUCE plugin processor.
-
-  ==============================================================================
-*/
-
 #include "PluginProcessor.h"
 #include "PluginEditor.h"

 //==============================================================================
-NeuralSynthAudioProcessor::NeuralSynthAudioProcessor() : parameters(*this, nullptr, "PARAMETERS", createParameterLayout())
+NeuralSynthAudioProcessor::NeuralSynthAudioProcessor()
+    : parameters(*this, nullptr, "PARAMETERS", createParameterLayout())
     , AudioProcessor(BusesProperties().withOutput("Output", juce::AudioChannelSet::stereo(), true))
     , audioEngine(sp)
 {
     parameters.addParameterListener("waveform", this);
-    //sp.waveform = parameters.getRawParameterValue("waveform");
+    // Wavetable params
+    sp.wtOn = parameters.getRawParameterValue("wt_on");
+    sp.wtMorph = parameters.getRawParameterValue("wt_morph");

-    // === Chorus ===
+    // === Per-panel bypass (default OFF) ===
+    sp.chorusOn = parameters.getRawParameterValue("chorus_on");
+    sp.delayOn = parameters.getRawParameterValue("delay_on");
+    sp.reverbOn = parameters.getRawParameterValue("reverb_on");
+    sp.flangerOn = parameters.getRawParameterValue("flanger_on");
+    sp.distortionOn = parameters.getRawParameterValue("distortion_on");
+    sp.filterOn = parameters.getRawParameterValue("filter_on");
+    sp.eqOn = parameters.getRawParameterValue("eq_on");
+
+    // === Chorus ===
     parameters.addParameterListener("chorus_rate", this);
     parameters.addParameterListener("chorus_depth", this);
     parameters.addParameterListener("chorus_centre", this);
@@ -47,9 +51,9 @@ NeuralSynthAudioProcessor::NeuralSynthAudioProcessor() : parameters(*this, nullp
     sp.reverbWetLevel = parameters.getRawParameterValue("reverb_wetLevel");
     sp.reverbDryLevel = parameters.getRawParameterValue("reverb_dryLevel");
     sp.reverbWidth = parameters.getRawParameterValue("reverb_width");
-    sp.reverbFreezeMode = parameters.getRawParameterValue("reverb_freezeMode");
+    sp.reverbFreezeMode= parameters.getRawParameterValue("reverb_freezeMode");

-    // === ADSR ===
+    // === Amp ADSR ===
     parameters.addParameterListener("adsr_attack", this);
     parameters.addParameterListener("adsr_decay", this);
     parameters.addParameterListener("adsr_sustain", this);
@@ -60,22 +64,20 @@ NeuralSynthAudioProcessor::NeuralSynthAudioProcessor() : parameters(*this, nullp
     sp.adsrSustain = parameters.getRawParameterValue("adsr_sustain");
     sp.adsrRelease = parameters.getRawParameterValue("adsr_release");

-    // === Flanger ===
-    parameters.addParameterListener("flanger_rate", this);
-    parameters.addParameterListener("flanger_depth", this);
-    parameters.addParameterListener("flanger_feedback", this);
-    parameters.addParameterListener("flanger_dryMix", this);
-    parameters.addParameterListener("flanger_phase", this);
-    parameters.addParameterListener("flanger_delay", this);
+    // === Filter Env ===
+    parameters.addParameterListener("fenv_attack", this);
+    parameters.addParameterListener("fenv_decay", this);
+    parameters.addParameterListener("fenv_sustain", this);
+    parameters.addParameterListener("fenv_release", this);
+    parameters.addParameterListener("fenv_amount", this);

-    sp.flangerRate = parameters.getRawParameterValue("flanger_rate");
-    sp.flangerDepth = parameters.getRawParameterValue("flanger_depth");
-    sp.flangerFeedback = parameters.getRawParameterValue("flanger_feedback");
-    sp.flangerDryMix = parameters.getRawParameterValue("flanger_dryMix");
-    sp.flangerPhase = parameters.getRawParameterValue("flanger_phase");
-    sp.flangerDelay = parameters.getRawParameterValue("flanger_delay");
+    sp.fenvAttack = parameters.getRawParameterValue("fenv_attack");
+    sp.fenvDecay = parameters.getRawParameterValue("fenv_decay");
+    sp.fenvSustain = parameters.getRawParameterValue("fenv_sustain");
+    sp.fenvRelease = parameters.getRawParameterValue("fenv_release");
+    sp.fenvAmount = parameters.getRawParameterValue("fenv_amount");

-    // === Filter ===
+    // === Filter base ===
     parameters.addParameterListener("filter_cutoff", this);
     parameters.addParameterListener("filter_resonance", this);
     parameters.addParameterListener("filter_type", this);
@@ -103,8 +105,7 @@ NeuralSynthAudioProcessor::NeuralSynthAudioProcessor() : parameters(*this, nullp
     sp.distortionTone = parameters.getRawParameterValue("distortion_tone");
     sp.distortionShape = parameters.getRawParameterValue("distortion_shape");

-
-
+    // === Master / EQ ===
     parameters.addParameterListener("master", this);
     parameters.addParameterListener("lowEQ", this);
     parameters.addParameterListener("midEQ", this);
@@ -116,15 +117,10 @@ NeuralSynthAudioProcessor::NeuralSynthAudioProcessor() : parameters(*this, nullp
     sp.highGainDbls = parameters.getRawParameterValue("highEQ");
 }

-NeuralSynthAudioProcessor::~NeuralSynthAudioProcessor()
-{
-}
+NeuralSynthAudioProcessor::~NeuralSynthAudioProcessor() = default;

 //==============================================================================
-const juce::String NeuralSynthAudioProcessor::getName() const
-{
-    return JucePlugin_Name;
-}
+const juce::String NeuralSynthAudioProcessor::getName() const { return JucePlugin_Name; }

 bool NeuralSynthAudioProcessor::acceptsMidi() const
 {
@@ -153,52 +149,24 @@ bool NeuralSynthAudioProcessor::isMidiEffect() const
 #endif
 }

-double NeuralSynthAudioProcessor::getTailLengthSeconds() const
-{
-    return 0.0;
-}
+double NeuralSynthAudioProcessor::getTailLengthSeconds() const { return 0.0; }

-int NeuralSynthAudioProcessor::getNumPrograms()
-{
-    return 1; // NB: some hosts don't cope very well if you tell them there are 0 programs,
-              // so this should be at least 1, even if you're not really implementing programs.
-}
-
-int NeuralSynthAudioProcessor::getCurrentProgram()
-{
-    return 0;
-}
-
-void NeuralSynthAudioProcessor::setCurrentProgram (int index)
-{
-}
-
-const juce::String NeuralSynthAudioProcessor::getProgramName (int index)
-{
-    return {};
-}
-
-void NeuralSynthAudioProcessor::changeProgramName (int index, const juce::String& newName)
-{
-}
+int NeuralSynthAudioProcessor::getNumPrograms() { return 1; }
+int NeuralSynthAudioProcessor::getCurrentProgram() { return 0; }
+void NeuralSynthAudioProcessor::setCurrentProgram (int) {}
+const juce::String NeuralSynthAudioProcessor::getProgramName (int) { return {}; }
+void NeuralSynthAudioProcessor::changeProgramName (int, const juce::String&) {}

 //==============================================================================
 void NeuralSynthAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
 {
     audioEngine.prepare({ sampleRate, (juce::uint32)samplesPerBlock, 2 });
-    midiMessageCollector.reset(sampleRate);
 }

-void NeuralSynthAudioProcessor::releaseResources()
-{
-    // When playback stops, you can use this as an opportunity to free up any
-    // spare memory, etc.
-}
+void NeuralSynthAudioProcessor::releaseResources() {}

 bool NeuralSynthAudioProcessor::isBusesLayoutSupported (const BusesLayout& layouts) const
 {
-    // This is the place where you check if the layout is supported.
-    // In this template code we only support mono or stereo.
     if (layouts.getMainOutputChannelSet() != juce::AudioChannelSet::mono()
         && layouts.getMainOutputChannelSet() != juce::AudioChannelSet::stereo())
         return false;
@@ -221,8 +189,6 @@ void NeuralSynthAudioProcessor::processBlock(juce::AudioSampleBuffer& buffer, ju
     auto totalNumInputChannels = getTotalNumInputChannels();
     auto totalNumOutputChannels = getTotalNumOutputChannels();

-    midiMessageCollector.removeNextBlockOfMessages(midiMessages, buffer.getNumSamples());
-
     for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
         buffer.clear(i, 0, buffer.getNumSamples());

@@ -231,10 +197,7 @@ void NeuralSynthAudioProcessor::processBlock(juce::AudioSampleBuffer& buffer, ju
 }

 //==============================================================================
-bool NeuralSynthAudioProcessor::hasEditor() const
-{
-    return true; // (change this to false if you choose to not supply an editor)
-}
+bool NeuralSynthAudioProcessor::hasEditor() const { return true; }

 juce::AudioProcessorEditor* NeuralSynthAudioProcessor::createEditor()
 {
@@ -242,39 +205,28 @@ juce::AudioProcessorEditor* NeuralSynthAudioProcessor::createEditor()
 }

 //==============================================================================
-void NeuralSynthAudioProcessor::getStateInformation (juce::MemoryBlock& destData)
-{
-    // You should use this method to store your parameters in the memory block.
-    // You could do that either as raw data, or use the XML or ValueTree classes
-    // as intermediaries to make it easy to save and load complex data.
-}
-
-void NeuralSynthAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
-{
-    // You should use this method to restore your parameters from this memory block,
-    // whose contents will have been created by the getStateInformation() call.
-}
+void NeuralSynthAudioProcessor::getStateInformation (juce::MemoryBlock& destData) { juce::ignoreUnused(destData); }
+void NeuralSynthAudioProcessor::setStateInformation (const void* data, int sizeInBytes) { juce::ignoreUnused(data, sizeInBytes); }

 void NeuralSynthAudioProcessor::parameterChanged(const juce::String& id, float newValue)
 {
+    juce::ignoreUnused(newValue);
     if (id == "waveform")
         sp.waveform.store((int)newValue, std::memory_order_release);
 }

 //==============================================================================
 // This creates new instances of the plugin..
-juce::AudioProcessor* JUCE_CALLTYPE createPluginFilter()
-{
-    return new NeuralSynthAudioProcessor();
-}
+juce::AudioProcessor* JUCE_CALLTYPE createPluginFilter() { return new NeuralSynthAudioProcessor(); }

-void NeuralSynthAudioProcessor::buildParams(std::vector<std::unique_ptr<juce::RangedAudioParameter>> &params, const std::string& paramGroup) {
+void NeuralSynthAudioProcessor::buildParams(std::vector<std::unique_ptr<juce::RangedAudioParameter>>& params, const std::string& paramGroup) {
     const auto& paramGroupSettings = PARAM_SETTINGS.at(paramGroup);

-    for (const auto& [name, paramSettings] : paramGroupSettings) {
-        params.push_back(std::make_unique<juce::AudioParameterFloat>(paramGroup + "_" + name, paramSettings.label,
-            juce::NormalisableRange<float>(paramSettings.min, paramSettings.max, paramSettings.interval),
-            paramSettings.defValue));
+    for (const auto& [name, s] : paramGroupSettings) {
+        params.push_back(std::make_unique<juce::AudioParameterFloat>(
+            paramGroup + "_" + name, s.label,
+            juce::NormalisableRange<float>(s.min, s.max, s.interval),
+            s.defValue));
     }
 }

@@ -285,8 +237,24 @@ juce::AudioProcessorValueTreeState::ParameterLayout NeuralSynthAudioProcessor::c
     params.push_back(std::make_unique<juce::AudioParameterChoice>(
         "waveform", "Waveform",
         juce::StringArray{ "Sine", "Saw", "Square", "Triangle" }, 0));
+    // --- Wavetable on/off + morph position (0..15) ---
+    params.push_back(std::make_unique<juce::AudioParameterBool>("wt_on", "Wavetable On", true));
+    params.push_back(std::make_unique<juce::AudioParameterFloat>(
+        "wt_morph", "WT Morph",
+        juce::NormalisableRange<float>(0.0f, 15.0f, 0.001f), 0.0f));
+
+
+    // Per-panel bypass toggles (default OFF)
+    params.push_back(std::make_unique<juce::AudioParameterBool>("chorus_on", "Chorus On", false));
+    params.push_back(std::make_unique<juce::AudioParameterBool>("delay_on", "Delay On", false));
+    params.push_back(std::make_unique<juce::AudioParameterBool>("reverb_on", "Reverb On", false));
+    params.push_back(std::make_unique<juce::AudioParameterBool>("flanger_on", "Flanger On", false));
+    params.push_back(std::make_unique<juce::AudioParameterBool>("distortion_on", "Distortion On", false));
+    params.push_back(std::make_unique<juce::AudioParameterBool>("filter_on", "Filter On", false));
+    params.push_back(std::make_unique<juce::AudioParameterBool>("eq_on", "EQ On", false));

     buildParams(params, "adsr");
+    buildParams(params, "fenv");
     buildParams(params, "chorus");
     buildParams(params, "delay");
     buildParams(params, "reverb");
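Note on the buildParams() helper in the hunk above: it walks a nested settings table keyed first by parameter group and then by parameter name, and builds each parameter ID by prefixing the group, yielding IDs such as "adsr_attack". The real PARAM_SETTINGS table is defined elsewhere in the project; the sketch below only illustrates a shape such a table could plausibly have, using the field names (label, min, max, interval, defValue) that appear in the diff; the container type and the example values are assumptions.

// Hypothetical sketch only: mirrors the fields buildParams() reads, not the project's actual table.
#include <map>
#include <string>

struct ParamSetting
{
    std::string label;   // display name shown to the host
    float min;           // NormalisableRange lower bound
    float max;           // NormalisableRange upper bound
    float interval;      // step size
    float defValue;      // default value
};

// Keyed by group ("adsr", "chorus", ...) and then by parameter name.
static const std::map<std::string, std::map<std::string, ParamSetting>> EXAMPLE_PARAM_SETTINGS =
{
    { "adsr",
      {
          { "attack",  { "Attack",  0.001f, 5.0f, 0.001f, 0.01f } },
          { "decay",   { "Decay",   0.001f, 5.0f, 0.001f, 0.10f } },
          { "sustain", { "Sustain", 0.0f,   1.0f, 0.001f, 0.80f } },
          { "release", { "Release", 0.001f, 5.0f, 0.001f, 0.20f } },
      } },
};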
Source/PluginProcessor.h
@@ -1,11 +1,3 @@
-/*
-  ==============================================================================
-
-    This file contains the basic framework code for a JUCE plugin processor.
-
-  ==============================================================================
-*/
-
 #pragma once

 #include <JuceHeader.h>
@@ -15,20 +7,17 @@
 #include "NeuralSharedParams.h"

 //==============================================================================
-/**
-*/
+// Processor
 class NeuralSynthAudioProcessor : public juce::AudioProcessor,
                                   private juce::AudioProcessorValueTreeState::Listener
 {
 public:
-    //==============================================================================
     NeuralSynthAudioProcessor();
     ~NeuralSynthAudioProcessor() override;

-    //==============================================================================
+    // AudioProcessor overrides
     void prepareToPlay(double sampleRate, int samplesPerBlock) override;
     void releaseResources() override;
-    bool isBusesLayoutSupported(const BusesLayout& layouts) const;

 #ifndef JucePlugin_PreferredChannelConfigurations
     bool isBusesLayoutSupported(const BusesLayout& layouts) const override;
@@ -36,66 +25,66 @@ public:

     void processBlock(juce::AudioBuffer<float>&, juce::MidiBuffer&) override;

-    //==============================================================================
+    // Editor
     juce::AudioProcessorEditor* createEditor() override;
     bool hasEditor() const override;

-    //==============================================================================
+    // Info
     const juce::String getName() const override;

     bool acceptsMidi() const override;
     bool producesMidi() const override;
     bool isMidiEffect() const override;
     double getTailLengthSeconds() const override;

-    //==============================================================================
+    // Programs
     int getNumPrograms() override;
     int getCurrentProgram() override;
     void setCurrentProgram(int index) override;
     const juce::String getProgramName(int index) override;
     void changeProgramName(int index, const juce::String& newName) override;

-    //==============================================================================
+    // State
     void getStateInformation(juce::MemoryBlock& destData) override;
     void setStateInformation(const void* data, int sizeInBytes) override;

-    //==============================================================================
+    // Parameters
     void parameterChanged(const juce::String& id, float newValue) override;
-    void buildParams(std::vector<std::unique_ptr<juce::RangedAudioParameter>>& params, const std::string& paramGroup);
+    void buildParams(std::vector<std::unique_ptr<juce::RangedAudioParameter>>& params,
+                     const std::string& paramGroup);

-    juce::MidiMessageCollector& getMidiMessageCollector() noexcept { return midiMessageCollector; }
-
-    juce::MidiMessageCollector midiMessageCollector;
-    juce::AudioProcessorValueTreeState parameters;
     juce::AudioProcessorValueTreeState::ParameterLayout createParameterLayout();

+    // Utilities
+    juce::MidiMessageCollector& getMidiMessageCollector() noexcept { return midiMessageCollector; }
     AudioBufferQueue<float>& getAudioBufferQueue() noexcept { return audioBufferQueue; }

     AudioBufferQueue<float>& getChorusAudioBufferQueue() noexcept { return chorusBufferQueue; }
     AudioBufferQueue<float>& getDelayAudioBufferQueue() noexcept { return delayBufferQueue; }
     AudioBufferQueue<float>& getReverbAudioBufferQueue() noexcept { return reverbBufferQueue; }

     AudioBufferQueue<float>& getFlangerAudioBufferQueue() noexcept { return flangerBufferQueue; }
     AudioBufferQueue<float>& getDistortionAudioBufferQueue() noexcept { return distortionBufferQueue; }
     AudioBufferQueue<float>& getFilterAudioBufferQueue() noexcept { return filterBufferQueue; }

+    // Public members (by JUCE convention)
+    juce::MidiMessageCollector midiMessageCollector;
+    juce::AudioProcessorValueTreeState parameters;
+
 private:
-    //==============================================================================
-    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR(NeuralSynthAudioProcessor)
+    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (NeuralSynthAudioProcessor)

-    NeuralAudioEngine audioEngine;
+    // ---- IMPORTANT ORDER FIX ----
+    // Objects are constructed in THIS order. 'sp' must come BEFORE audioEngine.
+    NeuralSharedParams sp;          // <— construct first
+    NeuralAudioEngine audioEngine;  // needs a valid reference to 'sp'
+
+    // Meter/scope queues
     AudioBufferQueue<float> audioBufferQueue;

     AudioBufferQueue<float> chorusBufferQueue;
     AudioBufferQueue<float> delayBufferQueue;
     AudioBufferQueue<float> reverbBufferQueue;

     AudioBufferQueue<float> flangerBufferQueue;
     AudioBufferQueue<float> distortionBufferQueue;
     AudioBufferQueue<float> filterBufferQueue;

-    ScopeDataCollector<float> scopeDataCollector{ audioBufferQueue };
-    NeuralSharedParams sp;
+    // Scope collector (uses audioBufferQueue, so declare after it)
+    ScopeDataCollector<float> scopeDataCollector { audioBufferQueue };
 };
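On the member reordering at the bottom of PluginProcessor.h: C++ constructs non-static data members in declaration order, not in the order they appear in a constructor's initialiser list, so a member that holds a reference to another member must be declared after it. A minimal, standalone sketch of the rule the "IMPORTANT ORDER FIX" comment relies on (the types here are simplified stand-ins, not the project's classes):

// Simplified stand-ins; only the declaration-order rule is the point.
struct SharedParams { int value = 0; };

struct Engine
{
    explicit Engine(SharedParams& p) : params(p) {}
    SharedParams& params;   // must refer to an already-constructed object
};

struct Processor
{
    // Construction happens top to bottom: sp first, then engine.
    // If the two declarations were swapped, engine would capture a reference
    // to a not-yet-constructed sp, which is the hazard the reordering avoids.
    SharedParams sp;
    Engine engine { sp };
};

int main()
{
    Processor p;            // safe: sp is fully built before engine
    return p.sp.value;
}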