Initial plugin version
This commit is contained in:
49
Source/AudioBufferQueue.h
Normal file
49
Source/AudioBufferQueue.h
Normal file
@@ -0,0 +1,49 @@
|
||||
#pragma once
|
||||
|
||||
//==============================================================================
|
||||
template <typename SampleType>
|
||||
class AudioBufferQueue
|
||||
{
|
||||
public:
|
||||
//==============================================================================
|
||||
static constexpr size_t order = 9;
|
||||
static constexpr size_t bufferSize = 1U << order;
|
||||
static constexpr size_t numBuffers = 5;
|
||||
|
||||
//==============================================================================
|
||||
void push(const SampleType* dataToPush, size_t numSamples)
|
||||
{
|
||||
jassert(numSamples <= bufferSize);
|
||||
|
||||
int start1, size1, start2, size2;
|
||||
abstractFifo.prepareToWrite(1, start1, size1, start2, size2);
|
||||
|
||||
jassert(size1 <= 1);
|
||||
jassert(size2 == 0);
|
||||
|
||||
if (size1 > 0)
|
||||
juce::FloatVectorOperations::copy(buffers[(size_t)start1].data(), dataToPush, (int)juce::jmin(bufferSize, numSamples));
|
||||
|
||||
abstractFifo.finishedWrite(size1);
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
void pop(SampleType* outputBuffer)
|
||||
{
|
||||
int start1, size1, start2, size2;
|
||||
abstractFifo.prepareToRead(1, start1, size1, start2, size2);
|
||||
|
||||
jassert(size1 <= 1);
|
||||
jassert(size2 == 0);
|
||||
|
||||
if (size1 > 0)
|
||||
juce::FloatVectorOperations::copy(outputBuffer, buffers[(size_t)start1].data(), (int)bufferSize);
|
||||
|
||||
abstractFifo.finishedRead(size1);
|
||||
}
|
||||
|
||||
private:
|
||||
//==============================================================================
|
||||
juce::AbstractFifo abstractFifo{ numBuffers };
|
||||
std::array<std::array<SampleType, bufferSize>, numBuffers> buffers;
|
||||
};
|
||||
43
Source/AudioEngine.h
Normal file
43
Source/AudioEngine.h
Normal file
@@ -0,0 +1,43 @@
|
||||
#pragma once
|
||||
|
||||
#include "SynthVoice.h"
|
||||
#include <JuceHeader.h>
|
||||
|
||||
class NeuralAudioEngine : public juce::MPESynthesiser
|
||||
{
|
||||
public:
|
||||
static constexpr auto maxNumVoices = 4;
|
||||
|
||||
//==============================================================================
|
||||
NeuralAudioEngine(NeuralSharedParams &sp)
|
||||
{
|
||||
for (auto i = 0; i < maxNumVoices; ++i)
|
||||
addVoice(new NeuralSynthVoice(sp));
|
||||
|
||||
setVoiceStealingEnabled(true);
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
void prepare(const juce::dsp::ProcessSpec& spec) noexcept
|
||||
{
|
||||
setCurrentPlaybackSampleRate(spec.sampleRate);
|
||||
|
||||
for (auto* v : voices)
|
||||
dynamic_cast<NeuralSynthVoice*> (v)->prepare(spec);
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
template <typename VoiceFunc>
|
||||
void applyToVoices(VoiceFunc&& fn) noexcept
|
||||
{
|
||||
for (auto* v : voices)
|
||||
fn(dynamic_cast<NeuralSynthVoice*> (v));
|
||||
}
|
||||
|
||||
private:
|
||||
//==============================================================================
|
||||
void renderNextSubBlock(juce::AudioBuffer<float>& outputAudio, int startSample, int numSamples) override
|
||||
{
|
||||
MPESynthesiser::renderNextSubBlock(outputAudio, startSample, numSamples);
|
||||
}
|
||||
};
|
||||
23
Source/NeuralSharedParams.h
Normal file
23
Source/NeuralSharedParams.h
Normal file
@@ -0,0 +1,23 @@
|
||||
/*
|
||||
==============================================================================
|
||||
|
||||
NeuralSharedParams.h
|
||||
Created: 21 Jun 2025 7:53:02am
|
||||
Author: timot
|
||||
|
||||
==============================================================================
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
|
||||
/**
    Parameter block shared between the processor, the engine, and all voices.

    Plain aggregate with atomic fields so the audio thread can read values
    written by the message thread without locks.
*/
struct NeuralSharedParams
{
    // Pending waveform change; -1 is the "no change requested" sentinel
    // (consumed by the audio thread with exchange(-1)).
    std::atomic<int> waveform{ -1 };

    // Non-owning pointers into the APVTS raw parameter values, wired up by
    // the processor constructor. Bug fix: these were previously left
    // uninitialised — now default to nullptr so a read before wiring fails
    // deterministically instead of dereferencing garbage.
    std::atomic<float>* attack{ nullptr };
    std::atomic<float>* decay{ nullptr };
    std::atomic<float>* sustain{ nullptr };
    std::atomic<float>* release{ nullptr };
};
|
||||
101
Source/PluginEditor.cpp
Normal file
101
Source/PluginEditor.cpp
Normal file
@@ -0,0 +1,101 @@
|
||||
/*
|
||||
==============================================================================
|
||||
|
||||
This file contains the basic framework code for a JUCE plugin editor.
|
||||
|
||||
==============================================================================
|
||||
*/
|
||||
|
||||
#include "PluginProcessor.h"
|
||||
#include "PluginEditor.h"
|
||||
#include "ScopeComponent.h"
|
||||
|
||||
//==============================================================================
|
||||
// Builds the editor UI: oscilloscope on top, waveform selector and ADSR
// knobs in the middle, and an on-screen MIDI keyboard at the bottom.
// Layout is mostly absolute-positioned here; resized() re-lays-out the
// sliders and selector afterwards.
NeuralSynthAudioProcessorEditor::NeuralSynthAudioProcessorEditor (NeuralSynthAudioProcessor& p)
    : AudioProcessorEditor (&p), audioProcessor (p), scopeComponent(audioProcessor.getAudioBufferQueue())
{
    // Make sure that before the constructor has finished, you've set the
    // editor's size to whatever you need it to be.
    setSize(400, 500);

    auto& tree = audioProcessor.parameters;

    auto area = getLocalBounds();
    // Oscilloscope occupies the top 400x200 strip.
    scopeComponent.setTopLeftPosition(0, 0);
    scopeComponent.setSize(400, 200);

    addAndMakeVisible(scopeComponent);

    // Bind each ADSR slider to its APVTS parameter (attachment keeps the
    // slider and parameter in sync both ways).
    attackAttachment = std::make_unique<juce::AudioProcessorValueTreeState::SliderAttachment>(tree, "attack", attackSlider);
    decayAttachment = std::make_unique<juce::AudioProcessorValueTreeState::SliderAttachment>(tree, "decay", decaySlider);
    sustainAttachment = std::make_unique<juce::AudioProcessorValueTreeState::SliderAttachment>(tree, "sustain", sustainSlider);
    releaseAttachment = std::make_unique<juce::AudioProcessorValueTreeState::SliderAttachment>(tree, "release", releaseSlider);

    addAndMakeVisible(waveformSelector);

    waveformSelector.setTopLeftPosition(15, 225);

    // Rotary ADSR knobs laid out left-to-right, 60px wide with 40px gaps.
    int leftPosition = 15;
    const int sliderWidth = 60;
    for (auto* slider : { &attackSlider, &decaySlider, &sustainSlider, &releaseSlider })
    {
        slider->setSliderStyle(juce::Slider::Rotary);
        slider->setTextBoxStyle(juce::Slider::TextBoxBelow, false, sliderWidth, 20);
        addAndMakeVisible(*slider);
        slider->setTopLeftPosition(leftPosition, 250);
        leftPosition += (sliderWidth + 40);
    }

    // ComboBox item ids are 1-based; they correspond to the
    // AudioParameterChoice indices 0..3 declared in createParameterLayout().
    waveformSelector.addItem("Sine", 1);
    waveformSelector.addItem("Saw", 2);
    waveformSelector.addItem("Square", 3);
    waveformSelector.addItem("Triangle", 4);


    // Attach to parameter
    waveformAttachment = std::make_unique<juce::AudioProcessorValueTreeState::ComboBoxAttachment>(
        audioProcessor.parameters, "waveform", waveformSelector);

    addAndMakeVisible(midiKeyboardComponent);


    //scopeComponent.setSize(area.getWidth(), area.getHeight());

    midiKeyboardComponent.setMidiChannel(2);
    // Route on-screen keyboard events into the processor's MIDI collector so
    // they are merged into processBlock's MIDI stream.
    midiKeyboardState.addListener(&audioProcessor.getMidiMessageCollector());

    // NOTE(review): setBounds sizes the keyboard from the top 80px strip,
    // then setTopLeftPosition immediately moves it to the bottom — only the
    // size from setBounds survives.
    midiKeyboardComponent.setBounds(area.removeFromTop(80).reduced(8));
    midiKeyboardComponent.setTopLeftPosition(8, 420);
}
|
||||
|
||||
// Nothing to release explicitly; child components and attachments clean up
// via their own destructors.
NeuralSynthAudioProcessorEditor::~NeuralSynthAudioProcessorEditor() = default;
|
||||
|
||||
//==============================================================================
|
||||
//==============================================================================
// The editor is opaque, so every pixel must be painted: fill the whole
// component with the LookAndFeel's window background colour.
void NeuralSynthAudioProcessorEditor::paint (juce::Graphics& g)
{
    const auto background = getLookAndFeel().findColour (juce::ResizableWindow::backgroundColourId);
    g.fillAll (background);
}
|
||||
|
||||
void NeuralSynthAudioProcessorEditor::resized()
|
||||
{
|
||||
// This is generally where you'll want to lay out the positions of any
|
||||
// subcomponents in your editor..
|
||||
auto bounds = getLocalBounds().reduced(20);
|
||||
auto row = bounds.removeFromTop(150);
|
||||
|
||||
int knobWidth = row.getWidth() / 4;
|
||||
|
||||
attackSlider.setBounds(row.removeFromLeft(knobWidth).reduced(10));
|
||||
decaySlider.setBounds(row.removeFromLeft(knobWidth).reduced(10));
|
||||
sustainSlider.setBounds(row.removeFromLeft(knobWidth).reduced(10));
|
||||
releaseSlider.setBounds(row.removeFromLeft(knobWidth).reduced(10));
|
||||
|
||||
waveformSelector.setBounds(20, 20, 120, 30);
|
||||
}
|
||||
48
Source/PluginEditor.h
Normal file
48
Source/PluginEditor.h
Normal file
@@ -0,0 +1,48 @@
|
||||
/*
|
||||
==============================================================================
|
||||
|
||||
This file contains the basic framework code for a JUCE plugin editor.
|
||||
|
||||
==============================================================================
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <JuceHeader.h>
|
||||
#include "PluginProcessor.h"
|
||||
#include "ScopeComponent.h"
|
||||
|
||||
//==============================================================================
|
||||
/**
|
||||
*/
|
||||
// Main editor window: oscilloscope, waveform selector, ADSR sliders and an
// on-screen MIDI keyboard. Member declaration order matters — see notes.
class NeuralSynthAudioProcessorEditor : public juce::AudioProcessorEditor
{
public:
    NeuralSynthAudioProcessorEditor (NeuralSynthAudioProcessor&);
    ~NeuralSynthAudioProcessorEditor() override;

    //==============================================================================
    void paint (juce::Graphics&) override;
    void resized() override;

private:
    // This reference is provided as a quick way for your editor to
    // access the processor object that created it.
    // Must stay declared before scopeComponent, which is initialised from it.
    NeuralSynthAudioProcessor& audioProcessor;

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (NeuralSynthAudioProcessorEditor)

    // Waveform choice, bound to the "waveform" APVTS parameter.
    juce::ComboBox waveformSelector;
    std::unique_ptr<juce::AudioProcessorValueTreeState::ComboBoxAttachment> waveformAttachment;

    // ADSR controls and their APVTS attachments (attachments must outlive
    // neither the sliders nor the tree; unique_ptrs are reset in ~Editor).
    juce::Slider attackSlider, decaySlider, sustainSlider, releaseSlider;

    std::unique_ptr<juce::AudioProcessorValueTreeState::SliderAttachment> attackAttachment;
    std::unique_ptr<juce::AudioProcessorValueTreeState::SliderAttachment> decayAttachment;
    std::unique_ptr<juce::AudioProcessorValueTreeState::SliderAttachment> sustainAttachment;
    std::unique_ptr<juce::AudioProcessorValueTreeState::SliderAttachment> releaseAttachment;

    // Keyboard state is declared before the component that references it.
    juce::MidiKeyboardState midiKeyboardState;
    juce::MidiKeyboardComponent midiKeyboardComponent{ midiKeyboardState, juce::MidiKeyboardComponent::horizontalKeyboard };
    // Initialised in the constructor from audioProcessor's buffer queue.
    ScopeComponent<float> scopeComponent;
};
|
||||
201
Source/PluginProcessor.cpp
Normal file
201
Source/PluginProcessor.cpp
Normal file
@@ -0,0 +1,201 @@
|
||||
/*
|
||||
==============================================================================
|
||||
|
||||
This file contains the basic framework code for a JUCE plugin processor.
|
||||
|
||||
==============================================================================
|
||||
*/
|
||||
|
||||
#include "PluginProcessor.h"
|
||||
#include "PluginEditor.h"
|
||||
|
||||
//==============================================================================
|
||||
//==============================================================================
// Constructs the processor: stereo output bus, APVTS parameter tree, and the
// synth engine wired to the shared parameter block.
//
// Bug fix: the mem-initialiser list previously listed members before the base
// class, which does not match the actual initialisation order (base first,
// then members in declaration order) and triggers -Wreorder; it is now
// written in true initialisation order. Note that audioEngine(sp) hands the
// engine a reference to the member 'sp' — 'sp' must be declared before
// 'audioEngine' in the header for that reference to refer to a constructed
// object.
NeuralSynthAudioProcessor::NeuralSynthAudioProcessor()
    : AudioProcessor(BusesProperties().withOutput("Output", juce::AudioChannelSet::stereo(), true))
    , parameters(*this, nullptr, "PARAMETERS", createParameterLayout())
    , audioEngine(sp)
{
    // Waveform changes are latched into sp and consumed by processBlock.
    parameters.addParameterListener("waveform", this);

    // ADSR listeners are registered, but parameterChanged ignores them —
    // voices read the raw values directly when a note starts.
    parameters.addParameterListener("attack", this);
    parameters.addParameterListener("decay", this);
    parameters.addParameterListener("sustain", this);
    parameters.addParameterListener("release", this);

    // Wire the shared block's pointers at the APVTS raw parameter values so
    // the audio thread can read them lock-free.
    sp.attack = parameters.getRawParameterValue("attack");
    sp.decay = parameters.getRawParameterValue("decay");
    sp.sustain = parameters.getRawParameterValue("sustain");
    sp.release = parameters.getRawParameterValue("release");
}
|
||||
|
||||
// Nothing to release explicitly; members are cleaned up via RAII.
NeuralSynthAudioProcessor::~NeuralSynthAudioProcessor() = default;
|
||||
|
||||
//==============================================================================
|
||||
// Plugin display name, taken from the project's JUCE configuration.
const juce::String NeuralSynthAudioProcessor::getName() const
{
    return JucePlugin_Name;
}

// The three MIDI capability flags below are driven entirely by the
// JucePlugin_* build-time settings.
bool NeuralSynthAudioProcessor::acceptsMidi() const
{
#if JucePlugin_WantsMidiInput
    return true;
#else
    return false;
#endif
}

bool NeuralSynthAudioProcessor::producesMidi() const
{
#if JucePlugin_ProducesMidiOutput
    return true;
#else
    return false;
#endif
}

bool NeuralSynthAudioProcessor::isMidiEffect() const
{
#if JucePlugin_IsMidiEffect
    return true;
#else
    return false;
#endif
}

// No tail beyond the voices' own envelope release is reported to the host.
double NeuralSynthAudioProcessor::getTailLengthSeconds() const
{
    return 0.0;
}

// Program (preset) support is not implemented; a single dummy program is
// reported because some hosts mishandle a count of zero.
int NeuralSynthAudioProcessor::getNumPrograms()
{
    return 1; // NB: some hosts don't cope very well if you tell them there are 0 programs,
    // so this should be at least 1, even if you're not really implementing programs.
}

int NeuralSynthAudioProcessor::getCurrentProgram()
{
    return 0;
}

// Intentionally a no-op — programs are not implemented.
void NeuralSynthAudioProcessor::setCurrentProgram (int index)
{
}

const juce::String NeuralSynthAudioProcessor::getProgramName (int index)
{
    return {};
}

// Intentionally a no-op — programs are not implemented.
void NeuralSynthAudioProcessor::changeProgramName (int index, const juce::String& newName)
{
}
|
||||
|
||||
//==============================================================================
|
||||
void NeuralSynthAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
|
||||
{
|
||||
audioEngine.prepare({ sampleRate, (juce::uint32)samplesPerBlock, 2 });
|
||||
midiMessageCollector.reset(sampleRate);
|
||||
}
|
||||
|
||||
// Intentionally empty: no block-size-dependent resources are allocated
// outside the engine, so there is nothing to free on stop.
void NeuralSynthAudioProcessor::releaseResources()
{
    // When playback stops, you can use this as an opportunity to free up any
    // spare memory, etc.
}
|
||||
|
||||
bool NeuralSynthAudioProcessor::isBusesLayoutSupported (const BusesLayout& layouts) const
|
||||
{
|
||||
// This is the place where you check if the layout is supported.
|
||||
// In this template code we only support mono or stereo.
|
||||
if (layouts.getMainOutputChannelSet() != juce::AudioChannelSet::mono()
|
||||
&& layouts.getMainOutputChannelSet() != juce::AudioChannelSet::stereo())
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Realtime audio callback. Order matters: apply any pending waveform change,
// merge GUI-keyboard MIDI into the host MIDI buffer, clear unmatched output
// channels, render the synth, then feed the oscilloscope collector.
void NeuralSynthAudioProcessor::processBlock(juce::AudioSampleBuffer& buffer, juce::MidiBuffer& midiMessages)
{
    // Consume a pending waveform change exactly once; exchange resets the
    // flag to -1 (the "no change" sentinel).
    const int newWaveform = sp.waveform.exchange(-1);

    if (newWaveform != -1) {
        audioEngine.applyToVoices([newWaveform](NeuralSynthVoice* v)
            {
                v->changeWaveform(newWaveform);
            });
    }

    juce::ScopedNoDenormals noDenormals;
    auto totalNumInputChannels = getTotalNumInputChannels();
    auto totalNumOutputChannels = getTotalNumOutputChannels();

    // Merge events from the on-screen keyboard into this block's MIDI.
    midiMessageCollector.removeNextBlockOfMessages(midiMessages, buffer.getNumSamples());

    // Clear output channels that have no corresponding input channel.
    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear(i, 0, buffer.getNumSamples());

    audioEngine.renderNextBlock(buffer, midiMessages, 0, buffer.getNumSamples());
    // Only channel 0 feeds the oscilloscope display.
    scopeDataCollector.process(buffer.getReadPointer(0), (size_t)buffer.getNumSamples());
}
|
||||
|
||||
//==============================================================================
|
||||
bool NeuralSynthAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}

// Creates a fresh editor window; ownership passes to the host wrapper.
juce::AudioProcessorEditor* NeuralSynthAudioProcessor::createEditor()
{
    return new NeuralSynthAudioProcessorEditor (*this);
}
|
||||
|
||||
//==============================================================================
|
||||
//==============================================================================
// Saves the plugin state for the host session.
//
// Bug fix: this was an empty stub, so all user settings (waveform, ADSR)
// were lost whenever the host saved/reloaded a project. The APVTS value
// tree is now serialised to XML and written into destData.
void NeuralSynthAudioProcessor::getStateInformation (juce::MemoryBlock& destData)
{
    auto state = parameters.copyState();   // thread-safe snapshot of the tree

    if (auto xml = state.createXml())
        copyXmlToBinary (*xml, destData);
}
|
||||
|
||||
void NeuralSynthAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
|
||||
{
|
||||
// You should use this method to restore your parameters from this memory block,
|
||||
// whose contents will have been created by the getStateInformation() call.
|
||||
}
|
||||
|
||||
// APVTS listener callback. Only the waveform choice needs action here: it is
// latched into the shared atomic and consumed by processBlock. The ADSR
// parameters are deliberately ignored — voices read their raw values
// directly when a note starts.
void NeuralSynthAudioProcessor::parameterChanged(const juce::String& id, float newValue)
{
    if (id == "waveform")
        // Release ordering pairs with the audio thread's exchange/read.
        sp.waveform.store((int)newValue, std::memory_order_release);
}
|
||||
|
||||
//==============================================================================
|
||||
// This creates new instances of the plugin..
|
||||
// Factory entry point called by the JUCE plugin wrapper; ownership of the
// returned processor passes to the wrapper.
juce::AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new NeuralSynthAudioProcessor();
}
|
||||
|
||||
// Declares the plugin's parameters: one waveform choice plus the four ADSR
// floats. Ids, display names, ranges and defaults are unchanged; the float
// parameters are built from a small table instead of four repeated calls.
juce::AudioProcessorValueTreeState::ParameterLayout NeuralSynthAudioProcessor::createParameterLayout()
{
    std::vector<std::unique_ptr<juce::RangedAudioParameter>> params;

    params.push_back(std::make_unique<juce::AudioParameterChoice>(
        "waveform", "Waveform",
        juce::StringArray{ "Sine", "Saw", "Square", "Triangle" }, 0));

    // id, display name, range start, default (range end 1.0, step 0.01)
    struct EnvSpec { const char* id; const char* displayName; float rangeStart; float defaultValue; };

    const EnvSpec envSpecs[] = {
        { "attack",  "Attack",  0.0f,  0.1f },
        { "decay",   "Decay",   0.0f,  0.5f },
        { "sustain", "Sustain", 0.0f,  0.8f },
        { "release", "Release", 0.01f, 1.0f },
    };

    for (const auto& spec : envSpecs)
        params.push_back(std::make_unique<juce::AudioParameterFloat>(
            spec.id, spec.displayName,
            juce::NormalisableRange<float>(spec.rangeStart, 1.0f, 0.01f),
            spec.defaultValue));

    return { params.begin(), params.end() };
}
|
||||
80
Source/PluginProcessor.h
Normal file
80
Source/PluginProcessor.h
Normal file
@@ -0,0 +1,80 @@
|
||||
/*
|
||||
==============================================================================
|
||||
|
||||
This file contains the basic framework code for a JUCE plugin processor.
|
||||
|
||||
==============================================================================
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <JuceHeader.h>
|
||||
#include "AudioBufferQueue.h"
|
||||
#include "AudioEngine.h"
|
||||
#include "ScopeDataCollector.h"
|
||||
#include "NeuralSharedParams.h"
|
||||
|
||||
//==============================================================================
|
||||
/**
|
||||
*/
|
||||
class NeuralSynthAudioProcessor : public juce::AudioProcessor,
|
||||
private juce::AudioProcessorValueTreeState::Listener
|
||||
{
|
||||
public:
|
||||
//==============================================================================
|
||||
NeuralSynthAudioProcessor();
|
||||
~NeuralSynthAudioProcessor() override;
|
||||
|
||||
//==============================================================================
|
||||
void prepareToPlay(double sampleRate, int samplesPerBlock) override;
|
||||
void releaseResources() override;
|
||||
|
||||
#ifndef JucePlugin_PreferredChannelConfigurations
|
||||
bool isBusesLayoutSupported(const BusesLayout& layouts) const override;
|
||||
#endif
|
||||
|
||||
void processBlock(juce::AudioBuffer<float>&, juce::MidiBuffer&) override;
|
||||
|
||||
//==============================================================================
|
||||
juce::AudioProcessorEditor* createEditor() override;
|
||||
bool hasEditor() const override;
|
||||
|
||||
//==============================================================================
|
||||
const juce::String getName() const override;
|
||||
|
||||
bool acceptsMidi() const override;
|
||||
bool producesMidi() const override;
|
||||
bool isMidiEffect() const override;
|
||||
double getTailLengthSeconds() const override;
|
||||
|
||||
//==============================================================================
|
||||
int getNumPrograms() override;
|
||||
int getCurrentProgram() override;
|
||||
void setCurrentProgram(int index) override;
|
||||
const juce::String getProgramName(int index) override;
|
||||
void changeProgramName(int index, const juce::String& newName) override;
|
||||
|
||||
//==============================================================================
|
||||
void getStateInformation(juce::MemoryBlock& destData) override;
|
||||
void setStateInformation(const void* data, int sizeInBytes) override;
|
||||
|
||||
//==============================================================================
|
||||
void parameterChanged(const juce::String& id, float newValue) override;
|
||||
|
||||
juce::MidiMessageCollector& getMidiMessageCollector() noexcept { return midiMessageCollector; }
|
||||
|
||||
juce::MidiMessageCollector midiMessageCollector;
|
||||
juce::AudioProcessorValueTreeState parameters;
|
||||
juce::AudioProcessorValueTreeState::ParameterLayout createParameterLayout();
|
||||
|
||||
AudioBufferQueue<float>& getAudioBufferQueue() noexcept { return audioBufferQueue; }
|
||||
private:
|
||||
//==============================================================================
|
||||
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR(NeuralSynthAudioProcessor)
|
||||
|
||||
NeuralAudioEngine audioEngine;
|
||||
AudioBufferQueue<float> audioBufferQueue;
|
||||
ScopeDataCollector<float> scopeDataCollector{ audioBufferQueue };
|
||||
|
||||
NeuralSharedParams sp;
|
||||
};
|
||||
102
Source/ScopeComponent.h
Normal file
102
Source/ScopeComponent.h
Normal file
@@ -0,0 +1,102 @@
|
||||
#pragma once
|
||||
|
||||
#include "AudioBufferQueue.h"
|
||||
|
||||
//==============================================================================
|
||||
// Displays an oscilloscope (top half) and a magnitude spectrum (bottom half)
// of audio frames pulled from an AudioBufferQueue on a GUI timer.
template <typename SampleType>
class ScopeComponent : public juce::Component,
    private juce::Timer
{
public:
    using Queue = AudioBufferQueue<SampleType>;

    //==============================================================================
    // queueToUse must outlive this component (held by reference).
    ScopeComponent(Queue& queueToUse)
        : audioBufferQueue(queueToUse)
    {
        sampleData.fill(SampleType(0));   // start with a flat trace
        setFramesPerSecond(30);
    }

    //==============================================================================
    // Sets the display refresh rate (frames per second, 1..999).
    void setFramesPerSecond(int framesPerSecond)
    {
        jassert(framesPerSecond > 0 && framesPerSecond < 1000);
        startTimerHz(framesPerSecond);
    }

    //==============================================================================
    void paint(juce::Graphics& g) override
    {
        g.fillAll(juce::Colours::black);
        g.setColour(juce::Colours::white);

        auto area = getLocalBounds();
        auto h = (SampleType)area.getHeight();
        auto w = (SampleType)area.getWidth();

        // Oscilloscope
        auto scopeRect = juce::Rectangle<SampleType>{ SampleType(0), SampleType(0), w, h / 2 };
        plot(sampleData.data(), sampleData.size(), g, scopeRect, SampleType(1), h / 4);

        // Spectrum — only the first quarter of the workspace (the lowest
        // frequency bins) is drawn.
        auto spectrumRect = juce::Rectangle<SampleType>{ SampleType(0), h / 2, w, h / 2 };
        plot(spectrumData.data(), spectrumData.size() / 4, g, spectrumRect);
    }

    //==============================================================================
    void resized() override {}

private:
    //==============================================================================
    Queue& audioBufferQueue;                               // frame source (non-owning)
    std::array<SampleType, Queue::bufferSize> sampleData;  // latest time-domain frame

    juce::dsp::FFT fft{ Queue::order };
    using WindowFun = juce::dsp::WindowingFunction<SampleType>;
    WindowFun windowFun{ (size_t)fft.getSize(), WindowFun::hann };
    // FFT workspace: performFrequencyOnlyForwardTransform needs 2 * fftSize.
    std::array<SampleType, 2 * Queue::bufferSize> spectrumData;

    //==============================================================================
    // Per-frame update: pull the newest buffer, window + FFT it, convert the
    // magnitudes to a normalised 0..1 dB scale, then repaint.
    void timerCallback() override
    {
        audioBufferQueue.pop(sampleData.data());
        juce::FloatVectorOperations::copy(spectrumData.data(), sampleData.data(), (int)sampleData.size());

        auto fftSize = (size_t)fft.getSize();

        jassert(spectrumData.size() == 2 * fftSize);
        windowFun.multiplyWithWindowingTable(spectrumData.data(), fftSize);
        fft.performFrequencyOnlyForwardTransform(spectrumData.data());

        static constexpr auto mindB = SampleType(-160);
        static constexpr auto maxdB = SampleType(0);

        // Normalise each magnitude by fftSize, convert to dB, clamp to
        // [mindB, maxdB], and rescale into [0, 1] for plotting.
        for (auto& s : spectrumData)
            s = juce::jmap(juce::jlimit(mindB, maxdB, juce::Decibels::gainToDecibels(s) - juce::Decibels::gainToDecibels(SampleType(fftSize))), mindB, maxdB, SampleType(0), SampleType(1));

        repaint();
    }

    //==============================================================================
    // Draws numSamples values as a connected polyline spanning rect's width;
    // 'scaler' scales the amplitude, 'offset' lifts the baseline above the
    // rect's bottom edge.
    static void plot(const SampleType* data,
        size_t numSamples,
        juce::Graphics& g,
        juce::Rectangle<SampleType> rect,
        SampleType scaler = SampleType(1),
        SampleType offset = SampleType(0))
    {
        auto w = rect.getWidth();
        auto h = rect.getHeight();
        auto right = rect.getRight();

        auto center = rect.getBottom() - offset;
        auto gain = h * scaler;

        for (size_t i = 1; i < numSamples; ++i)
            g.drawLine({ juce::jmap(SampleType(i - 1), SampleType(0), SampleType(numSamples - 1), SampleType(right - w), SampleType(right)),
                         center - gain * data[i - 1],
                         juce::jmap(SampleType(i), SampleType(0), SampleType(numSamples - 1), SampleType(right - w), SampleType(right)),
                         center - gain * data[i] });
    }
};
|
||||
62
Source/ScopeDataCollector.h
Normal file
62
Source/ScopeDataCollector.h
Normal file
@@ -0,0 +1,62 @@
|
||||
#pragma once
|
||||
|
||||
template <typename SampleType>
|
||||
class ScopeDataCollector
|
||||
{
|
||||
public:
|
||||
//==============================================================================
|
||||
ScopeDataCollector(AudioBufferQueue<SampleType>& queueToUse)
|
||||
: audioBufferQueue(queueToUse)
|
||||
{
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
void process(const SampleType* data, size_t numSamples)
|
||||
{
|
||||
size_t index = 0;
|
||||
|
||||
if (state == State::waitingForTrigger)
|
||||
{
|
||||
while (index++ < numSamples)
|
||||
{
|
||||
auto currentSample = *data++;
|
||||
|
||||
if (currentSample >= triggerLevel && prevSample < triggerLevel)
|
||||
{
|
||||
numCollected = 0;
|
||||
state = State::collecting;
|
||||
break;
|
||||
}
|
||||
|
||||
prevSample = currentSample;
|
||||
}
|
||||
}
|
||||
|
||||
if (state == State::collecting)
|
||||
{
|
||||
while (index++ < numSamples)
|
||||
{
|
||||
buffer[numCollected++] = *data++;
|
||||
|
||||
if (numCollected == buffer.size())
|
||||
{
|
||||
audioBufferQueue.push(buffer.data(), buffer.size());
|
||||
state = State::waitingForTrigger;
|
||||
prevSample = SampleType(100);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
//==============================================================================
|
||||
AudioBufferQueue<SampleType>& audioBufferQueue;
|
||||
std::array<SampleType, AudioBufferQueue<SampleType>::bufferSize> buffer;
|
||||
size_t numCollected;
|
||||
SampleType prevSample = SampleType(100);
|
||||
|
||||
static constexpr auto triggerLevel = SampleType(0.05);
|
||||
|
||||
enum class State { waitingForTrigger, collecting } state{ State::waitingForTrigger };
|
||||
};
|
||||
107
Source/SynthVoice.cpp
Normal file
107
Source/SynthVoice.cpp
Normal file
@@ -0,0 +1,107 @@
|
||||
#include "SynthVoice.h"
|
||||
|
||||
//==============================================================================
|
||||
// Stores a reference to the parameter block shared by all voices; the block
// must outlive the voice.
NeuralSynthVoice::NeuralSynthVoice(NeuralSharedParams& sp) : shared(sp) {}
|
||||
|
||||
//==============================================================================
|
||||
//==============================================================================
// Prepares the voice for playback: default waveform, scratch block sized for
// the worst-case host block, and DSP chain + envelope configured for the
// sample rate.
void NeuralSynthVoice::prepare(const juce::dsp::ProcessSpec& spec)
{
    setWaveform(0);  // start on sine until a waveform change is requested
    // Allocates heapBlock storage; tempBlock is reused every render call.
    tempBlock = juce::dsp::AudioBlock<float>(heapBlock, spec.numChannels, spec.maximumBlockSize);
    processorChain.prepare(spec);
    adsr.setSampleRate(spec.sampleRate);
}
|
||||
|
||||
//==============================================================================
|
||||
void NeuralSynthVoice::noteStarted()
|
||||
{
|
||||
auto velocity = getCurrentlyPlayingNote().noteOnVelocity.asUnsignedFloat();
|
||||
auto freqHz = (float)getCurrentlyPlayingNote().getFrequencyInHertz();
|
||||
|
||||
processorChain.get<synthIndex>().setFrequency(freqHz, true);
|
||||
|
||||
juce::ADSR::Parameters p;
|
||||
p.attack = shared.attack->load();
|
||||
p.decay = shared.decay->load();
|
||||
p.sustain = shared.sustain->load();
|
||||
p.release = shared.release->load();
|
||||
|
||||
adsr.setParameters(p);
|
||||
adsr.noteOn();
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
void NeuralSynthVoice::notePitchbendChanged()
|
||||
{
|
||||
auto freqHz = (float)getCurrentlyPlayingNote().getFrequencyInHertz();
|
||||
processorChain.get<synthIndex>().setFrequency(freqHz, true);
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
void NeuralSynthVoice::noteStopped(bool allowTailOff)
|
||||
{
|
||||
adsr.noteOff(); //Triggers release phase
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
//==============================================================================
// Unused MPE dimensions: pressure, timbre and key-state changes are
// intentionally ignored by this voice.
void NeuralSynthVoice::notePressureChanged() {}
void NeuralSynthVoice::noteTimbreChanged() {}
void NeuralSynthVoice::noteKeyStateChanged() {}
|
||||
|
||||
//==============================================================================
|
||||
//==============================================================================
// Renders this voice: runs the oscillator chain into a scratch block,
// applies the ADSR envelope, and mixes the result into outputBuffer.
void NeuralSynthVoice::renderNextBlock(juce::AudioBuffer<float>& outputBuffer, int startSample, int numSamples)
{
    // Free the voice once the envelope has fully released.
    if (!adsr.isActive())
        clearCurrentNote();

    // Apply a deferred waveform change on the audio thread; -1 means none
    // pending ('waveform' is a member declared in SynthVoice.h).
    if (waveform != -1) {
        setWaveform(waveform);
        waveform = -1;
    }

    auto block = tempBlock.getSubBlock(0, (size_t)numSamples);
    block.clear();
    juce::dsp::ProcessContextReplacing<float> context(block);
    processorChain.process(context);

    // 3. Apply ADSR envelope to tempBlock
    // Wrap the scratch storage in an AudioBuffer so juce::ADSR can process
    // it; only the first numSamples are enveloped below.
    std::vector<float*> channelPtrs;
    for (size_t ch = 0; ch < tempBlock.getNumChannels(); ++ch)
        channelPtrs.push_back(tempBlock.getChannelPointer(ch));

    juce::AudioBuffer<float> buffer(channelPtrs.data(),
        static_cast<int>(tempBlock.getNumChannels()),
        static_cast<int>(tempBlock.getNumSamples()));

    adsr.applyEnvelopeToBuffer(buffer, 0, numSamples);

    // NOTE(review): this adds from 'tempBlock' (full capacity) rather than
    // the numSamples-sized 'block'; it appears to rely on AudioBlock::add
    // limiting the operation to the smaller block — confirm, and consider
    // .add(block) to state the intent directly.
    juce::dsp::AudioBlock<float>(outputBuffer)
        .getSubBlock((size_t)startSample, (size_t)numSamples)
        .add(tempBlock);
}
|
||||
|
||||
void NeuralSynthVoice::setWaveform(int waveformType)
|
||||
{
|
||||
auto& osc = processorChain.template get<synthIndex>();
|
||||
|
||||
switch (waveformType)
|
||||
{
|
||||
case 0:
|
||||
osc.initialise([](float x) { return std::sin(x); });
|
||||
break;
|
||||
|
||||
case 1:
|
||||
osc.initialise([](float x) { return x / juce::MathConstants<float>::pi; }); // Saw
|
||||
break;
|
||||
|
||||
case 2:
|
||||
osc.initialise([](float x) { return x < 0.0f ? -1.0f : 1.0f; }); // Square
|
||||
break;
|
||||
|
||||
case 3:
|
||||
osc.initialise([](float x) {
|
||||
return 2.0f * std::abs(2.0f * (x / juce::MathConstants<float>::twoPi) - 1.0f) - 1.0f;
|
||||
}); // Triangle
|
||||
break;
|
||||
}
|
||||
}
|
||||
100
Source/SynthVoice.h
Normal file
100
Source/SynthVoice.h
Normal file
@@ -0,0 +1,100 @@
|
||||
#pragma once
|
||||
|
||||
#include <JuceHeader.h>
|
||||
#include "NeuralSharedParams.h"
|
||||
|
||||
#include <iostream>
|
||||
|
||||
/*struct ADSRProcessor : public juce::dsp::ProcessorBase
|
||||
{
|
||||
// -----------------------------------------------------------------
|
||||
void prepare(const juce::dsp::ProcessSpec& spec) override
|
||||
{
|
||||
adsr.setSampleRate(spec.sampleRate);
|
||||
}
|
||||
|
||||
void reset() override { adsr.reset(); }
|
||||
|
||||
void process(const juce::dsp::ProcessContextReplacing<float> &ctx) override
|
||||
{
|
||||
DBG("Processing...");
|
||||
|
||||
auto& outputBlock = context.getOutputBlock();
|
||||
const auto numSamples = (int)outputBlock.getNumSamples();
|
||||
const auto numChannels = (int)outputBlock.getNumChannels();
|
||||
|
||||
// Wrap the outputBlock into AudioBuffer
|
||||
for (int ch = 0; ch < numChannels; ++ch)
|
||||
buffer.setWritePointer(ch, outputBlock.getChannelPointer(ch));
|
||||
|
||||
adsr.applyEnvelopeToBuffer(buffer, 0, numSamples);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
	// These two are NOT part of the ProcessorBase interface — they are
|
||||
// your private hooks that the voice will call on note events.
|
||||
void noteOn(const juce::ADSR::Parameters& p) {
|
||||
adsr.setParameters(p); adsr.noteOn();
|
||||
}
|
||||
void noteOff() { adsr.noteOff(); }
|
||||
|
||||
private:
|
||||
juce::ADSR adsr;
|
||||
juce::AudioBuffer<float> buffer;
|
||||
};*/
|
||||
|
||||
//==============================================================================
|
||||
class NeuralSynthVoice : public juce::MPESynthesiserVoice
|
||||
{
|
||||
public:
|
||||
NeuralSynthVoice(NeuralSharedParams& sp);
|
||||
|
||||
//==============================================================================
|
||||
void prepare(const juce::dsp::ProcessSpec& spec);
|
||||
|
||||
//==============================================================================
|
||||
void noteStarted() override;
|
||||
|
||||
//==============================================================================
|
||||
void notePitchbendChanged() override;
|
||||
|
||||
//==============================================================================
|
||||
void noteStopped(bool) override;
|
||||
|
||||
//==============================================================================
|
||||
void notePressureChanged();
|
||||
void noteTimbreChanged();
|
||||
void noteKeyStateChanged();
|
||||
|
||||
//==============================================================================
|
||||
void renderNextBlock(juce::AudioBuffer<float>& outputBuffer, int startSample, int numSamples);
|
||||
|
||||
void setWaveform(int waveformType);
|
||||
|
||||
void changeWaveform(int waveform) noexcept {
|
||||
this->waveform = waveform;
|
||||
}
|
||||
|
||||
private:
|
||||
//==============================================================================
|
||||
juce::HeapBlock<char> heapBlock;
|
||||
juce::dsp::AudioBlock<float> tempBlock;
|
||||
|
||||
enum
|
||||
{
|
||||
synthIndex
|
||||
};
|
||||
|
||||
juce::dsp::ProcessorChain<
|
||||
juce::dsp::Oscillator<float>
|
||||
> processorChain;
|
||||
|
||||
juce::ADSR adsr;
|
||||
NeuralSharedParams& shared;
|
||||
|
||||
static constexpr size_t lfoUpdateRate = 100;
|
||||
|
||||
static inline float msToSecs(float ms) { return ms * 0.001f; }
|
||||
|
||||
std::atomic<int> waveform { -1 };
|
||||
};
|
||||
Reference in New Issue
Block a user