/*
  ==============================================================================

    This file contains the basic framework code for a JUCE plugin processor.

  ==============================================================================
*/

#include "PluginProcessor.h"
#include "PluginEditor.h"

//==============================================================================
NeuralSynthAudioProcessor::NeuralSynthAudioProcessor()
    // Initialise the AudioProcessor base (output bus) before the APVTS,
    // which takes a reference to *this.
    : AudioProcessor (BusesProperties().withOutput ("Output", juce::AudioChannelSet::stereo(), true))
    , parameters (*this, nullptr, "PARAMETERS", createParameterLayout())
    , audioEngine (sp)
{
    parameters.addParameterListener ("waveform", this);
    parameters.addParameterListener ("attack",   this);
    parameters.addParameterListener ("decay",    this);
    parameters.addParameterListener ("sustain",  this);
    parameters.addParameterListener ("release",  this);

    // Cache the raw parameter values so the audio engine can read them lock-free.
    sp.attack  = parameters.getRawParameterValue ("attack");
    sp.decay   = parameters.getRawParameterValue ("decay");
    sp.sustain = parameters.getRawParameterValue ("sustain");
    sp.release = parameters.getRawParameterValue ("release");
}

NeuralSynthAudioProcessor::~NeuralSynthAudioProcessor()
{
}

//==============================================================================
const juce::String NeuralSynthAudioProcessor::getName() const
{
    return JucePlugin_Name;
}

bool NeuralSynthAudioProcessor::acceptsMidi() const
{
   #if JucePlugin_WantsMidiInput
    return true;
   #else
    return false;
   #endif
}

bool NeuralSynthAudioProcessor::producesMidi() const
{
   #if JucePlugin_ProducesMidiOutput
    return true;
   #else
    return false;
   #endif
}

bool NeuralSynthAudioProcessor::isMidiEffect() const
{
   #if JucePlugin_IsMidiEffect
    return true;
   #else
    return false;
   #endif
}

double NeuralSynthAudioProcessor::getTailLengthSeconds() const
{
    return 0.0;
}

int NeuralSynthAudioProcessor::getNumPrograms()
{
    return 1;   // NB: some hosts don't cope very well if you tell them there are 0 programs,
                // so this should be at least 1, even if you're not really implementing programs.
}

int NeuralSynthAudioProcessor::getCurrentProgram()
{
    return 0;
}

void NeuralSynthAudioProcessor::setCurrentProgram (int index)
{
    juce::ignoreUnused (index);
}

const juce::String NeuralSynthAudioProcessor::getProgramName (int index)
{
    juce::ignoreUnused (index);
    return {};
}

void NeuralSynthAudioProcessor::changeProgramName (int index, const juce::String& newName)
{
    juce::ignoreUnused (index, newName);
}

//==============================================================================
void NeuralSynthAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
{
    audioEngine.prepare ({ sampleRate, (juce::uint32) samplesPerBlock, 2 });
    midiMessageCollector.reset (sampleRate);
}

void NeuralSynthAudioProcessor::releaseResources()
{
    // When playback stops, you can use this as an opportunity to free up any
    // spare memory, etc.
}

bool NeuralSynthAudioProcessor::isBusesLayoutSupported (const BusesLayout& layouts) const
{
    // This is the place where you check if the layout is supported.
    // In this template code we only support mono or stereo output.
    if (layouts.getMainOutputChannelSet() != juce::AudioChannelSet::mono()
     && layouts.getMainOutputChannelSet() != juce::AudioChannelSet::stereo())
        return false;

    return true;
}

void NeuralSynthAudioProcessor::processBlock (juce::AudioSampleBuffer& buffer, juce::MidiBuffer& midiMessages)
{
    juce::ScopedNoDenormals noDenormals;
    auto totalNumInputChannels  = getTotalNumInputChannels();
    auto totalNumOutputChannels = getTotalNumOutputChannels();

    // Pick up any waveform change posted from the message thread and forward
    // it to every voice (the sentinel -1 means "no change pending").
    const int newWaveform = sp.waveform.exchange (-1);

    if (newWaveform != -1)
    {
        audioEngine.applyToVoices ([newWaveform] (NeuralSynthVoice* v)
        {
            v->changeWaveform (newWaveform);
        });
    }

    // Merge MIDI collected from the GUI keyboard into the host's MIDI buffer.
    midiMessageCollector.removeNextBlockOfMessages (midiMessages, buffer.getNumSamples());

    // Clear any output channels that don't contain input data.
    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    audioEngine.renderNextBlock (buffer, midiMessages, 0, buffer.getNumSamples());

    scopeDataCollector.process (buffer.getReadPointer (0), (size_t) buffer.getNumSamples());
}

//==============================================================================
bool NeuralSynthAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}

juce::AudioProcessorEditor* NeuralSynthAudioProcessor::createEditor()
{
    return new NeuralSynthAudioProcessorEditor (*this);
}

//==============================================================================
void NeuralSynthAudioProcessor::getStateInformation (juce::MemoryBlock& destData)
{
    // Store the parameters in the memory block. This is a minimal sketch using
    // the standard APVTS pattern (state tree serialised as XML); extend it if
    // you also need to persist non-parameter state.
    auto state = parameters.copyState();
    std::unique_ptr<juce::XmlElement> xml (state.createXml());

    if (xml != nullptr)
        copyXmlToBinary (*xml, destData);
}

void NeuralSynthAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
    // Restore the parameters from the memory block created by getStateInformation().
    std::unique_ptr<juce::XmlElement> xmlState (getXmlFromBinary (data, sizeInBytes));

    if (xmlState != nullptr && xmlState->hasTagName (parameters.state.getType()))
        parameters.replaceState (juce::ValueTree::fromXml (*xmlState));
}

void NeuralSynthAudioProcessor::parameterChanged (const juce::String& id, float newValue)
{
    // The ADSR parameters are read directly through the cached raw-value pointers,
    // so only the waveform choice needs to be handed to the audio thread here.
    if (id == "waveform")
        sp.waveform.store ((int) newValue, std::memory_order_release);
}

//==============================================================================
// This creates new instances of the plugin..
juce::AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new NeuralSynthAudioProcessor();
}

juce::AudioProcessorValueTreeState::ParameterLayout NeuralSynthAudioProcessor::createParameterLayout()
{
    std::vector<std::unique_ptr<juce::RangedAudioParameter>> params;

    params.push_back (std::make_unique<juce::AudioParameterChoice> (
        "waveform", "Waveform",
        juce::StringArray { "Sine", "Saw", "Square", "Triangle" }, 0));

    // NormalisableRange arguments: start / end / interval
    params.push_back (std::make_unique<juce::AudioParameterFloat> (
        "attack", "Attack", juce::NormalisableRange<float> (0.0f, 1.0f, 0.01f), 0.1f));
    params.push_back (std::make_unique<juce::AudioParameterFloat> (
        "decay", "Decay", juce::NormalisableRange<float> (0.0f, 1.0f, 0.01f), 0.5f));
    params.push_back (std::make_unique<juce::AudioParameterFloat> (
        "sustain", "Sustain", juce::NormalisableRange<float> (0.0f, 1.0f, 0.01f), 0.8f));
    params.push_back (std::make_unique<juce::AudioParameterFloat> (
        "release", "Release", juce::NormalisableRange<float> (0.01f, 1.0f, 0.01f), 1.0f));

    return { params.begin(), params.end() };
}